From eb1acaf92742d90573fecefd2cd0849009c7dff2 Mon Sep 17 00:00:00 2001
From: Jack May
Date: Thu, 14 May 2020 18:22:47 -0700
Subject: [PATCH] Remove archiver and storage program (#9992)

automerge
---
 Cargo.lock | 444 +++------
 Cargo.toml | 7 -
 archiver-lib/Cargo.toml | 43 -
 archiver-lib/src/archiver.rs | 922 ------------------
 archiver-lib/src/lib.rs | 11 -
 archiver-lib/src/result.rs | 47 -
 archiver-utils/Cargo.toml | 28 -
 archiver-utils/src/lib.rs | 120 ---
 archiver/.gitignore | 2 -
 archiver/Cargo.toml | 23 -
 archiver/src/main.rs | 131 ---
 bench-exchange/src/main.rs | 7 +-
 bench-exchange/tests/bench_exchange.rs | 2 +-
 bench-tps/src/main.rs | 9 +-
 chacha-cuda/Cargo.toml | 27 -
 chacha-cuda/src/chacha_cuda.rs | 280 ------
 chacha-cuda/src/lib.rs | 8 -
 chacha-sys/.gitignore | 2 -
 chacha-sys/Cargo.toml | 15 -
 chacha-sys/build.rs | 8 -
 chacha-sys/cpu-crypt/.gitignore | 1 -
 chacha-sys/cpu-crypt/Makefile | 25 -
 chacha-sys/cpu-crypt/chacha.h | 31 -
 chacha-sys/cpu-crypt/chacha20_core.c | 49 -
 chacha-sys/cpu-crypt/chacha_cbc.c | 72 --
 chacha-sys/src/lib.rs | 21 -
 chacha/.gitignore | 1 -
 chacha/Cargo.toml | 28 -
 chacha/src/chacha.rs | 185 ----
 chacha/src/lib.rs | 8 -
 ci/test-stable.sh | 2 +-
 cli/Cargo.toml | 1 -
 cli/src/cli.rs | 58 +-
 cli/src/lib.rs | 1 -
 cli/src/storage.rs | 400 --------
 core/Cargo.toml | 6 -
 core/benches/chacha.rs | 29 -
 core/src/cluster_info.rs | 155 +--
 core/src/contact_info.rs | 20 +-
 core/src/gossip_service.rs | 56 +-
 core/src/lib.rs | 1 -
 core/src/repair_service.rs | 124 +--
 core/src/replay_stage.rs | 23 +-
 core/src/retransmit_stage.rs | 6 +-
 core/src/rpc.rs | 84 --
 core/src/rpc_service.rs | 6 +-
 core/src/serve_repair.rs | 6 +-
 core/src/storage_stage.rs | 740 --------------
 core/src/tvu.rs | 25 +-
 core/src/validator.rs | 22 +-
 core/src/window_service.rs | 90 +-
 core/tests/storage_stage.rs | 261 -----
 docs/src/SUMMARY.md | 7 -
 docs/src/cluster/README.md | 8 +-
 docs/src/cluster/ledger-replication.md | 269 -----
 docs/src/cluster/synchronization.md | 2 +-
 .../ed_overview/README.md | 10 +-
 .../ed_overview/ed_attack_vectors.md | 14 -
 .../ed_overview/ed_economic_sustainability.md | 11 +-
 .../ed_overview/ed_mvp.md | 4 -
 .../ed_replication_client_economics/README.md | 6 -
 ...plication_client_reward_auto_delegation.md | 8 -
 .../ed_rce_storage_replication_rewards.md | 8 -
 .../ed_overview/ed_storage_rent_economics.md | 2 +-
 .../ed_validation_client_economics/README.md | 4 +-
 ...replication_validation_transaction_fees.md | 11 -
 ...state_validation_protocol_based_rewards.md | 8 +-
 ...d_vce_state_validation_transaction_fees.md | 2 +-
 .../ed_vce_validation_stake_delegation.md | 9 +-
 .../ledger-replication-to-implement.md | 137 ---
 docs/src/terminology.md | 34 +-
 docs/src/validator/blockstore.md | 3 -
 dos/src/main.rs | 2 +-
 genesis-programs/Cargo.toml | 1 -
 genesis-programs/src/lib.rs | 9 +-
 genesis/Cargo.toml | 1 -
 genesis/src/main.rs | 5 -
 gossip/src/main.rs | 6 +-
 local-cluster/Cargo.toml | 2 -
 local-cluster/src/cluster.rs | 1 -
 local-cluster/src/cluster_tests.rs | 6 +-
 local-cluster/src/lib.rs | 3 -
 local-cluster/src/local_cluster.rs | 162 +--
 local-cluster/tests/archiver.rs | 195 ----
 local-cluster/tests/local_cluster.rs | 27 +-
 multinode-demo/archiver-x.sh | 7 -
 multinode-demo/archiver.sh | 83 --
 multinode-demo/common.sh | 1 -
 multinode-demo/validator.sh | 13 -
 net/common.sh | 3 -
 net/gce.sh | 42 +-
 net/net.sh | 18 +-
 net/remote/remote-node.sh | 25 -
 net/ssh.sh | 9 -
 perf/src/perf_libs.rs | 20 -
 programs/bpf/Cargo.lock | 16 -
 programs/bpf/rust/sysval/src/lib.rs | 6 +-
 programs/stake/src/stake_instruction.rs | 6 +-
 programs/storage/Cargo.toml | 30 -
 programs/storage/src/lib.rs | 12 -
 programs/storage/src/rewards_pools.rs | 51 -
 programs/storage/src/storage_contract.rs | 642 ------------
 programs/storage/src/storage_instruction.rs | 189 ----
 programs/storage/src/storage_processor.rs | 296 ------
 runtime/Cargo.toml | 1 -
 runtime/src/accounts.rs | 5 +-
 runtime/src/bank.rs | 156 +--
 runtime/src/genesis_utils.rs | 1 -
 runtime/src/lib.rs | 1 -
 runtime/src/storage_utils.rs | 248 -----
 runtime/tests/storage.rs | 482 ---------
 sdk/src/clock.rs | 57 +-
 sdk/src/genesis_config.rs | 9 +-
 sdk/src/sysvar/rewards.rs | 12 +-
 .../genesis-test/cluster_token_count.sh | 14 +-
 .../genesis-test/get_program_accounts.sh | 1 -
 validator/src/main.rs | 12 -
 117 files changed, 383 insertions(+), 7735 deletions(-)
 delete mode 100644 archiver-lib/Cargo.toml
 delete mode 100644 archiver-lib/src/archiver.rs
 delete mode 100644 archiver-lib/src/lib.rs
 delete mode 100644 archiver-lib/src/result.rs
 delete mode 100644 archiver-utils/Cargo.toml
 delete mode 100644 archiver-utils/src/lib.rs
 delete mode 100644 archiver/.gitignore
 delete mode 100644 archiver/Cargo.toml
 delete mode 100644 archiver/src/main.rs
 delete mode 100644 chacha-cuda/Cargo.toml
 delete mode 100644 chacha-cuda/src/chacha_cuda.rs
 delete mode 100644 chacha-cuda/src/lib.rs
 delete mode 100644 chacha-sys/.gitignore
 delete mode 100644 chacha-sys/Cargo.toml
 delete mode 100644 chacha-sys/build.rs
 delete mode 100644 chacha-sys/cpu-crypt/.gitignore
 delete mode 100644 chacha-sys/cpu-crypt/Makefile
 delete mode 100644 chacha-sys/cpu-crypt/chacha.h
 delete mode 100644 chacha-sys/cpu-crypt/chacha20_core.c
 delete mode 100644 chacha-sys/cpu-crypt/chacha_cbc.c
 delete mode 100644 chacha-sys/src/lib.rs
 delete mode 100644 chacha/.gitignore
 delete mode 100644 chacha/Cargo.toml
 delete mode 100644 chacha/src/chacha.rs
 delete mode 100644 chacha/src/lib.rs
 delete mode 100644 cli/src/storage.rs
 delete mode 100644 core/benches/chacha.rs
 delete mode 100644 core/src/storage_stage.rs
 delete mode 100644 core/tests/storage_stage.rs
 delete mode 100644 docs/src/cluster/ledger-replication.md
 delete mode 100644 docs/src/implemented-proposals/ed_overview/ed_attack_vectors.md
 delete mode 100644 docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md
 delete mode 100644 docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md
 delete mode 100644 docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md
 delete mode 100644 docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md
 delete mode 100644 docs/src/proposals/ledger-replication-to-implement.md
 delete mode 100644 local-cluster/tests/archiver.rs
 delete mode 100755 multinode-demo/archiver-x.sh
 delete mode 100755 multinode-demo/archiver.sh
 delete mode 100644 programs/storage/Cargo.toml
 delete mode 100644 programs/storage/src/lib.rs
 delete mode 100644 programs/storage/src/rewards_pools.rs
 delete mode 100644 programs/storage/src/storage_contract.rs
 delete mode 100644 programs/storage/src/storage_instruction.rs
 delete mode 100644 programs/storage/src/storage_processor.rs
 delete mode 100644 runtime/src/storage_utils.rs
 delete mode 100644 runtime/tests/storage.rs

diff --git a/Cargo.lock b/Cargo.lock
index 31f03d48b0..52fc9c9b76 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -124,9 +124,9 @@ dependencies = [
[[package]] name = "backtrace-sys" -version = "0.1.36" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78848718ee1255a2485d1309ad9cdecfc2e7d0362dd11c6829364c6b35ae1bc7" +checksum = "18fbebbe1c9d1f383a9cc7e8ccdb471b91c8d024ee9c2ca5b5346121fe8b4399" dependencies = [ "cc", "libc", @@ -165,9 +165,9 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" [[package]] name = "base64" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3" +checksum = "53d1ccbaf7d9ec9537465a97bf19edc1a4e158ecb49fc16178202238c569cc42" [[package]] name = "bech32" @@ -201,7 +201,7 @@ dependencies = [ "lazycell", "log 0.4.8", "peeking_take_while", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", "regex", "rustc-hash", @@ -211,24 +211,18 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e84c238982c4b1e1ee668d136c510c67a13465279c0cb367ea6baf6310620a80" +checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" dependencies = [ - "bit-vec 0.5.1", + "bit-vec", ] [[package]] name = "bit-vec" -version = "0.5.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f59bbe95d4e52a6398ec21238d31577f2b28a9d86807f06ca59d191d8440d0bb" - -[[package]] -name = "bit-vec" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4523a10839ffae575fb08aa3423026c8cb4687eef43952afb956229d4f246f7" +checksum = "5f0dc55f2d8a1a85650ac47858bb001b4c0dd73d79e3c455a842925e68d29cd3" [[package]] name = "bitflags" @@ -294,9 +288,9 @@ checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" [[package]] name = "bstr" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "lazy_static", "memchr 2.3.3", @@ -416,11 +410,11 @@ dependencies = [ "clap", "heck", "log 0.4.8", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", "serde", "serde_json", - "syn 1.0.18", + "syn 1.0.19", "tempfile", "toml", ] @@ -814,9 +808,9 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1025,9 +1019,9 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" dependencies = [ "backtrace", "failure_derive", @@ -1035,13 +1029,13 @@ dependencies = [ [[package]] name = "failure_derive" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 
1.0.1", - "syn 1.0.18", + "syn 1.0.19", "synstructure", ] @@ -1068,9 +1062,9 @@ checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" [[package]] name = "filetime" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f59efc38004c988e4201d11d263b8171f49a2e7ec0bdbb71773433f271504a5e" +checksum = "affc17579b132fc2461adf7c575cc6e8b134ebca52c51f5411388965227dc695" dependencies = [ "cfg-if", "libc", @@ -1153,18 +1147,18 @@ checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" [[package]] name = "futures-channel" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" +checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" dependencies = [ "futures-core", ] [[package]] name = "futures-core" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" +checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" [[package]] name = "futures-cpupool" @@ -1178,45 +1172,49 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" +checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" [[package]] name = "futures-macro" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" +checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] name = "futures-sink" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" +checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" [[package]] name = "futures-task" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" +checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +dependencies = [ + "once_cell", +] [[package]] name = "futures-util" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" +checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" dependencies = [ "futures-core", "futures-io", "futures-macro", "futures-task", "memchr 2.3.3", + "pin-project", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1341,9 +1339,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" +checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" dependencies = [ "bytes 0.5.4", "fnv", @@ -1354,7 +1352,7 @@ dependencies = [ "indexmap", "log 0.4.8", "slab", - "tokio 0.2.19", 
+ "tokio 0.2.20", "tokio-util", ] @@ -1403,25 +1401,6 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" -[[package]] -name = "hex-literal" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - "proc-macro-hack", -] - -[[package]] -name = "hex-literal-impl" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4c5c844e2fee0bf673d54c2c177f1713b3d2af2ff6e666b49cb7572e6cf42d" -dependencies = [ - "proc-macro-hack", -] - [[package]] name = "hex_fmt" version = "0.3.0" @@ -1579,7 +1558,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.2.4", + "h2 0.2.5", "http 0.2.1", "http-body 0.3.1", "httparse", @@ -1588,7 +1567,7 @@ dependencies = [ "net2", "pin-project", "time", - "tokio 0.2.19", + "tokio 0.2.20", "tower-service", "want 0.3.0", ] @@ -1606,7 +1585,7 @@ dependencies = [ "log 0.4.8", "rustls", "rustls-native-certs", - "tokio 0.2.19", + "tokio 0.2.20", "tokio-rustls", "webpki", ] @@ -1751,9 +1730,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.37" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" +checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" dependencies = [ "wasm-bindgen", ] @@ -1805,9 +1784,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d386855ea963193689078018f46f9f39e06acda14c10d396e153cfd728fc690c" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -1974,9 +1953,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" +checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" [[package]] name = "lock_api" @@ -2105,9 +2084,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ "cfg-if", "fuchsia-zircon", @@ -2136,9 +2115,9 @@ dependencies = [ [[package]] name = "mio-uds" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ "iovec", "libc", @@ -2159,9 +2138,9 @@ dependencies = [ [[package]] name = "mirai-annotations" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76c45e77b9af9f0a3fd33cfd462210c1c221029343ac5346306f28b86943ebd" +checksum = "258c143ddb5105becc4834f66d0b0ad3d3205d07cb3efc3236f33f623dd07b16" [[package]] name = "multimap" @@ -2189,9 +2168,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", @@ -2259,9 +2238,9 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c8b15b261814f992e33760b1fca9fe8b693d8a65299f20c9901688636cfb746" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -2310,9 +2289,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffa5a33ddddfee04c0283a7653987d634e880347e96b5b2ed64de07efb59db9d" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -2358,9 +2337,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.55" +version = "0.9.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8" +checksum = "f02309a7f127000ed50594f0b50ecc69e7c654e16d41b4e8156d1b3df8e0b52e" dependencies = [ "autocfg 1.0.0", "cc", @@ -2501,9 +2480,9 @@ dependencies = [ [[package]] name = "paste" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c897744f63f34f7ae3a024d9162bb5001f4ad661dd24bea0dc9f075d2de1c6" +checksum = "0a229b1c58c692edcaa5b9b0948084f130f55d2dcc15b02fcc5340b2b4521476" dependencies = [ "paste-impl", "proc-macro-hack", @@ -2511,14 +2490,14 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66fd6f92e3594f2dd7b3fc23e42d82e292f7bcda6d8e5dcd167072327234ab89" +checksum = "2e0bf239e447e67ff6d16a8bb5e4d4bd2343acf5066061c0e8e06ac5ba8ca68c" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -2593,29 +2572,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.9" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f6a7f5eee6292c559c793430c55c00aea9d3b3d1905e855806ca4d7253426a2" +checksum = "81d480cb4e89522ccda96d0eed9af94180b7a5f93fb28f66e1fd7d68431663d1" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.9" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8988430ce790d8682672117bc06dda364c0be32d3abd738234f19f3240bad99a" +checksum = "a82996f11efccb19b685b14b5df818de31c1edcee3daa256ab5775dd98e72feb" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] name = "pin-project-lite" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" +checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" [[package]] name = "pin-utils" @@ -2695,9 +2674,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98e9e4b82e0ef281812565ea4751049f1bdcdfccda7d3f459f2e138a40c08678" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", "version_check 
0.9.1", ] @@ -2707,9 +2686,9 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", "syn-mid", "version_check 0.9.1", ] @@ -2737,9 +2716,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" +checksum = "8872cf6f48eee44265156c111456a700ab3483686b3f96df4cf5481c89157319" dependencies = [ "unicode-xid 0.2.0", ] @@ -2861,7 +2840,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49d77c41ca8767f2f41394c11a4eebccab83da25e7cc035387a3125f02be90a3" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", ] [[package]] @@ -3176,9 +3155,9 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -3208,7 +3187,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "time", - "tokio 0.2.19", + "tokio 0.2.20", "tokio-rustls", "url 2.1.1", "wasm-bindgen", @@ -3346,9 +3325,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", "winapi 0.3.8", @@ -3371,13 +3350,13 @@ dependencies = [ [[package]] name = "scroll_derive" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" +checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -3392,9 +3371,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f331b9025654145cd425b9ded0caf8f5ae0df80d418b326e2dc1c3dc5eb0620" +checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" dependencies = [ "bitflags", "core-foundation", @@ -3462,9 +3441,9 @@ version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -3519,9 +3498,9 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d08338d8024b227c62bd68a12c7c9883f5c66780abaef15c550dc56f46ee6515" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -3716,67 +3695,6 @@ dependencies = [ "solana-sdk", ] -[[package]] -name = "solana-archiver" -version = "1.2.0" -dependencies = [ - "clap", - "console 0.10.3", - "solana-archiver-lib", - "solana-clap-utils", - "solana-core", - "solana-logger", - 
"solana-metrics", - "solana-net-utils", - "solana-sdk", - "solana-version", -] - -[[package]] -name = "solana-archiver-lib" -version = "1.2.0" -dependencies = [ - "bincode", - "crossbeam-channel", - "ed25519-dalek", - "hex 0.4.2", - "log 0.4.8", - "rand 0.7.3", - "rand_chacha 0.2.2", - "serde", - "serde_derive", - "serde_json", - "solana-archiver-utils", - "solana-chacha", - "solana-chacha-sys", - "solana-client", - "solana-core", - "solana-ledger", - "solana-logger", - "solana-metrics", - "solana-net-utils", - "solana-perf", - "solana-sdk", - "solana-storage-program", - "solana-streamer", - "thiserror", -] - -[[package]] -name = "solana-archiver-utils" -version = "1.2.0" -dependencies = [ - "hex 0.4.2", - "log 0.4.8", - "rand 0.7.3", - "solana-chacha", - "solana-chacha-sys", - "solana-ledger", - "solana-logger", - "solana-perf", - "solana-sdk", -] - [[package]] name = "solana-banking-bench" version = "1.2.0" @@ -3912,42 +3830,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-chacha" -version = "1.2.0" -dependencies = [ - "hex-literal", - "log 0.4.8", - "rand 0.7.3", - "rand_chacha 0.2.2", - "solana-chacha-sys", - "solana-ledger", - "solana-logger", - "solana-perf", - "solana-sdk", -] - -[[package]] -name = "solana-chacha-cuda" -version = "1.2.0" -dependencies = [ - "hex-literal", - "log 0.4.8", - "solana-archiver-utils", - "solana-chacha", - "solana-ledger", - "solana-logger", - "solana-perf", - "solana-sdk", -] - -[[package]] -name = "solana-chacha-sys" -version = "1.2.0" -dependencies = [ - "cc", -] - [[package]] name = "solana-clap-utils" version = "1.2.0" @@ -3997,7 +3879,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-stake-program", - "solana-storage-program", "solana-transaction-status", "solana-version", "solana-vote-program", @@ -4096,7 +3977,6 @@ dependencies = [ "serial_test_derive", "solana-bpf-loader-program", "solana-budget-program", - "solana-chacha-cuda", "solana-clap-utils", "solana-client", "solana-faucet", @@ -4111,7 +3991,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-stake-program", - "solana-storage-program", "solana-streamer", "solana-sys-tuner", "solana-transaction-status", @@ -4146,7 +4025,7 @@ dependencies = [ "reqwest", "serde", "syn 0.15.44", - "syn 1.0.18", + "syn 1.0.19", "tokio 0.1.22", "winapi 0.3.8", ] @@ -4232,7 +4111,7 @@ dependencies = [ name = "solana-genesis" version = "1.2.0" dependencies = [ - "base64 0.12.0", + "base64 0.12.1", "chrono", "clap", "serde", @@ -4244,7 +4123,6 @@ dependencies = [ "solana-logger", "solana-sdk", "solana-stake-program", - "solana-storage-program", "solana-version", "solana-vote-program", "tempfile", @@ -4260,7 +4138,6 @@ dependencies = [ "solana-exchange-program", "solana-runtime", "solana-sdk", - "solana-storage-program", "solana-vest-program", ] @@ -4422,7 +4299,6 @@ dependencies = [ "rand 0.7.3", "serial_test", "serial_test_derive", - "solana-archiver-lib", "solana-client", "solana-config-program", "solana-core", @@ -4436,7 +4312,6 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-stake-program", - "solana-storage-program", "solana-vest-program", "solana-vote-program", "tempfile", @@ -4688,7 +4563,6 @@ dependencies = [ "solana-rayon-threadlimit", "solana-sdk", "solana-stake-program", - "solana-storage-program", "solana-vote-program", "tempfile", "thiserror", @@ -4755,9 +4629,9 @@ name = "solana-sdk-macro" version = "1.2.0" dependencies = [ "bs58 0.3.1", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ 
-4818,22 +4692,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "solana-storage-program" -version = "1.2.0" -dependencies = [ - "assert_matches", - "bincode", - "log 0.4.8", - "num-derive 0.3.0", - "num-traits", - "rand 0.7.3", - "serde", - "serde_derive", - "solana-logger", - "solana-sdk", -] - [[package]] name = "solana-streamer" version = "1.2.0" @@ -5114,9 +4972,9 @@ version = "0.0.1-sol5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67801085b6adee0d30caa77d126bbceafa4b9bb8f4c7871e29638faa20dcd8b1" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -5348,7 +5206,7 @@ version = "0.0.1-sol5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "790621a290a17a41cec0883decc861b2102b5d4d9f638492bbc55613ea142e85" dependencies = [ - "bit-vec 0.6.1", + "bit-vec", "lazy_static", "proptest", "sha2", @@ -5421,7 +5279,7 @@ checksum = "f0f45ed1b65bf9a4bf2f7b7dc59212d1926e9eaf00fa998988e420fd124467c6" dependencies = [ "phf_generator", "phf_shared", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", "string_cache_shared", ] @@ -5463,9 +5321,9 @@ checksum = "d239ca4b13aee7a2142e6795cbd69e457665ff8037aed33b3effdc430d2f927a" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -5499,11 +5357,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410a7488c0a728c7ceb4ad59b9567eb4053d02e8cc7f5c0e0eeeb39518369213" +checksum = "e8e5aa70697bb26ee62214ae3288465ecec0000f05182f039b477001f08f5ae7" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", "unicode-xid 0.2.0", ] @@ -5514,9 +5372,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -5525,9 +5383,9 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", "unicode-xid 0.2.0", ] @@ -5674,9 +5532,9 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f34e0c1caaa462fd840ec6b768946ea1e7842620d94fe29d5b847138f521269" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", ] [[package]] @@ -5786,9 +5644,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d9c43f1bb96970e153bcbae39a65e249ccb942bd9d36dbdf086024920417c9c" +checksum = "05c1d570eb1a36f0345a5ce9c6c6e665b70b73d11236912c0b477616aeec47b1" dependencies = [ "bytes 0.5.4", "fnv", @@ -5893,7 +5751,7 @@ checksum = "4adb8b3e5f86b707f1b54e7c15b6de52617a823608ccda98a15d3a24222f265a" dependencies = [ "futures-core", "rustls", - "tokio 0.2.19", + "tokio 0.2.20", "webpki", ] @@ -6005,7 +5863,7 @@ dependencies = [ "futures-sink", "log 0.4.8", "pin-project-lite", - "tokio 0.2.19", + "tokio 0.2.20", ] [[package]] @@ -6215,9 +6073,9 @@ checksum = 
"3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" [[package]] name = "vec_map" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" @@ -6286,9 +6144,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.60" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" +checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" dependencies = [ "cfg-if", "serde", @@ -6298,24 +6156,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.60" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" +checksum = "c3972e137ebf830900db522d6c8fd74d1900dcfc733462e9a12e942b00b4ac94" dependencies = [ "bumpalo", "lazy_static", "log 0.4.8", - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" dependencies = [ "cfg-if", "js-sys", @@ -6325,9 +6183,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.60" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" +checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" dependencies = [ "quote 1.0.1", "wasm-bindgen-macro-support", @@ -6335,28 +6193,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.60" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" +checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.60" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" +checksum = "a91c2916119c17a8e316507afaaa2dd94b47646048014bbdf6bef098c1bb58ad" [[package]] name = "web-sys" -version = "0.3.37" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" dependencies = [ "js-sys", "wasm-bindgen", @@ -6574,9 +6432,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ - "proc-macro2 1.0.10", + "proc-macro2 1.0.12", "quote 1.0.1", - "syn 1.0.18", + "syn 1.0.19", 
"synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 9211fd611b..ef59ba9e26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,9 +5,6 @@ members = [ "bench-tps", "accounts-bench", "banking-bench", - "chacha", - "chacha-cuda", - "chacha-sys", "cli-config", "client", "core", @@ -42,12 +39,8 @@ members = [ "programs/noop", "programs/ownable", "programs/stake", - "programs/storage", "programs/vest", "programs/vote", - "archiver", - "archiver-lib", - "archiver-utils", "remote-wallet", "ramp-tps", "runtime", diff --git a/archiver-lib/Cargo.toml b/archiver-lib/Cargo.toml deleted file mode 100644 index 7397363941..0000000000 --- a/archiver-lib/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "solana-archiver-lib" -version = "1.2.0" -description = "Solana Archiver Library" -authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/solana" -license = "Apache-2.0" -homepage = "https://solana.com/" -edition = "2018" - -[dependencies] -bincode = "1.2.1" -crossbeam-channel = "0.4" -ed25519-dalek = "=1.0.0-pre.3" -log = "0.4.8" -rand = "0.7.0" -rand_chacha = "0.2.2" -solana-client = { path = "../client", version = "1.2.0" } -solana-storage-program = { path = "../programs/storage", version = "1.2.0" } -thiserror = "1.0" -serde = "1.0.110" -serde_json = "1.0.53" -serde_derive = "1.0.103" -solana-net-utils = { path = "../net-utils", version = "1.2.0" } -solana-chacha = { path = "../chacha", version = "1.2.0" } -solana-chacha-sys = { path = "../chacha-sys", version = "1.2.0" } -solana-ledger = { path = "../ledger", version = "1.2.0" } -solana-logger = { path = "../logger", version = "1.2.0" } -solana-perf = { path = "../perf", version = "1.2.0" } -solana-sdk = { path = "../sdk", version = "1.2.0" } -solana-core = { path = "../core", version = "1.2.0" } -solana-streamer = { path = "../streamer", version = "1.2.0" } -solana-archiver-utils = { path = "../archiver-utils", version = "1.2.0" } -solana-metrics = { path = "../metrics", version = "1.2.0" } - -[dev-dependencies] -hex = "0.4.2" - -[lib] -name = "solana_archiver_lib" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/archiver-lib/src/archiver.rs b/archiver-lib/src/archiver.rs deleted file mode 100644 index c5b51bd549..0000000000 --- a/archiver-lib/src/archiver.rs +++ /dev/null @@ -1,922 +0,0 @@ -use crate::result::ArchiverError; -use crossbeam_channel::unbounded; -use rand::{thread_rng, Rng, SeedableRng}; -use rand_chacha::ChaChaRng; -use solana_archiver_utils::sample_file; -use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE}; -use solana_client::{ - rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn, - thin_client::ThinClient, -}; -use solana_core::{ - cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}, - cluster_slots::ClusterSlots, - contact_info::ContactInfo, - gossip_service::GossipService, - repair_service, - repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy}, - serve_repair::ServeRepair, - shred_fetch_stage::ShredFetchStage, - sigverify_stage::{DisabledSigVerifier, SigVerifyStage}, - storage_stage::NUM_STORAGE_SAMPLES, - window_service::WindowService, -}; -use solana_ledger::{ - blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred, -}; -use solana_net_utils::bind_in_range; -use solana_perf::packet::Packets; -use solana_perf::packet::{limited_deserialize, PACKET_DATA_SIZE}; -use solana_perf::recycler::Recycler; -use solana_sdk::packet::Packet; -use solana_sdk::{ - 
account_utils::StateMut, - client::{AsyncClient, SyncClient}, - clock::{get_complete_segment_from_slot, get_segment_from_slot, Slot}, - commitment_config::CommitmentConfig, - hash::Hash, - message::Message, - signature::{Keypair, Signature, Signer}, - timing::timestamp, - transaction::Transaction, - transport::TransportError, -}; -use solana_storage_program::{ - storage_contract::StorageContract, - storage_instruction::{self, StorageAccountType}, -}; -use solana_streamer::streamer::{receiver, responder, PacketReceiver}; -use std::{ - io::{self, ErrorKind}, - net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, - path::{Path, PathBuf}, - result, - sync::atomic::{AtomicBool, Ordering}, - sync::mpsc::{channel, Receiver, Sender}, - sync::Arc, - thread::{sleep, spawn, JoinHandle}, - time::Duration, -}; - -type Result = std::result::Result; - -static ENCRYPTED_FILENAME: &str = "ledger.enc"; - -#[derive(Serialize, Deserialize)] -pub enum ArchiverRequest { - GetSlotHeight(SocketAddr), -} - -pub struct Archiver { - thread_handles: Vec>, - exit: Arc, -} - -// Shared Archiver Meta struct used internally -#[derive(Default)] -struct ArchiverMeta { - slot: Slot, - slots_per_segment: u64, - ledger_path: PathBuf, - signature: Signature, - ledger_data_file_encrypted: PathBuf, - sampling_offsets: Vec, - blockhash: Hash, - sha_state: Hash, - num_chacha_blocks: usize, - client_commitment: CommitmentConfig, -} - -fn get_slot_from_signature( - signature: &Signature, - storage_turn: u64, - slots_per_segment: u64, -) -> u64 { - let signature_vec = signature.as_ref(); - let mut segment_index = u64::from(signature_vec[0]) - | (u64::from(signature_vec[1]) << 8) - | (u64::from(signature_vec[1]) << 16) - | (u64::from(signature_vec[2]) << 24); - let max_segment_index = - get_complete_segment_from_slot(storage_turn, slots_per_segment).unwrap(); - segment_index %= max_segment_index as u64; - segment_index * slots_per_segment -} - -fn create_request_processor( - socket: UdpSocket, - exit: &Arc, - slot_receiver: Receiver, -) -> Vec> { - let mut thread_handles = vec![]; - let (s_reader, r_reader) = channel(); - let (s_responder, r_responder) = channel(); - let storage_socket = Arc::new(socket); - let recycler = Recycler::default(); - let t_receiver = receiver(storage_socket.clone(), exit, s_reader, recycler, "archiver"); - thread_handles.push(t_receiver); - - let t_responder = responder("archiver-responder", storage_socket, r_responder); - thread_handles.push(t_responder); - - let exit = exit.clone(); - let t_processor = spawn(move || { - let slot = poll_for_slot(slot_receiver, &exit); - - loop { - if exit.load(Ordering::Relaxed) { - break; - } - - let packets = r_reader.recv_timeout(Duration::from_secs(1)); - - if let Ok(packets) = packets { - for packet in &packets.packets { - let req: result::Result> = - limited_deserialize(&packet.data[..packet.meta.size]); - match req { - Ok(ArchiverRequest::GetSlotHeight(from)) => { - let packet = Packet::from_data(&from, slot); - let _ = s_responder.send(Packets::new(vec![packet])); - } - Err(e) => { - info!("invalid request: {:?}", e); - } - } - } - } - } - }); - thread_handles.push(t_processor); - thread_handles -} - -fn poll_for_slot(receiver: Receiver, exit: &Arc) -> u64 { - loop { - let slot = receiver.recv_timeout(Duration::from_secs(1)); - if let Ok(slot) = slot { - return slot; - } - if exit.load(Ordering::Relaxed) { - return 0; - } - } -} - -impl Archiver { - /// Returns a Result that contains an archiver on success - /// - /// # Arguments - /// * `ledger_path` - path to where 
the ledger will be stored. - /// Causes panic if none - /// * `node` - The archiver node - /// * `cluster_entrypoint` - ContactInfo representing an entry into the network - /// * `keypair` - Keypair for this archiver - #[allow(clippy::new_ret_no_self)] - pub fn new( - ledger_path: &Path, - node: Node, - cluster_entrypoint: ContactInfo, - keypair: Arc, - storage_keypair: Arc, - client_commitment: CommitmentConfig, - ) -> Result { - let exit = Arc::new(AtomicBool::new(false)); - - info!("Archiver: id: {}", keypair.pubkey()); - info!("Creating cluster info...."); - let cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone()); - cluster_info.set_entrypoint(cluster_entrypoint.clone()); - let cluster_info = Arc::new(cluster_info); - let cluster_slots = Arc::new(ClusterSlots::default()); - // Note for now, this ledger will not contain any of the existing entries - // in the ledger located at ledger_path, and will only append on newly received - // entries after being passed to window_service - let blockstore = Arc::new( - Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"), - ); - - let gossip_service = GossipService::new(&cluster_info, None, node.sockets.gossip, &exit); - - info!("Connecting to the cluster via {:?}", cluster_entrypoint); - let (nodes, _) = - match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 2) { - Ok(nodes_and_archivers) => nodes_and_archivers, - Err(e) => { - //shutdown services before exiting - exit.store(true, Ordering::Relaxed); - gossip_service.join()?; - return Err(e.into()); - } - }; - let client = solana_core::gossip_service::get_client(&nodes); - - info!("Setting up mining account..."); - if let Err(e) = - Self::setup_mining_account(&client, &keypair, &storage_keypair, client_commitment) - { - //shutdown services before exiting - exit.store(true, Ordering::Relaxed); - gossip_service.join()?; - return Err(e); - }; - - let repair_socket = Arc::new(node.sockets.repair); - let shred_sockets: Vec> = - node.sockets.tvu.into_iter().map(Arc::new).collect(); - let shred_forward_sockets: Vec> = node - .sockets - .tvu_forwards - .into_iter() - .map(Arc::new) - .collect(); - let (shred_fetch_sender, shred_fetch_receiver) = channel(); - let fetch_stage = ShredFetchStage::new( - shred_sockets, - shred_forward_sockets, - repair_socket.clone(), - &shred_fetch_sender, - None, - &exit, - ); - let (slot_sender, slot_receiver) = channel(); - let request_processor = - create_request_processor(node.sockets.storage.unwrap(), &exit, slot_receiver); - - let t_archiver = { - let exit = exit.clone(); - let node_info = node.info.clone(); - let mut meta = ArchiverMeta { - ledger_path: ledger_path.to_path_buf(), - client_commitment, - ..ArchiverMeta::default() - }; - spawn(move || { - // setup archiver - let window_service = match Self::setup( - &mut meta, - cluster_info.clone(), - &blockstore, - &exit, - &node_info, - &storage_keypair, - repair_socket, - shred_fetch_receiver, - slot_sender, - cluster_slots, - ) { - Ok(window_service) => window_service, - Err(e) => { - //shutdown services before exiting - error!("setup failed {:?}; archiver thread exiting...", e); - exit.store(true, Ordering::Relaxed); - request_processor - .into_iter() - .for_each(|t| t.join().unwrap()); - fetch_stage.join().unwrap(); - gossip_service.join().unwrap(); - return; - } - }; - - info!("setup complete"); - // run archiver - Self::run( - &mut meta, - &blockstore, - cluster_info, - &keypair, - &storage_keypair, - &exit, - ); - // wait until exit - 
request_processor - .into_iter() - .for_each(|t| t.join().unwrap()); - fetch_stage.join().unwrap(); - gossip_service.join().unwrap(); - window_service.join().unwrap() - }) - }; - - Ok(Self { - thread_handles: vec![t_archiver], - exit, - }) - } - - fn run( - meta: &mut ArchiverMeta, - blockstore: &Arc, - cluster_info: Arc, - archiver_keypair: &Arc, - storage_keypair: &Arc, - exit: &Arc, - ) { - // encrypt segment - Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful"); - let enc_file_path = meta.ledger_data_file_encrypted.clone(); - // do replicate - loop { - if exit.load(Ordering::Relaxed) { - break; - } - - // TODO check if more segments are available - based on space constraints - Self::create_sampling_offsets(meta); - let sampling_offsets = &meta.sampling_offsets; - meta.sha_state = - match Self::sample_file_to_create_mining_hash(&enc_file_path, sampling_offsets) { - Ok(hash) => hash, - Err(err) => { - warn!("Error sampling file, exiting: {:?}", err); - break; - } - }; - - Self::submit_mining_proof(meta, &cluster_info, archiver_keypair, storage_keypair); - - // TODO make this a lot more frequent by picking a "new" blockhash instead of picking a storage blockhash - // prep the next proof - let (storage_blockhash, _) = match Self::poll_for_blockhash_and_slot( - &cluster_info, - meta.slots_per_segment, - &meta.blockhash, - exit, - ) { - Ok(blockhash_and_slot) => blockhash_and_slot, - Err(e) => { - warn!( - "Error couldn't get a newer blockhash than {:?}. {:?}", - meta.blockhash, e - ); - break; - } - }; - meta.blockhash = storage_blockhash; - Self::redeem_rewards( - &cluster_info, - archiver_keypair, - storage_keypair, - meta.client_commitment, - ); - } - exit.store(true, Ordering::Relaxed); - } - - fn redeem_rewards( - cluster_info: &ClusterInfo, - archiver_keypair: &Arc, - storage_keypair: &Arc, - client_commitment: CommitmentConfig, - ) { - let nodes = cluster_info.tvu_peers(); - let client = solana_core::gossip_service::get_client(&nodes); - - if let Ok(Some(account)) = - client.get_account_with_commitment(&storage_keypair.pubkey(), client_commitment) - { - if let Ok(StorageContract::ArchiverStorage { validations, .. }) = account.state() { - if !validations.is_empty() { - let ix = storage_instruction::claim_reward( - &archiver_keypair.pubkey(), - &storage_keypair.pubkey(), - ); - let message = Message::new_with_payer(&[ix], Some(&archiver_keypair.pubkey())); - if let Err(e) = client.send_message(&[archiver_keypair.as_ref()], message) { - error!("unable to redeem reward, tx failed: {:?}", e); - } else { - info!( - "collected mining rewards: Account balance {:?}", - client.get_balance_with_commitment( - &archiver_keypair.pubkey(), - client_commitment - ) - ); - } - } - } - } else { - info!("Redeem mining reward: No account data found"); - } - } - - // Find a segment to replicate and download it. 
- #[allow(clippy::too_many_arguments)] - fn setup( - meta: &mut ArchiverMeta, - cluster_info: Arc, - blockstore: &Arc, - exit: &Arc, - node_info: &ContactInfo, - storage_keypair: &Arc, - repair_socket: Arc, - shred_fetch_receiver: PacketReceiver, - slot_sender: Sender, - cluster_slots: Arc, - ) -> Result { - let slots_per_segment = - match Self::get_segment_config(&cluster_info, meta.client_commitment) { - Ok(slots_per_segment) => slots_per_segment, - Err(e) => { - error!("unable to get segment size configuration, exiting..."); - //shutdown services before exiting - exit.store(true, Ordering::Relaxed); - return Err(e); - } - }; - let (segment_blockhash, segment_slot) = match Self::poll_for_segment( - &cluster_info, - slots_per_segment, - &Hash::default(), - exit, - ) { - Ok(blockhash_and_slot) => blockhash_and_slot, - Err(e) => { - //shutdown services before exiting - exit.store(true, Ordering::Relaxed); - return Err(e); - } - }; - let signature = storage_keypair.sign_message(segment_blockhash.as_ref()); - let slot = get_slot_from_signature(&signature, segment_slot, slots_per_segment); - info!("replicating slot: {}", slot); - slot_sender.send(slot)?; - meta.slot = slot; - meta.slots_per_segment = slots_per_segment; - meta.signature = signature; - meta.blockhash = segment_blockhash; - - let mut repair_slot_range = RepairSlotRange::default(); - repair_slot_range.end = slot + slots_per_segment; - repair_slot_range.start = slot; - - let (retransmit_sender, _) = channel(); - - let (verified_sender, verified_receiver) = unbounded(); - - let _sigverify_stage = SigVerifyStage::new( - shred_fetch_receiver, - verified_sender, - DisabledSigVerifier::default(), - ); - - let window_service = WindowService::new( - blockstore.clone(), - cluster_info.clone(), - verified_receiver, - retransmit_sender, - repair_socket, - &exit, - RepairStrategy::RepairRange(repair_slot_range), - &Arc::new(LeaderScheduleCache::default()), - |_, _, _, _| true, - cluster_slots, - ); - info!("waiting for ledger download"); - Self::wait_for_segment_download( - slot, - slots_per_segment, - &blockstore, - &exit, - &node_info, - cluster_info, - ); - Ok(window_service) - } - - fn wait_for_segment_download( - start_slot: Slot, - slots_per_segment: u64, - blockstore: &Arc, - exit: &Arc, - node_info: &ContactInfo, - cluster_info: Arc, - ) { - info!( - "window created, waiting for ledger download starting at slot {:?}", - start_slot - ); - let mut current_slot = start_slot; - 'outer: loop { - while blockstore.is_full(current_slot) { - current_slot += 1; - info!("current slot: {}", current_slot); - if current_slot >= start_slot + slots_per_segment { - break 'outer; - } - } - if exit.load(Ordering::Relaxed) { - break; - } - sleep(Duration::from_secs(1)); - } - - info!("Done receiving entries from window_service"); - - // Remove archiver from the data plane - let mut contact_info = node_info.clone(); - contact_info.tvu = "0.0.0.0:0".parse().unwrap(); - contact_info.wallclock = timestamp(); - // copy over the adopted shred_version from the entrypoint - contact_info.shred_version = cluster_info.my_shred_version(); - cluster_info.update_contact_info(|current| *current = contact_info); - } - - fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc) -> Result<()> { - meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME); - - { - let mut ivec = [0u8; 64]; - ivec.copy_from_slice(&meta.signature.as_ref()); - - let num_encrypted_bytes = chacha_cbc_encrypt_ledger( - blockstore, - meta.slot, - meta.slots_per_segment, - 
&meta.ledger_data_file_encrypted, - &mut ivec, - )?; - - meta.num_chacha_blocks = num_encrypted_bytes / CHACHA_BLOCK_SIZE; - } - - info!( - "Done encrypting the ledger: {:?}", - meta.ledger_data_file_encrypted - ); - Ok(()) - } - - fn create_sampling_offsets(meta: &mut ArchiverMeta) { - meta.sampling_offsets.clear(); - let mut rng_seed = [0u8; 32]; - rng_seed.copy_from_slice(&meta.blockhash.as_ref()); - let mut rng = ChaChaRng::from_seed(rng_seed); - for _ in 0..NUM_STORAGE_SAMPLES { - meta.sampling_offsets - .push(rng.gen_range(0, meta.num_chacha_blocks) as u64); - } - } - - fn sample_file_to_create_mining_hash( - enc_file_path: &Path, - sampling_offsets: &[u64], - ) -> Result { - let sha_state = sample_file(enc_file_path, sampling_offsets)?; - info!("sampled sha_state: {}", sha_state); - Ok(sha_state) - } - - fn setup_mining_account( - client: &ThinClient, - keypair: &Keypair, - storage_keypair: &Keypair, - client_commitment: CommitmentConfig, - ) -> Result<()> { - // make sure archiver has some balance - info!("checking archiver keypair..."); - if client.poll_balance_with_timeout_and_commitment( - &keypair.pubkey(), - &Duration::from_millis(100), - &Duration::from_secs(5), - client_commitment, - )? == 0 - { - return Err(ArchiverError::EmptyStorageAccountBalance); - } - - info!("checking storage account keypair..."); - // check if the storage account exists - let balance = - client.poll_get_balance_with_commitment(&storage_keypair.pubkey(), client_commitment); - if balance.is_err() || balance.unwrap() == 0 { - let blockhash = match client.get_recent_blockhash_with_commitment(client_commitment) { - Ok((blockhash, _)) => blockhash, - Err(e) => { - return Err(ArchiverError::TransportError(e)); - } - }; - - let ixs = storage_instruction::create_storage_account( - &keypair.pubkey(), - &keypair.pubkey(), - &storage_keypair.pubkey(), - 1, - StorageAccountType::Archiver, - ); - let tx = Transaction::new_signed_instructions(&[keypair], &ixs, blockhash); - let signature = client.async_send_transaction(tx)?; - client - .poll_for_signature_with_commitment(&signature, client_commitment) - .map_err(|err| match err { - TransportError::IoError(e) => e, - TransportError::TransactionError(_) => io::Error::new( - ErrorKind::Other, - "setup_mining_account: signature not found", - ), - TransportError::Custom(e) => io::Error::new(ErrorKind::Other, e), - })?; - } - Ok(()) - } - - fn submit_mining_proof( - meta: &ArchiverMeta, - cluster_info: &ClusterInfo, - archiver_keypair: &Arc, - storage_keypair: &Arc, - ) { - // No point if we've got no storage account... 
- let nodes = cluster_info.tvu_peers(); - let client = solana_core::gossip_service::get_client(&nodes); - let storage_balance = client - .poll_get_balance_with_commitment(&storage_keypair.pubkey(), meta.client_commitment); - if storage_balance.is_err() || storage_balance.unwrap() == 0 { - error!("Unable to submit mining proof, no storage account"); - return; - } - // ...or no lamports for fees - let balance = client - .poll_get_balance_with_commitment(&archiver_keypair.pubkey(), meta.client_commitment); - if balance.is_err() || balance.unwrap() == 0 { - error!("Unable to submit mining proof, insufficient Archiver Account balance"); - return; - } - - let blockhash = match client.get_recent_blockhash_with_commitment(meta.client_commitment) { - Ok((blockhash, _)) => blockhash, - Err(_) => { - error!("unable to get recent blockhash, can't submit proof"); - return; - } - }; - let instruction = storage_instruction::mining_proof( - &storage_keypair.pubkey(), - meta.sha_state, - get_segment_from_slot(meta.slot, meta.slots_per_segment), - Signature::new(&meta.signature.as_ref()), - meta.blockhash, - ); - let message = Message::new_with_payer(&[instruction], Some(&archiver_keypair.pubkey())); - let mut transaction = Transaction::new( - &[archiver_keypair.as_ref(), storage_keypair.as_ref()], - message, - blockhash, - ); - if let Err(err) = client.send_and_confirm_transaction( - &[archiver_keypair.as_ref(), storage_keypair.as_ref()], - &mut transaction, - 10, - 0, - ) { - error!("Error: {:?}; while sending mining proof", err); - } - } - - pub fn close(self) { - self.exit.store(true, Ordering::Relaxed); - self.join() - } - - pub fn join(self) { - for handle in self.thread_handles { - handle.join().unwrap(); - } - } - - fn get_segment_config( - cluster_info: &ClusterInfo, - client_commitment: CommitmentConfig, - ) -> Result { - let rpc_peers = cluster_info.all_rpc_peers(); - debug!("rpc peers: {:?}", rpc_peers); - if !rpc_peers.is_empty() { - let rpc_client = { - let node_index = thread_rng().gen_range(0, rpc_peers.len()); - RpcClient::new_socket(rpc_peers[node_index].rpc) - }; - Ok(rpc_client - .send::( - RpcRequest::GetSlotsPerSegment, - serde_json::json!([client_commitment]), - 0, - ) - .unwrap()) - } else { - Err(ArchiverError::NoRpcPeers) - } - } - - /// Waits until the first segment is ready, and returns the current segment - fn poll_for_segment( - cluster_info: &ClusterInfo, - slots_per_segment: u64, - previous_blockhash: &Hash, - exit: &Arc, - ) -> Result<(Hash, u64)> { - loop { - let (blockhash, turn_slot) = Self::poll_for_blockhash_and_slot( - cluster_info, - slots_per_segment, - previous_blockhash, - exit, - )?; - if get_complete_segment_from_slot(turn_slot, slots_per_segment).is_some() { - return Ok((blockhash, turn_slot)); - } - } - } - - /// Poll for a different blockhash and associated max_slot than `previous_blockhash` - fn poll_for_blockhash_and_slot( - cluster_info: &ClusterInfo, - slots_per_segment: u64, - previous_blockhash: &Hash, - exit: &Arc, - ) -> Result<(Hash, u64)> { - info!("waiting for the next turn..."); - loop { - let rpc_peers = cluster_info.all_rpc_peers(); - debug!("rpc peers: {:?}", rpc_peers); - if !rpc_peers.is_empty() { - let rpc_client = { - let node_index = thread_rng().gen_range(0, rpc_peers.len()); - RpcClient::new_socket(rpc_peers[node_index].rpc) - }; - let RpcStorageTurn { - blockhash: storage_blockhash, - slot: turn_slot, - } = rpc_client.send( - RpcRequest::GetStorageTurn, - serde_json::value::Value::Null, - 0, - )?; - let turn_blockhash = 
storage_blockhash.parse().map_err(|err| { - io::Error::new( - io::ErrorKind::Other, - format!( - "Blockhash parse failure: {:?} on {:?}", - err, storage_blockhash - ), - ) - })?; - if turn_blockhash != *previous_blockhash { - info!("turn slot: {}", turn_slot); - if get_segment_from_slot(turn_slot, slots_per_segment) != 0 { - return Ok((turn_blockhash, turn_slot)); - } - } - } - if exit.load(Ordering::Relaxed) { - return Err(ArchiverError::IO(io::Error::new( - ErrorKind::Other, - "exit signalled...", - ))); - } - sleep(Duration::from_secs(5)); - } - } - - /// Ask an archiver to populate a given blockstore with its segment. - /// Return the slot at the start of the archiver's segment - /// - /// It is recommended to use a temporary blockstore for this since the download will not verify - /// shreds received and might impact the chaining of shreds across slots - pub fn download_from_archiver( - serve_repair: &ServeRepair, - archiver_info: &ContactInfo, - blockstore: &Arc, - slots_per_segment: u64, - ) -> Result { - let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); - // Create a client which downloads from the archiver and see that it - // can respond with shreds. - let start_slot = Self::get_archiver_segment_slot(ip_addr, archiver_info.storage_addr); - info!("Archiver download: start at {}", start_slot); - - let exit = Arc::new(AtomicBool::new(false)); - let (s_reader, r_reader) = channel(); - let repair_socket = Arc::new(bind_in_range(ip_addr, VALIDATOR_PORT_RANGE).unwrap().1); - let t_receiver = receiver( - repair_socket.clone(), - &exit, - s_reader, - Recycler::default(), - "archiver_reeciver", - ); - let id = serve_repair.keypair().pubkey(); - info!( - "Sending repair requests from: {} to: {}", - serve_repair.my_info().id, - archiver_info.gossip - ); - let repair_slot_range = RepairSlotRange { - start: start_slot, - end: start_slot + slots_per_segment, - }; - // try for upto 180 seconds //TODO needs tuning if segments are huge - for _ in 0..120 { - // Strategy used by archivers - let repairs = RepairService::generate_repairs_in_range( - blockstore, - repair_service::MAX_REPAIR_LENGTH, - &repair_slot_range, - ); - let mut repair_stats = RepairStats::default(); - //iter over the repairs and send them - if let Ok(repairs) = repairs { - let reqs: Vec<_> = repairs - .into_iter() - .filter_map(|repair_request| { - serve_repair - .map_repair_request(&repair_request, &mut repair_stats) - .map(|result| ((archiver_info.gossip, result), repair_request)) - .ok() - }) - .collect(); - - for ((to, req), repair_request) in reqs { - if let Ok(local_addr) = repair_socket.local_addr() { - datapoint_info!( - "archiver_download", - ("repair_request", format!("{:?}", repair_request), String), - ("to", to.to_string(), String), - ("from", local_addr.to_string(), String), - ("id", id.to_string(), String) - ); - } - repair_socket - .send_to(&req, archiver_info.gossip) - .unwrap_or_else(|e| { - error!("{} repair req send_to({}) error {:?}", id, to, e); - 0 - }); - } - } - let res = r_reader.recv_timeout(Duration::new(1, 0)); - if let Ok(mut packets) = res { - while let Ok(mut more) = r_reader.try_recv() { - packets.packets.append_pinned(&mut more.packets); - } - let shreds: Vec = packets - .packets - .into_iter() - .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok()) - .collect(); - blockstore.insert_shreds(shreds, None, false)?; - } - // check if all the slots in the segment are complete - if Self::segment_complete(start_slot, slots_per_segment, blockstore) { - break; - } - 
sleep(Duration::from_millis(500)); - } - exit.store(true, Ordering::Relaxed); - t_receiver.join().unwrap(); - - // check if all the slots in the segment are complete - if !Self::segment_complete(start_slot, slots_per_segment, blockstore) { - return Err(ArchiverError::SegmentDownloadError); - } - Ok(start_slot) - } - - fn segment_complete( - start_slot: Slot, - slots_per_segment: u64, - blockstore: &Arc, - ) -> bool { - for slot in start_slot..(start_slot + slots_per_segment) { - if !blockstore.is_full(slot) { - return false; - } - } - true - } - - fn get_archiver_segment_slot(bind_ip_addr: IpAddr, to: SocketAddr) -> u64 { - let (_port, socket) = bind_in_range(bind_ip_addr, VALIDATOR_PORT_RANGE).unwrap(); - socket - .set_read_timeout(Some(Duration::from_secs(5))) - .unwrap(); - - let req = ArchiverRequest::GetSlotHeight(socket.local_addr().unwrap()); - let serialized_req = bincode::serialize(&req).unwrap(); - for _ in 0..10 { - socket.send_to(&serialized_req, to).unwrap(); - let mut buf = [0; 1024]; - if let Ok((size, _addr)) = socket.recv_from(&mut buf) { - // Ignore bad packet and try again - if let Ok(slot) = bincode::config() - .limit(PACKET_DATA_SIZE as u64) - .deserialize(&buf[..size]) - { - return slot; - } - } - sleep(Duration::from_millis(500)); - } - panic!("Couldn't get segment slot from archiver!"); - } -} diff --git a/archiver-lib/src/lib.rs b/archiver-lib/src/lib.rs deleted file mode 100644 index 660b11fde9..0000000000 --- a/archiver-lib/src/lib.rs +++ /dev/null @@ -1,11 +0,0 @@ -#[macro_use] -extern crate log; - -#[macro_use] -extern crate serde_derive; - -#[macro_use] -extern crate solana_metrics; - -pub mod archiver; -mod result; diff --git a/archiver-lib/src/result.rs b/archiver-lib/src/result.rs deleted file mode 100644 index 813dd4d4a6..0000000000 --- a/archiver-lib/src/result.rs +++ /dev/null @@ -1,47 +0,0 @@ -use solana_client::client_error; -use solana_ledger::blockstore; -use solana_sdk::transport; -use std::any::Any; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum ArchiverError { - #[error("IO error")] - IO(#[from] std::io::Error), - - #[error("blockstore error")] - BlockstoreError(#[from] blockstore::BlockstoreError), - - #[error("crossbeam error")] - CrossbeamSendError(#[from] crossbeam_channel::SendError), - - #[error("send error")] - SendError(#[from] std::sync::mpsc::SendError), - - #[error("join error")] - JoinError(Box), - - #[error("transport error")] - TransportError(#[from] transport::TransportError), - - #[error("client error")] - ClientError(#[from] client_error::ClientError), - - #[error("Json parsing error")] - JsonError(#[from] serde_json::error::Error), - - #[error("Storage account has no balance")] - EmptyStorageAccountBalance, - - #[error("No RPC peers..")] - NoRpcPeers, - - #[error("Couldn't download full segment")] - SegmentDownloadError, -} - -impl std::convert::From> for ArchiverError { - fn from(e: Box) -> ArchiverError { - ArchiverError::JoinError(e) - } -} diff --git a/archiver-utils/Cargo.toml b/archiver-utils/Cargo.toml deleted file mode 100644 index 5980602e76..0000000000 --- a/archiver-utils/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "solana-archiver-utils" -version = "1.2.0" -description = "Solana Archiver Utils" -authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/solana" -license = "Apache-2.0" -homepage = "https://solana.com/" -edition = "2018" - -[dependencies] -log = "0.4.8" -rand = "0.7.0" -solana-chacha = { path = "../chacha", version = "1.2.0" } -solana-chacha-sys = { 
path = "../chacha-sys", version = "1.2.0" } -solana-ledger = { path = "../ledger", version = "1.2.0" } -solana-logger = { path = "../logger", version = "1.2.0" } -solana-perf = { path = "../perf", version = "1.2.0" } -solana-sdk = { path = "../sdk", version = "1.2.0" } - -[dev-dependencies] -hex = "0.4.2" - -[lib] -name = "solana_archiver_utils" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/archiver-utils/src/lib.rs b/archiver-utils/src/lib.rs deleted file mode 100644 index b38a36dbe5..0000000000 --- a/archiver-utils/src/lib.rs +++ /dev/null @@ -1,120 +0,0 @@ -#[macro_use] -extern crate log; - -use solana_sdk::hash::{Hash, Hasher}; -use std::fs::File; -use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom}; -use std::mem::size_of; -use std::path::Path; - -pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result { - let in_file = File::open(in_path)?; - let metadata = in_file.metadata()?; - let mut buffer_file = BufReader::new(in_file); - - let mut hasher = Hasher::default(); - let sample_size = size_of::(); - let sample_size64 = sample_size as u64; - let mut buf = vec![0; sample_size]; - - let file_len = metadata.len(); - if file_len < sample_size64 { - return Err(io::Error::new(ErrorKind::Other, "file too short!")); - } - for offset in sample_offsets { - if *offset > (file_len - sample_size64) / sample_size64 { - return Err(io::Error::new(ErrorKind::Other, "offset too large")); - } - buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?; - trace!("sampling @ {} ", *offset); - match buffer_file.read(&mut buf) { - Ok(size) => { - assert_eq!(size, buf.len()); - hasher.hash(&buf); - } - Err(e) => { - warn!("Error sampling file"); - return Err(e); - } - } - } - - Ok(hasher.result()) -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::{thread_rng, Rng}; - use std::fs::{create_dir_all, remove_file}; - use std::io::Write; - use std::path::PathBuf; - - extern crate hex; - - fn tmp_file_path(name: &str) -> PathBuf { - use std::env; - let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); - let mut rand_bits = [0u8; 32]; - thread_rng().fill(&mut rand_bits[..]); - - let mut path = PathBuf::new(); - path.push(out_dir); - path.push("tmp"); - create_dir_all(&path).unwrap(); - - path.push(format!("{}-{:?}", name, hex::encode(rand_bits))); - println!("path: {:?}", path); - path - } - - #[test] - fn test_sample_file() { - solana_logger::setup(); - let in_path = tmp_file_path("test_sample_file_input.txt"); - let num_strings = 4096; - let string = "12foobar"; - { - let mut in_file = File::create(&in_path).unwrap(); - for _ in 0..num_strings { - in_file.write(string.as_bytes()).unwrap(); - } - } - let num_samples = (string.len() * num_strings / size_of::()) as u64; - let samples: Vec<_> = (0..num_samples).collect(); - let res = sample_file(&in_path, samples.as_slice()); - let ref_hash: Hash = Hash::new(&[ - 173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144, - 151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52, - ]); - let res = res.unwrap(); - assert_eq!(res, ref_hash); - - // Sample just past the end - assert!(sample_file(&in_path, &[num_samples]).is_err()); - remove_file(&in_path).unwrap(); - } - - #[test] - fn test_sample_file_invalid_offset() { - let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt"); - { - let mut in_file = File::create(&in_path).unwrap(); - for _ in 0..4096 { - in_file.write("123456foobar".as_bytes()).unwrap(); - } - } - let 
samples = [0, 200000]; - let res = sample_file(&in_path, &samples); - assert!(res.is_err()); - remove_file(in_path).unwrap(); - } - - #[test] - fn test_sample_file_missing_file() { - let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt"); - let samples = [0, 5]; - let res = sample_file(&in_path, &samples); - assert!(res.is_err()); - } -} diff --git a/archiver/.gitignore b/archiver/.gitignore deleted file mode 100644 index 5404b132db..0000000000 --- a/archiver/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/target/ -/farf/ diff --git a/archiver/Cargo.toml b/archiver/Cargo.toml deleted file mode 100644 index 6f253f0218..0000000000 --- a/archiver/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -authors = ["Solana Maintainers "] -edition = "2018" -name = "solana-archiver" -version = "1.2.0" -repository = "https://github.com/solana-labs/solana" -license = "Apache-2.0" -homepage = "https://solana.com/" - -[dependencies] -clap = "2.33.1" -console = "0.10.1" -solana-clap-utils = { path = "../clap-utils", version = "1.2.0" } -solana-core = { path = "../core", version = "1.2.0" } -solana-logger = { path = "../logger", version = "1.2.0" } -solana-metrics = { path = "../metrics", version = "1.2.0" } -solana-archiver-lib = { path = "../archiver-lib", version = "1.2.0" } -solana-net-utils = { path = "../net-utils", version = "1.2.0" } -solana-sdk = { path = "../sdk", version = "1.2.0" } -solana-version = { path = "../version", version = "1.2.0" } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/archiver/src/main.rs b/archiver/src/main.rs deleted file mode 100644 index e12ba5eea1..0000000000 --- a/archiver/src/main.rs +++ /dev/null @@ -1,131 +0,0 @@ -use clap::{crate_description, crate_name, App, Arg}; -use console::style; -use solana_archiver_lib::archiver::Archiver; -use solana_clap_utils::{ - input_parsers::keypair_of, input_validators::is_keypair_or_ask_keyword, - keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, -}; -use solana_core::{ - cluster_info::{Node, VALIDATOR_PORT_RANGE}, - contact_info::ContactInfo, -}; -use solana_sdk::{ - commitment_config::CommitmentConfig, - signature::{Keypair, Signer}, -}; -use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - path::PathBuf, - sync::Arc, -}; - -fn main() { - solana_logger::setup(); - - let matches = App::new(crate_name!()) - .about(crate_description!()) - .version(solana_version::version!()) - .arg( - Arg::with_name("identity_keypair") - .short("i") - .long("identity") - .value_name("PATH") - .takes_value(true) - .validator(is_keypair_or_ask_keyword) - .help("File containing an identity (keypair)"), - ) - .arg( - Arg::with_name("entrypoint") - .short("n") - .long("entrypoint") - .value_name("HOST:PORT") - .takes_value(true) - .required(true) - .validator(solana_net_utils::is_host_port) - .help("Rendezvous with the cluster at this entry point"), - ) - .arg( - Arg::with_name("ledger") - .short("l") - .long("ledger") - .value_name("DIR") - .takes_value(true) - .required(true) - .help("use DIR as persistent ledger location"), - ) - .arg( - Arg::with_name("storage_keypair") - .short("s") - .long("storage-keypair") - .value_name("PATH") - .takes_value(true) - .validator(is_keypair_or_ask_keyword) - .help("File containing the storage account keypair"), - ) - .arg( - Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) - .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) - .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), - ) - .get_matches(); - - let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap()); - - let 
identity_keypair = keypair_of(&matches, "identity_keypair").unwrap_or_else(Keypair::new); - - let storage_keypair = keypair_of(&matches, "storage_keypair").unwrap_or_else(|| { - clap::Error::with_description( - "The `storage-keypair` argument was not found", - clap::ErrorKind::ArgumentNotFound, - ) - .exit(); - }); - - let entrypoint_addr = matches - .value_of("entrypoint") - .map(|entrypoint| { - solana_net_utils::parse_host_port(entrypoint) - .expect("failed to parse entrypoint address") - }) - .unwrap(); - - let gossip_addr = { - let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap(); - let mut addr = SocketAddr::new(ip, 0); - addr.set_ip(solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap()); - addr - }; - let node = Node::new_archiver_with_external_ip( - &identity_keypair.pubkey(), - &gossip_addr, - VALIDATOR_PORT_RANGE, - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - ); - - println!( - "{} version {} (branch={}, commit={})", - style(crate_name!()).bold(), - solana_version::version!(), - option_env!("CI_BRANCH").unwrap_or("unknown"), - option_env!("CI_COMMIT").unwrap_or("unknown") - ); - solana_metrics::set_host_id(identity_keypair.pubkey().to_string()); - println!( - "replicating the data with identity_keypair={:?} gossip_addr={:?}", - identity_keypair.pubkey(), - gossip_addr - ); - - let entrypoint_info = ContactInfo::new_gossip_entry_point(&entrypoint_addr); - let archiver = Archiver::new( - &ledger_path, - node, - entrypoint_info, - Arc::new(identity_keypair), - Arc::new(storage_keypair), - CommitmentConfig::recent(), - ) - .unwrap(); - - archiver.join(); -} diff --git a/bench-exchange/src/main.rs b/bench-exchange/src/main.rs index f87fd1a139..1833fa00ad 100644 --- a/bench-exchange/src/main.rs +++ b/bench-exchange/src/main.rs @@ -54,10 +54,9 @@ fn main() { ); } else { info!("Connecting to the cluster"); - let (nodes, _archivers) = - discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| { - panic!("Failed to discover nodes"); - }); + let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| { + panic!("Failed to discover nodes"); + }); let (client, num_clients) = get_multi_client(&nodes); diff --git a/bench-exchange/tests/bench_exchange.rs b/bench-exchange/tests/bench_exchange.rs index c99abd8418..7c7e0fdcda 100644 --- a/bench-exchange/tests/bench_exchange.rs +++ b/bench-exchange/tests/bench_exchange.rs @@ -59,7 +59,7 @@ fn test_exchange_local_cluster() { let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap(); info!("Connecting to the cluster"); - let (nodes, _) = + let nodes = discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| { error!("Failed to discover {} nodes: {:?}", NUM_NODES, err); exit(1); diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 48fb5a95f6..15632cb509 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -67,11 +67,10 @@ fn main() { } info!("Connecting to the cluster"); - let (nodes, _archivers) = - discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { - eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); - exit(1); - }); + let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { + eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); + exit(1); + }); let client = if *multi_client { let (client, num_clients) = get_multi_client(&nodes); diff --git a/chacha-cuda/Cargo.toml b/chacha-cuda/Cargo.toml deleted file mode 100644 index 1dd9f81e44..0000000000 --- 
a/chacha-cuda/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "solana-chacha-cuda" -version = "1.2.0" -description = "Solana Chacha Cuda APIs" -authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/solana" -license = "Apache-2.0" -homepage = "https://solana.com/" -edition = "2018" - -[dependencies] -log = "0.4.8" -solana-archiver-utils = { path = "../archiver-utils", version = "1.2.0" } -solana-chacha = { path = "../chacha", version = "1.2.0" } -solana-ledger = { path = "../ledger", version = "1.2.0" } -solana-logger = { path = "../logger", version = "1.2.0" } -solana-perf = { path = "../perf", version = "1.2.0" } -solana-sdk = { path = "../sdk", version = "1.2.0" } - -[dev-dependencies] -hex-literal = "0.2.1" - -[lib] -name = "solana_chacha_cuda" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/chacha-cuda/src/chacha_cuda.rs b/chacha-cuda/src/chacha_cuda.rs deleted file mode 100644 index 301c85230a..0000000000 --- a/chacha-cuda/src/chacha_cuda.rs +++ /dev/null @@ -1,280 +0,0 @@ -// Module used by validators to approve storage mining proofs in parallel using the GPU - -use solana_chacha::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE}; -use solana_ledger::blockstore::Blockstore; -use solana_perf::perf_libs; -use solana_sdk::hash::Hash; -use std::io; -use std::mem::size_of; -use std::sync::Arc; - -// Encrypt a file with multiple starting IV states, determined by ivecs.len() -// -// Then sample each block at the offsets provided by samples argument with sha256 -// and return the vec of sha states -pub fn chacha_cbc_encrypt_file_many_keys( - blockstore: &Arc, - segment: u64, - slots_per_segment: u64, - ivecs: &mut [u8], - samples: &[u64], -) -> io::Result> { - let api = perf_libs::api().expect("no perf libs"); - if ivecs.len() % CHACHA_BLOCK_SIZE != 0 { - return Err(io::Error::new( - io::ErrorKind::Other, - format!( - "bad IV length({}) not divisible by {} ", - ivecs.len(), - CHACHA_BLOCK_SIZE, - ), - )); - } - - const BUFFER_SIZE: usize = 8 * 1024; - let mut buffer = [0; BUFFER_SIZE]; - let num_keys = ivecs.len() / CHACHA_BLOCK_SIZE; - let mut sha_states = vec![0; num_keys * size_of::()]; - let mut int_sha_states = vec![0; num_keys * 112]; - let keys: Vec = vec![0; num_keys * CHACHA_KEY_SIZE]; // keys not used ATM, uniqueness comes from IV - let mut current_slot = segment * slots_per_segment; - let mut start_index = 0; - let start_slot = current_slot; - let mut total_size = 0; - let mut time: f32 = 0.0; - unsafe { - (api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32); - } - loop { - match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { - Ok((last_index, mut size)) => { - debug!( - "chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}", - segment, - last_index.saturating_sub(start_index), - size - ); - - if size == 0 { - if current_slot.saturating_sub(start_slot) < slots_per_segment { - current_slot += 1; - start_index = 0; - continue; - } else { - break; - } - } - - if size < BUFFER_SIZE { - // round to the nearest key_size boundary - size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1); - } - - unsafe { - (api.chacha_cbc_encrypt_many_sample)( - buffer[..size].as_ptr(), - int_sha_states.as_mut_ptr(), - size, - keys.as_ptr(), - ivecs.as_mut_ptr(), - num_keys as u32, - samples.as_ptr(), - samples.len() as u32, - total_size, - &mut time, - ); - } - - total_size += size as u64; - start_index = last_index + 1; - } - Err(e) => { - info!("Error 
encrypting file: {:?}", e); - break; - } - } - } - unsafe { - (api.chacha_end_sha_state)( - int_sha_states.as_ptr(), - sha_states.as_mut_ptr(), - num_keys as u32, - ); - } - let mut res = Vec::new(); - for x in 0..num_keys { - let start = x * size_of::(); - let end = start + size_of::(); - res.push(Hash::new(&sha_states[start..end])); - } - Ok(res) -} - -#[cfg(test)] -mod tests { - use super::*; - use solana_archiver_utils::sample_file; - use solana_chacha::chacha::chacha_cbc_encrypt_ledger; - use solana_ledger::entry::create_ticks; - use solana_ledger::get_tmp_ledger_path; - use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT; - use solana_sdk::signature::Keypair; - use std::fs::{remove_dir_all, remove_file}; - use std::path::Path; - - #[test] - fn test_encrypt_file_many_keys_single() { - solana_logger::setup(); - if perf_libs::api().is_none() { - info!("perf-libs unavailable, skipped"); - return; - } - - let slots_per_segment = 32; - let entries = create_ticks(slots_per_segment, 0, Hash::default()); - let ledger_path = get_tmp_ledger_path!(); - let ticks_per_slot = 16; - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - - blockstore - .write_entries( - 0, - 0, - 0, - ticks_per_slot, - Some(0), - true, - &Arc::new(Keypair::new()), - entries, - 0, - ) - .unwrap(); - - let out_path = Path::new("test_chacha_encrypt_file_many_keys_single_output.txt.enc"); - - let samples = [0]; - let mut ivecs = hex!( - "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234 - abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234" - ); - - let mut cpu_iv = ivecs.clone(); - chacha_cbc_encrypt_ledger( - &blockstore, - 0, - slots_per_segment as u64, - out_path, - &mut cpu_iv, - ) - .unwrap(); - - let ref_hash = sample_file(&out_path, &samples).unwrap(); - - let hashes = chacha_cbc_encrypt_file_many_keys( - &blockstore, - 0, - slots_per_segment as u64, - &mut ivecs, - &samples, - ) - .unwrap(); - - assert_eq!(hashes[0], ref_hash); - - let _ignored = remove_dir_all(&ledger_path); - let _ignored = remove_file(out_path); - } - - #[test] - fn test_encrypt_file_many_keys_multiple_keys() { - solana_logger::setup(); - if perf_libs::api().is_none() { - info!("perf-libs unavailable, skipped"); - return; - } - - let ledger_path = get_tmp_ledger_path!(); - let ticks_per_slot = 90; - let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default()); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - blockstore - .write_entries( - 0, - 0, - 0, - ticks_per_slot, - Some(0), - true, - &Arc::new(Keypair::new()), - entries, - 0, - ) - .unwrap(); - - let out_path = Path::new("test_chacha_encrypt_file_many_keys_multiple_output.txt.enc"); - - let samples = [0, 1, 3, 4, 5, 150]; - let mut ivecs = Vec::new(); - let mut ref_hashes: Vec = vec![]; - for i in 0..2 { - let mut ivec = hex!( - "abc123abc123abc123abc123abc123abc123abababababababababababababab - abc123abc123abc123abc123abc123abc123abababababababababababababab" - ); - ivec[0] = i; - ivecs.extend(ivec.clone().iter()); - chacha_cbc_encrypt_ledger( - &blockstore.clone(), - 0, - DEFAULT_SLOTS_PER_SEGMENT, - out_path, - &mut ivec, - ) - .unwrap(); - - ref_hashes.push(sample_file(&out_path, &samples).unwrap()); - info!( - "ivec: {:?} hash: {:?} ivecs: {:?}", - ivec.to_vec(), - ref_hashes.last(), - ivecs - ); - } - - let hashes = chacha_cbc_encrypt_file_many_keys( - &blockstore, - 0, - DEFAULT_SLOTS_PER_SEGMENT, - &mut ivecs, - &samples, - ) - .unwrap(); - - assert_eq!(hashes, ref_hashes); - - let _ignored = 
remove_dir_all(&ledger_path); - let _ignored = remove_file(out_path); - } - - #[test] - fn test_encrypt_file_many_keys_bad_key_length() { - solana_logger::setup(); - if perf_libs::api().is_none() { - info!("perf-libs unavailable, skipped"); - return; - } - - let mut keys = hex!("abc123"); - let ledger_path = get_tmp_ledger_path!(); - let samples = [0]; - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - assert!(chacha_cbc_encrypt_file_many_keys( - &blockstore, - 0, - DEFAULT_SLOTS_PER_SEGMENT, - &mut keys, - &samples, - ) - .is_err()); - } -} diff --git a/chacha-cuda/src/lib.rs b/chacha-cuda/src/lib.rs deleted file mode 100644 index bffbdceeda..0000000000 --- a/chacha-cuda/src/lib.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[macro_use] -extern crate log; - -#[cfg(test)] -#[macro_use] -extern crate hex_literal; - -pub mod chacha_cuda; diff --git a/chacha-sys/.gitignore b/chacha-sys/.gitignore deleted file mode 100644 index 5404b132db..0000000000 --- a/chacha-sys/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/target/ -/farf/ diff --git a/chacha-sys/Cargo.toml b/chacha-sys/Cargo.toml deleted file mode 100644 index 0199d15cab..0000000000 --- a/chacha-sys/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "solana-chacha-sys" -version = "1.2.0" -description = "Solana chacha-sys" -authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/solana" -homepage = "https://solana.com/" -license = "Apache-2.0" -edition = "2018" - -[build-dependencies] -cc = "1.0.49" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/chacha-sys/build.rs b/chacha-sys/build.rs deleted file mode 100644 index 9d4b546f4a..0000000000 --- a/chacha-sys/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -extern crate cc; - -fn main() { - cc::Build::new() - .file("cpu-crypt/chacha20_core.c") - .file("cpu-crypt/chacha_cbc.c") - .compile("libcpu-crypt"); -} diff --git a/chacha-sys/cpu-crypt/.gitignore b/chacha-sys/cpu-crypt/.gitignore deleted file mode 100644 index 151080d12b..0000000000 --- a/chacha-sys/cpu-crypt/.gitignore +++ /dev/null @@ -1 +0,0 @@ -release/ diff --git a/chacha-sys/cpu-crypt/Makefile b/chacha-sys/cpu-crypt/Makefile deleted file mode 100644 index 53a705af22..0000000000 --- a/chacha-sys/cpu-crypt/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -V:=debug - -LIB:=cpu-crypt - -CFLAGS_common:=-Wall -Werror -pedantic -fPIC -CFLAGS_release:=-march=native -O3 $(CFLAGS_common) -CFLAGS_debug:=-g $(CFLAGS_common) -CFLAGS:=$(CFLAGS_$V) - -all: $V/lib$(LIB).a - -$V/chacha20_core.o: chacha20_core.c chacha.h - @mkdir -p $(@D) - $(CC) $(CFLAGS) -c $< -o $@ - -$V/chacha_cbc.o: chacha_cbc.c chacha.h - @mkdir -p $(@D) - $(CC) $(CFLAGS) -c $< -o $@ - -$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o - $(AR) rcs $@ $^ - -.PHONY:clean -clean: - rm -rf $V diff --git a/chacha-sys/cpu-crypt/chacha.h b/chacha-sys/cpu-crypt/chacha.h deleted file mode 100644 index 3a9c7b98f8..0000000000 --- a/chacha-sys/cpu-crypt/chacha.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef HEADER_CHACHA_H -# define HEADER_CHACHA_H - -#include -#include -# include -# ifdef __cplusplus -extern "C" { -# endif - -typedef unsigned int u32; - -#define CHACHA_KEY_SIZE 32 -#define CHACHA_NONCE_SIZE 12 -#define CHACHA_BLOCK_SIZE 64 -#define CHACHA_ROUNDS 500 - -void chacha20_encrypt(const u32 input[16], - unsigned char output[64], - int num_rounds); - -void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out, - uint32_t len, const uint8_t* key, - unsigned char* ivec); - - -# ifdef __cplusplus -} -# endif - 
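Editor's note, not part of the patch: the header above declares `chacha20_cbc128_encrypt`, defined below in `chacha_cbc.c`. The construction is plain CBC chaining: XOR each 64-byte input block into the running IV, apply the ChaCha block function in place, and feed the result forward as the next IV (the key argument is ignored; per the comments elsewhere in this patch, uniqueness comes entirely from the IV). A rough Rust rendering of that data flow, with `block_fn` standing in for the `chacha20_encrypt` core:

```rust
const CHACHA_BLOCK_SIZE: usize = 64;

// Sketch only: the CBC-style chaining implemented by chacha20_cbc128_encrypt.
fn cbc_encrypt(
    input: &[u8],
    ivec: &mut [u8; CHACHA_BLOCK_SIZE],
    block_fn: impl Fn(&mut [u8; CHACHA_BLOCK_SIZE]),
) -> Vec<u8> {
    let mut out = Vec::with_capacity(input.len());
    for chunk in input.chunks(CHACHA_BLOCK_SIZE) {
        let mut block = *ivec;
        for (b, &byte) in block.iter_mut().zip(chunk) {
            // XOR the input into the running IV; on a short final chunk the
            // trailing bytes keep their IV values, as in the C fallback loop
            *b ^= byte;
        }
        block_fn(&mut block); // the ChaCha core transforms the block in place
        out.extend_from_slice(&block);
        *ivec = block; // ciphertext feeds forward as the next IV
    }
    out
}
```

The C version adds a word-aligned fast path over this same flow, and `CHACHA_ROUNDS` is 500 rather than ChaCha20's usual 20, presumably to make encryption deliberately expensive for the mining workload.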
-#endif diff --git a/chacha-sys/cpu-crypt/chacha20_core.c b/chacha-sys/cpu-crypt/chacha20_core.c deleted file mode 100644 index 1c914c7359..0000000000 --- a/chacha-sys/cpu-crypt/chacha20_core.c +++ /dev/null @@ -1,49 +0,0 @@ -#include "chacha.h" - -#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n)))) - -#define ROTATE(v, c) ROTL32((v), (c)) - -#define XOR(v, w) ((v) ^ (w)) - -#define PLUS(x, y) ((x) + (y)) - -#define U32TO8_LITTLE(p, v) \ -{ (p)[0] = ((v) ) & 0xff; (p)[1] = ((v) >> 8) & 0xff; \ - (p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; } - -#define U8TO32_LITTLE(p) \ - (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \ - ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) ) - -#define QUARTERROUND(a,b,c,d) \ - x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \ - x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \ - x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \ - x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7); - -void chacha20_encrypt(const u32 input[16], - unsigned char output[64], - int num_rounds) -{ - u32 x[16]; - int i; - memcpy(x, input, sizeof(u32) * 16); - for (i = num_rounds; i > 0; i -= 2) { - QUARTERROUND( 0, 4, 8,12) - QUARTERROUND( 1, 5, 9,13) - QUARTERROUND( 2, 6,10,14) - QUARTERROUND( 3, 7,11,15) - QUARTERROUND( 0, 5,10,15) - QUARTERROUND( 1, 6,11,12) - QUARTERROUND( 2, 7, 8,13) - QUARTERROUND( 3, 4, 9,14) - } - for (i = 0; i < 16; ++i) { - x[i] = PLUS(x[i], input[i]); - } - for (i = 0; i < 16; ++i) { - U32TO8_LITTLE(output + 4 * i, x[i]); - } -} - diff --git a/chacha-sys/cpu-crypt/chacha_cbc.c b/chacha-sys/cpu-crypt/chacha_cbc.c deleted file mode 100644 index 92f71f3f26..0000000000 --- a/chacha-sys/cpu-crypt/chacha_cbc.c +++ /dev/null @@ -1,72 +0,0 @@ -#include "chacha.h" - -#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC) -# define STRICT_ALIGNMENT 0 -#endif - -void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out, - uint32_t len, const uint8_t* key, - unsigned char* ivec) -{ - size_t n; - unsigned char *iv = ivec; - (void)key; - - if (len == 0) { - return; - } - -#if !defined(OPENSSL_SMALL_FOOTPRINT) - if (STRICT_ALIGNMENT && - ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { - while (len >= CHACHA_BLOCK_SIZE) { - for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) { - out[n] = in[n] ^ iv[n]; - //printf("%x ", out[n]); - } - chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS); - iv = out; - len -= CHACHA_BLOCK_SIZE; - in += CHACHA_BLOCK_SIZE; - out += CHACHA_BLOCK_SIZE; - } - } else { - while (len >= CHACHA_BLOCK_SIZE) { - for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) { - *(size_t *)(out + n) = - *(size_t *)(in + n) ^ *(size_t *)(iv + n); - //printf("%zu ", *(size_t *)(iv + n)); - } - chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS); - iv = out; - len -= CHACHA_BLOCK_SIZE; - in += CHACHA_BLOCK_SIZE; - out += CHACHA_BLOCK_SIZE; - } - } -#endif - while (len) { - for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) { - out[n] = in[n] ^ iv[n]; - } - for (; n < CHACHA_BLOCK_SIZE; ++n) { - out[n] = iv[n]; - } - chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS); - iv = out; - if (len <= CHACHA_BLOCK_SIZE) { - break; - } - len -= CHACHA_BLOCK_SIZE; - in += CHACHA_BLOCK_SIZE; - out += CHACHA_BLOCK_SIZE; - } - memcpy(ivec, iv, CHACHA_BLOCK_SIZE); - -} - -void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len, - const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec) -{ - chacha20_cbc128_encrypt(in, out, in_len, key, ivec); -} diff --git a/chacha-sys/src/lib.rs 
b/chacha-sys/src/lib.rs deleted file mode 100644 index 9a0b3f7b9c..0000000000 --- a/chacha-sys/src/lib.rs +++ /dev/null @@ -1,21 +0,0 @@ -extern "C" { - fn chacha20_cbc_encrypt( - input: *const u8, - output: *mut u8, - in_len: usize, - key: *const u8, - ivec: *mut u8, - ); -} - -pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) { - unsafe { - chacha20_cbc_encrypt( - input.as_ptr(), - output.as_mut_ptr(), - input.len(), - key.as_ptr(), - ivec.as_mut_ptr(), - ); - } -} diff --git a/chacha/.gitignore b/chacha/.gitignore deleted file mode 100644 index b645148aa9..0000000000 --- a/chacha/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/farf/ diff --git a/chacha/Cargo.toml b/chacha/Cargo.toml deleted file mode 100644 index 799001f92a..0000000000 --- a/chacha/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "solana-chacha" -version = "1.2.0" -description = "Solana Chacha APIs" -authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/solana" -license = "Apache-2.0" -homepage = "https://solana.com/" -edition = "2018" - -[dependencies] -log = "0.4.8" -rand = "0.7.0" -rand_chacha = "0.2.2" -solana-chacha-sys = { path = "../chacha-sys", version = "1.2.0" } -solana-ledger = { path = "../ledger", version = "1.2.0" } -solana-logger = { path = "../logger", version = "1.2.0" } -solana-perf = { path = "../perf", version = "1.2.0" } -solana-sdk = { path = "../sdk", version = "1.2.0" } - -[dev-dependencies] -hex-literal = "0.2.1" - -[lib] -name = "solana_chacha" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/chacha/src/chacha.rs b/chacha/src/chacha.rs deleted file mode 100644 index b0f3317020..0000000000 --- a/chacha/src/chacha.rs +++ /dev/null @@ -1,185 +0,0 @@ -use solana_ledger::blockstore::Blockstore; -use solana_sdk::clock::Slot; -use std::fs::File; -use std::io; -use std::io::{BufWriter, Write}; -use std::path::Path; -use std::sync::Arc; - -pub use solana_chacha_sys::chacha_cbc_encrypt; - -pub const CHACHA_BLOCK_SIZE: usize = 64; -pub const CHACHA_KEY_SIZE: usize = 32; - -pub fn chacha_cbc_encrypt_ledger( - blockstore: &Arc, - start_slot: Slot, - slots_per_segment: u64, - out_path: &Path, - ivec: &mut [u8; CHACHA_BLOCK_SIZE], -) -> io::Result { - let mut out_file = - BufWriter::new(File::create(out_path).expect("Can't open ledger encrypted data file")); - const BUFFER_SIZE: usize = 8 * 1024; - let mut buffer = [0; BUFFER_SIZE]; - let mut encrypted_buffer = [0; BUFFER_SIZE]; - let key = [0; CHACHA_KEY_SIZE]; - let mut total_size = 0; - let mut current_slot = start_slot; - let mut start_index = 0; - loop { - match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { - Ok((last_index, mut size)) => { - debug!( - "chacha: encrypting slice: {} num_shreds: {} data_len: {}", - current_slot, - last_index.saturating_sub(start_index), - size - ); - debug!("read {} bytes", size); - - if size == 0 { - if current_slot.saturating_sub(start_slot) < slots_per_segment { - current_slot += 1; - start_index = 0; - continue; - } else { - break; - } - } - - if size < BUFFER_SIZE { - // round to the nearest key_size boundary - size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1); - } - total_size += size; - - chacha_cbc_encrypt(&buffer[..size], &mut encrypted_buffer[..size], &key, ivec); - if let Err(res) = out_file.write(&encrypted_buffer[..size]) { - warn!("Error writing file! 
{:?}", res); - return Err(res); - } - - start_index = last_index + 1; - } - Err(e) => { - info!("Error encrypting file: {:?}", e); - break; - } - } - } - Ok(total_size) -} - -#[cfg(test)] -mod tests { - use crate::chacha::chacha_cbc_encrypt_ledger; - use rand::SeedableRng; - use rand_chacha::ChaChaRng; - use solana_ledger::blockstore::Blockstore; - use solana_ledger::entry::Entry; - use solana_ledger::get_tmp_ledger_path; - use solana_sdk::hash::{hash, Hash, Hasher}; - use solana_sdk::pubkey::Pubkey; - use solana_sdk::signature::{Keypair, Signer}; - use solana_sdk::system_transaction; - use std::fs::remove_file; - use std::fs::File; - use std::io::Read; - use std::sync::Arc; - - fn make_tiny_deterministic_test_entries(num: usize) -> Vec { - let zero = Hash::default(); - let one = hash(&zero.as_ref()); - - let seed = [2u8; 32]; - - let mut generator = ChaChaRng::from_seed(seed); - let keypair = Keypair::generate(&mut generator); - - let mut id = one; - let mut num_hashes = 0; - (0..num) - .map(|_| { - Entry::new_mut( - &mut id, - &mut num_hashes, - vec![system_transaction::transfer( - &keypair, - &keypair.pubkey(), - 1, - one, - )], - ) - }) - .collect() - } - - use std::{env, fs::create_dir_all, path::PathBuf}; - fn tmp_file_path(name: &str) -> PathBuf { - let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); - let mut path = PathBuf::new(); - path.push(out_dir); - path.push("tmp"); - create_dir_all(&path).unwrap(); - - path.push(format!("{}-{}", name, Pubkey::new_rand())); - path - } - - #[test] - fn test_encrypt_ledger() { - solana_logger::setup(); - let ledger_path = get_tmp_ledger_path!(); - let ticks_per_slot = 16; - let slots_per_segment = 32; - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let out_path = tmp_file_path("test_encrypt_ledger"); - - let seed = [2u8; 32]; - - let mut generator = ChaChaRng::from_seed(seed); - let keypair = Keypair::generate(&mut generator); - - let entries = make_tiny_deterministic_test_entries(slots_per_segment); - blockstore - .write_entries( - 0, - 0, - 0, - ticks_per_slot, - None, - true, - &Arc::new(keypair), - entries, - 0, - ) - .unwrap(); - - let mut key = hex!( - "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234 - abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234" - ); - chacha_cbc_encrypt_ledger( - &blockstore, - 0, - slots_per_segment as u64, - &out_path, - &mut key, - ) - .unwrap(); - let mut out_file = File::open(&out_path).unwrap(); - let mut buf = vec![]; - let size = out_file.read_to_end(&mut buf).unwrap(); - let mut hasher = Hasher::default(); - hasher.hash(&buf[..size]); - - // golden needs to be updated if shred structure changes.... 
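Editor's note before the golden value: both encryption paths in this patch round a short read up with `(size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1)`. The deleted comment says "round to the nearest key_size boundary", but strictly it rounds up to the next multiple of 32, which is valid only because `CHACHA_KEY_SIZE` is a power of two. A quick self-check:

```rust
// Sketch only: round `size` up to the next multiple of `align`;
// correct only when `align` is a power of two (CHACHA_KEY_SIZE = 32 qualifies).
fn round_up_pow2(size: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(round_up_pow2(0, 32), 0);
    assert_eq!(round_up_pow2(1, 32), 32);
    assert_eq!(round_up_pow2(32, 32), 32);
    assert_eq!(round_up_pow2(33, 32), 64);
}
```

This hands the encryptor a whole number of key-size units, with the tail padded by whatever bytes were already sitting in the read buffer.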
- let golden: Hash = "2rq8nR6rns2T5zsbQAGBDZb41NVtacneLgkCH17CVxZm" - .parse() - .unwrap(); - - assert_eq!(hasher.result(), golden); - remove_file(&out_path).unwrap(); - } -} diff --git a/chacha/src/lib.rs b/chacha/src/lib.rs deleted file mode 100644 index 5f1e27d344..0000000000 --- a/chacha/src/lib.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[macro_use] -extern crate log; - -#[cfg(test)] -#[macro_use] -extern crate hex_literal; - -pub mod chacha; diff --git a/ci/test-stable.sh b/ci/test-stable.sh index b25bd6da42..68881b8a40 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -91,7 +91,7 @@ test-stable-perf) fi _ cargo +"$rust_stable" build --bins ${V:+--verbose} - _ cargo +"$rust_stable" test --package solana-chacha-cuda --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture + _ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture ;; test-move) ci/affects-files.sh \ diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 880b894e6e..a32d982803 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -39,7 +39,6 @@ solana-remote-wallet = { path = "../remote-wallet", version = "1.2.0" } solana-runtime = { path = "../runtime", version = "1.2.0" } solana-sdk = { path = "../sdk", version = "1.2.0" } solana-stake-program = { path = "../programs/stake", version = "1.2.0" } -solana-storage-program = { path = "../programs/storage", version = "1.2.0" } solana-transaction-status = { path = "../transaction-status", version = "1.2.0" } solana-version = { path = "../version", version = "1.2.0" } solana-vote-program = { path = "../programs/vote", version = "1.2.0" } diff --git a/cli/src/cli.rs b/cli/src/cli.rs index d18467404b..3e67d393ab 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -7,7 +7,6 @@ use crate::{ offline::{blockhash_query::BlockhashQuery, *}, spend_utils::*, stake::*, - storage::*, validator_info::*, vote::*, }; @@ -57,7 +56,6 @@ use solana_stake_program::{ stake_instruction::LockupArgs, stake_state::{Lockup, StakeAuthorize}, }; -use solana_storage_program::storage_instruction::StorageAccountType; use solana_transaction_status::{EncodedTransaction, TransactionEncoding}; use solana_vote_program::vote_state::VoteAuthorize; use std::{ @@ -363,17 +361,6 @@ pub enum CliCommand { nonce_authority: SignerIndex, fee_payer: SignerIndex, }, - // Storage Commands - CreateStorageAccount { - account_owner: Pubkey, - storage_account: SignerIndex, - account_type: StorageAccountType, - }, - ClaimStorageReward { - node_account_pubkey: Pubkey, - storage_account_pubkey: Pubkey, - }, - ShowStorageAccount(Pubkey), // Validator Info Commands GetValidatorInfo(Option), SetValidatorInfo { @@ -707,19 +694,6 @@ pub fn parse_command( } ("stake-account", Some(matches)) => parse_show_stake_account(matches, wallet_manager), ("stake-history", Some(matches)) => parse_show_stake_history(matches), - // Storage Commands - ("create-archiver-storage-account", Some(matches)) => { - parse_storage_create_archiver_account(matches, default_signer_path, wallet_manager) - } - ("create-validator-storage-account", Some(matches)) => { - parse_storage_create_validator_account(matches, default_signer_path, wallet_manager) - } - ("claim-storage-reward", Some(matches)) => { - parse_storage_claim_reward(matches, default_signer_path, wallet_manager) - } - ("storage-account", Some(matches)) => { - parse_storage_get_account_command(matches, wallet_manager) - } // Validator Info Commands ("validator-info", Some(matches)) => 
match matches.subcommand() { ("publish", Some(matches)) => { @@ -1084,7 +1058,6 @@ pub fn parse_create_address_with_seed( let program_id = match matches.value_of("program_id").unwrap() { "NONCE" => system_program::id(), "STAKE" => solana_stake_program::id(), - "STORAGE" => solana_storage_program::id(), "VOTE" => solana_vote_program::id(), _ => pubkey_of(matches, "program_id").unwrap(), }; @@ -1994,33 +1967,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *fee_payer, ), - // Storage Commands - - // Create storage account - CliCommand::CreateStorageAccount { - account_owner, - storage_account, - account_type, - } => process_create_storage_account( - &rpc_client, - config, - *storage_account, - &account_owner, - *account_type, - ), - CliCommand::ClaimStorageReward { - node_account_pubkey, - storage_account_pubkey, - } => process_claim_storage_reward( - &rpc_client, - config, - node_account_pubkey, - &storage_account_pubkey, - ), - CliCommand::ShowStorageAccount(storage_account_pubkey) => { - process_show_storage_account(&rpc_client, config, &storage_account_pubkey) - } - // Validator Info Commands // Return all or single validator info @@ -2355,7 +2301,6 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .cluster_query_subcommands() .nonce_subcommands() .stake_subcommands() - .storage_subcommands() .subcommand( SubCommand::with_name("airdrop") .about("Request lamports") @@ -2463,7 +2408,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .required(true) .help( "The program_id that the address will ultimately be used for, \n\ - or one of NONCE, STAKE, STORAGE, and VOTE keywords", + or one of NONCE, STAKE, and VOTE keywords", ), ) .arg( @@ -2879,7 +2824,6 @@ mod tests { for (name, program_id) in &[ ("STAKE", solana_stake_program::id()), ("VOTE", solana_vote_program::id()), - ("STORAGE", solana_storage_program::id()), ("NONCE", system_program::id()), ] { let test_create_address_with_seed = test_commands.clone().get_matches_from(vec![ diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 7c4c4049dd..f9daaf053a 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -30,7 +30,6 @@ pub mod nonce; pub mod offline; pub mod spend_utils; pub mod stake; -pub mod storage; pub mod test_utils; pub mod validator_info; pub mod vote; diff --git a/cli/src/storage.rs b/cli/src/storage.rs deleted file mode 100644 index c33c71c5b4..0000000000 --- a/cli/src/storage.rs +++ /dev/null @@ -1,400 +0,0 @@ -use crate::{ - checks::{check_account_for_fee, check_unique_pubkeys}, - cli::{ - generate_unique_signers, log_instruction_custom_error, CliCommand, CliCommandInfo, - CliConfig, CliError, ProcessResult, SignerIndex, - }, -}; -use clap::{App, Arg, ArgMatches, SubCommand}; -use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_from_path}; -use solana_client::rpc_client::RpcClient; -use solana_remote_wallet::remote_wallet::RemoteWalletManager; -use solana_sdk::{ - account_utils::StateMut, message::Message, pubkey::Pubkey, system_instruction::SystemError, - transaction::Transaction, -}; -use solana_storage_program::storage_instruction::{self, StorageAccountType}; -use std::sync::Arc; - -pub trait StorageSubCommands { - fn storage_subcommands(self) -> Self; -} - -impl StorageSubCommands for App<'_, '_> { - fn storage_subcommands(self) -> Self { - self.subcommand( - SubCommand::with_name("create-archiver-storage-account") - .about("Create an archiver storage account") - .arg( - Arg::with_name("storage_account_owner") - 
.index(1) - .value_name("AUTHORITY_PUBKEY") - .takes_value(true) - .required(true) - .validator(is_valid_pubkey), - ) - .arg( - Arg::with_name("storage_account") - .index(2) - .value_name("ACCOUNT_KEYPAIR") - .takes_value(true) - .required(true) - .validator(is_valid_signer), - ), - ) - .subcommand( - SubCommand::with_name("create-validator-storage-account") - .about("Create a validator storage account") - .arg( - Arg::with_name("storage_account_owner") - .index(1) - .value_name("AUTHORITY_PUBKEY") - .takes_value(true) - .required(true) - .validator(is_valid_pubkey), - ) - .arg( - Arg::with_name("storage_account") - .index(2) - .value_name("ACCOUNT_KEYPAIR") - .takes_value(true) - .required(true) - .validator(is_valid_signer), - ), - ) - .subcommand( - SubCommand::with_name("claim-storage-reward") - .about("Redeem storage reward credits") - .arg( - Arg::with_name("node_account_pubkey") - .index(1) - .value_name("NODE_ACCOUNT_ADDRESS") - .takes_value(true) - .required(true) - .validator(is_valid_pubkey) - .help("The node account to credit the rewards to"), - ) - .arg( - Arg::with_name("storage_account_pubkey") - .index(2) - .value_name("STORAGE_ACCOUNT_ADDRESS") - .takes_value(true) - .required(true) - .validator(is_valid_pubkey) - .help("Storage account address to redeem credits for"), - ), - ) - .subcommand( - SubCommand::with_name("storage-account") - .about("Show the contents of a storage account") - .alias("show-storage-account") - .arg( - Arg::with_name("storage_account_pubkey") - .index(1) - .value_name("STORAGE_ACCOUNT_ADDRESS") - .takes_value(true) - .required(true) - .validator(is_valid_pubkey) - .help("Storage account address"), - ), - ) - } -} - -pub fn parse_storage_create_archiver_account( - matches: &ArgMatches<'_>, - default_signer_path: &str, - wallet_manager: &mut Option>, -) -> Result { - let account_owner = - pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap(); - let (storage_account, storage_account_pubkey) = - signer_of(matches, "storage_account", wallet_manager)?; - - let payer_provided = None; - let signer_info = generate_unique_signers( - vec![payer_provided, storage_account], - matches, - default_signer_path, - wallet_manager, - )?; - - Ok(CliCommandInfo { - command: CliCommand::CreateStorageAccount { - account_owner, - storage_account: signer_info.index_of(storage_account_pubkey).unwrap(), - account_type: StorageAccountType::Archiver, - }, - signers: signer_info.signers, - }) -} - -pub fn parse_storage_create_validator_account( - matches: &ArgMatches<'_>, - default_signer_path: &str, - wallet_manager: &mut Option>, -) -> Result { - let account_owner = - pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap(); - let (storage_account, storage_account_pubkey) = - signer_of(matches, "storage_account", wallet_manager)?; - - let payer_provided = None; - let signer_info = generate_unique_signers( - vec![payer_provided, storage_account], - matches, - default_signer_path, - wallet_manager, - )?; - - Ok(CliCommandInfo { - command: CliCommand::CreateStorageAccount { - account_owner, - storage_account: signer_info.index_of(storage_account_pubkey).unwrap(), - account_type: StorageAccountType::Validator, - }, - signers: signer_info.signers, - }) -} - -pub fn parse_storage_claim_reward( - matches: &ArgMatches<'_>, - default_signer_path: &str, - wallet_manager: &mut Option>, -) -> Result { - let node_account_pubkey = - pubkey_of_signer(matches, "node_account_pubkey", wallet_manager)?.unwrap(); - let storage_account_pubkey = - 
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap(); - Ok(CliCommandInfo { - command: CliCommand::ClaimStorageReward { - node_account_pubkey, - storage_account_pubkey, - }, - signers: vec![signer_from_path( - matches, - default_signer_path, - "keypair", - wallet_manager, - )?], - }) -} - -pub fn parse_storage_get_account_command( - matches: &ArgMatches<'_>, - wallet_manager: &mut Option>, -) -> Result { - let storage_account_pubkey = - pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap(); - Ok(CliCommandInfo { - command: CliCommand::ShowStorageAccount(storage_account_pubkey), - signers: vec![], - }) -} - -pub fn process_create_storage_account( - rpc_client: &RpcClient, - config: &CliConfig, - storage_account: SignerIndex, - account_owner: &Pubkey, - account_type: StorageAccountType, -) -> ProcessResult { - let storage_account = config.signers[storage_account]; - let storage_account_pubkey = storage_account.pubkey(); - check_unique_pubkeys( - (&config.signers[0].pubkey(), "cli keypair".to_string()), - ( - &storage_account_pubkey, - "storage_account_pubkey".to_string(), - ), - )?; - - if let Ok(storage_account) = rpc_client.get_account(&storage_account_pubkey) { - let err_msg = if storage_account.owner == solana_storage_program::id() { - format!("Storage account {} already exists", storage_account_pubkey) - } else { - format!( - "Account {} already exists and is not a storage account", - storage_account_pubkey - ) - }; - return Err(CliError::BadParameter(err_msg).into()); - } - - use solana_storage_program::storage_contract::STORAGE_ACCOUNT_SPACE; - let required_balance = rpc_client - .get_minimum_balance_for_rent_exemption(STORAGE_ACCOUNT_SPACE as usize)? - .max(1); - - let ixs = storage_instruction::create_storage_account( - &config.signers[0].pubkey(), - &account_owner, - &storage_account_pubkey, - required_balance, - account_type, - ); - let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - - let message = Message::new(&ixs); - let mut tx = Transaction::new_unsigned(message); - tx.try_sign(&config.signers, recent_blockhash)?; - check_account_for_fee( - rpc_client, - &config.signers[0].pubkey(), - &fee_calculator, - &tx.message, - )?; - let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) -} - -pub fn process_claim_storage_reward( - rpc_client: &RpcClient, - config: &CliConfig, - node_account_pubkey: &Pubkey, - storage_account_pubkey: &Pubkey, -) -> ProcessResult { - let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - - let instruction = - storage_instruction::claim_reward(node_account_pubkey, storage_account_pubkey); - let signers = [config.signers[0]]; - let message = Message::new_with_payer(&[instruction], Some(&signers[0].pubkey())); - let mut tx = Transaction::new_unsigned(message); - tx.try_sign(&signers, recent_blockhash)?; - check_account_for_fee( - rpc_client, - &config.signers[0].pubkey(), - &fee_calculator, - &tx.message, - )?; - let signature = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?; - Ok(signature.to_string()) -} - -pub fn process_show_storage_account( - rpc_client: &RpcClient, - _config: &CliConfig, - storage_account_pubkey: &Pubkey, -) -> ProcessResult { - let account = rpc_client.get_account(storage_account_pubkey)?; - - if account.owner != solana_storage_program::id() { - return Err(CliError::RpcRequestError(format!( - "{:?} is not a storage account", - storage_account_pubkey - )) - 
.into()); - } - - use solana_storage_program::storage_contract::StorageContract; - let storage_contract: StorageContract = account.state().map_err(|err| { - CliError::RpcRequestError(format!("Unable to deserialize storage account: {}", err)) - })?; - println!("{:#?}", storage_contract); - println!("Account Lamports: {}", account.lamports); - Ok("".to_string()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::{app, parse_command}; - use solana_sdk::signature::{read_keypair_file, write_keypair, Keypair, Signer}; - use tempfile::NamedTempFile; - - fn make_tmp_file() -> (String, NamedTempFile) { - let tmp_file = NamedTempFile::new().unwrap(); - (String::from(tmp_file.path().to_str().unwrap()), tmp_file) - } - - #[test] - fn test_parse_command() { - let test_commands = app("test", "desc", "version"); - let pubkey = Pubkey::new_rand(); - let pubkey_string = pubkey.to_string(); - - let default_keypair = Keypair::new(); - let (default_keypair_file, mut tmp_file) = make_tmp_file(); - write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - - let (keypair_file, mut tmp_file) = make_tmp_file(); - let storage_account_keypair = Keypair::new(); - write_keypair(&storage_account_keypair, tmp_file.as_file_mut()).unwrap(); - - let test_create_archiver_storage_account = test_commands.clone().get_matches_from(vec![ - "test", - "create-archiver-storage-account", - &pubkey_string, - &keypair_file, - ]); - assert_eq!( - parse_command( - &test_create_archiver_storage_account, - &default_keypair_file, - &mut None - ) - .unwrap(), - CliCommandInfo { - command: CliCommand::CreateStorageAccount { - account_owner: pubkey, - storage_account: 1, - account_type: StorageAccountType::Archiver, - }, - signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - storage_account_keypair.into() - ], - } - ); - - let (keypair_file, mut tmp_file) = make_tmp_file(); - let storage_account_keypair = Keypair::new(); - write_keypair(&storage_account_keypair, tmp_file.as_file_mut()).unwrap(); - let storage_account_pubkey = storage_account_keypair.pubkey(); - let storage_account_string = storage_account_pubkey.to_string(); - - let test_create_validator_storage_account = test_commands.clone().get_matches_from(vec![ - "test", - "create-validator-storage-account", - &pubkey_string, - &keypair_file, - ]); - assert_eq!( - parse_command( - &test_create_validator_storage_account, - &default_keypair_file, - &mut None - ) - .unwrap(), - CliCommandInfo { - command: CliCommand::CreateStorageAccount { - account_owner: pubkey, - storage_account: 1, - account_type: StorageAccountType::Validator, - }, - signers: vec![ - read_keypair_file(&default_keypair_file).unwrap().into(), - storage_account_keypair.into() - ], - } - ); - - let test_claim_storage_reward = test_commands.clone().get_matches_from(vec![ - "test", - "claim-storage-reward", - &pubkey_string, - &storage_account_string, - ]); - assert_eq!( - parse_command(&test_claim_storage_reward, &default_keypair_file, &mut None).unwrap(), - CliCommandInfo { - command: CliCommand::ClaimStorageReward { - node_account_pubkey: pubkey, - storage_account_pubkey, - }, - signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], - } - ); - } -} diff --git a/core/Cargo.toml b/core/Cargo.toml index 72a0c85ec0..fb0d7147d7 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -54,12 +54,10 @@ solana-merkle-tree = { path = "../merkle-tree", version = "1.2.0" } solana-metrics = { path = "../metrics", version = "1.2.0" } solana-measure = { path = 
"../measure", version = "1.2.0" } solana-net-utils = { path = "../net-utils", version = "1.2.0" } -solana-chacha-cuda = { path = "../chacha-cuda", version = "1.2.0" } solana-perf = { path = "../perf", version = "1.2.0" } solana-runtime = { path = "../runtime", version = "1.2.0" } solana-sdk = { path = "../sdk", version = "1.2.0" } solana-stake-program = { path = "../programs/stake", version = "1.2.0" } -solana-storage-program = { path = "../programs/storage", version = "1.2.0" } solana-streamer = { path = "../streamer", version = "1.2.0" } solana-version = { path = "../version", version = "1.2.0" } solana-vote-program = { path = "../programs/vote", version = "1.2.0" } @@ -102,9 +100,5 @@ name = "retransmit_stage" [[bench]] name = "cluster_info" -[[bench]] -name = "chacha" -required-features = ["chacha"] - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/core/benches/chacha.rs b/core/benches/chacha.rs deleted file mode 100644 index df7da1184a..0000000000 --- a/core/benches/chacha.rs +++ /dev/null @@ -1,29 +0,0 @@ -//#![feature(test)] -// -//extern crate solana_core; -//extern crate test; -// -//use solana_core::chacha::chacha_cbc_encrypt_files; -//use std::fs::remove_file; -//use std::fs::File; -//use std::io::Write; -//use std::path::Path; -//use test::Bencher; -// -//#[bench] -//fn bench_chacha_encrypt(bench: &mut Bencher) { -// let in_path = Path::new("bench_chacha_encrypt_file_input.txt"); -// let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc"); -// { -// let mut in_file = File::create(in_path).unwrap(); -// for _ in 0..1024 { -// in_file.write("123456foobar".as_bytes()).unwrap(); -// } -// } -// bench.iter(move || { -// chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap(); -// }); -// -// remove_file(in_path).unwrap(); -// remove_file(out_path).unwrap(); -//} diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 267b6247b8..f0d1b91afc 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -364,7 +364,6 @@ impl ClusterInfo { pub fn contact_info_trace(&self) -> String { let now = timestamp(); let mut spy_nodes = 0; - let mut archivers = 0; let mut different_shred_nodes = 0; let my_pubkey = self.id(); let my_shred_version = self.my_shred_version(); @@ -374,8 +373,6 @@ impl ClusterInfo { .filter_map(|(node, last_updated)| { if Self::is_spy_node(&node) { spy_nodes += 1; - } else if Self::is_archiver(&node) { - archivers += 1; } let node_version = self.get_node_version(&node.id); @@ -431,14 +428,9 @@ impl ClusterInfo { ------------------+-------+----------------------------------------------+---------------+\ ------+------+------+------+------+------+------+------+------+--------\n\ {}\ - Nodes: {}{}{}{}", + Nodes: {}{}{}", nodes.join(""), - nodes.len() - spy_nodes - archivers, - if archivers > 0 { - format!("\nArchivers: {}", archivers) - } else { - "".to_string() - }, + nodes.len() - spy_nodes, if spy_nodes > 0 { format!("\nSpies: {}", spy_nodes) } else { @@ -773,11 +765,7 @@ impl ClusterInfo { .table .values() .filter_map(|x| x.value.contact_info()) - .filter(|x| { - ContactInfo::is_valid_address(&x.tvu) - && !ClusterInfo::is_archiver(x) - && x.id != self.id() - }) + .filter(|x| ContactInfo::is_valid_address(&x.tvu) && x.id != self.id()) .cloned() .collect() } @@ -793,39 +781,6 @@ impl ClusterInfo { .filter_map(|x| x.value.contact_info()) .filter(|x| { ContactInfo::is_valid_address(&x.tvu) - && !ClusterInfo::is_archiver(x) - && x.id != self.id() - && x.shred_version == 
self.my_shred_version() - }) - .cloned() - .collect() - } - - /// all peers that have a valid storage addr regardless of `shred_version`. - pub fn all_storage_peers(&self) -> Vec { - self.gossip - .read() - .unwrap() - .crds - .table - .values() - .filter_map(|x| x.value.contact_info()) - .filter(|x| ContactInfo::is_valid_address(&x.storage_addr) && x.id != self.id()) - .cloned() - .collect() - } - - /// all peers that have a valid storage addr and are on the same `shred_version`. - pub fn storage_peers(&self) -> Vec { - self.gossip - .read() - .unwrap() - .crds - .table - .values() - .filter_map(|x| x.value.contact_info()) - .filter(|x| { - ContactInfo::is_valid_address(&x.storage_addr) && x.id != self.id() && x.shred_version == self.my_shred_version() }) @@ -871,15 +826,9 @@ impl ClusterInfo { } fn is_spy_node(contact_info: &ContactInfo) -> bool { - (!ContactInfo::is_valid_address(&contact_info.tpu) + !ContactInfo::is_valid_address(&contact_info.tpu) || !ContactInfo::is_valid_address(&contact_info.gossip) - || !ContactInfo::is_valid_address(&contact_info.tvu)) - && !ContactInfo::is_valid_address(&contact_info.storage_addr) - } - - pub fn is_archiver(contact_info: &ContactInfo) -> bool { - ContactInfo::is_valid_address(&contact_info.storage_addr) - && !ContactInfo::is_valid_address(&contact_info.tpu) + || !ContactInfo::is_valid_address(&contact_info.tvu) } fn sorted_stakes_with_index( @@ -1935,7 +1884,6 @@ pub struct Sockets { pub broadcast: Vec, pub repair: UdpSocket, pub retransmit_sockets: Vec, - pub storage: Option, pub serve_repair: UdpSocket, } @@ -1950,50 +1898,6 @@ impl Node { let pubkey = Pubkey::new_rand(); Self::new_localhost_with_pubkey(&pubkey) } - pub fn new_localhost_archiver(pubkey: &Pubkey) -> Self { - let gossip = UdpSocket::bind("127.0.0.1:0").unwrap(); - let tvu = UdpSocket::bind("127.0.0.1:0").unwrap(); - let tvu_forwards = UdpSocket::bind("127.0.0.1:0").unwrap(); - let storage = UdpSocket::bind("127.0.0.1:0").unwrap(); - let empty = "0.0.0.0:0".parse().unwrap(); - let repair = UdpSocket::bind("127.0.0.1:0").unwrap(); - let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()]; - let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap(); - let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap(); - - let info = ContactInfo { - id: *pubkey, - gossip: gossip.local_addr().unwrap(), - tvu: tvu.local_addr().unwrap(), - tvu_forwards: tvu_forwards.local_addr().unwrap(), - repair: repair.local_addr().unwrap(), - tpu: empty, - tpu_forwards: empty, - storage_addr: storage.local_addr().unwrap(), - rpc: empty, - rpc_pubsub: empty, - serve_repair: serve_repair.local_addr().unwrap(), - wallclock: timestamp(), - shred_version: 0, - }; - - Node { - info, - sockets: Sockets { - gossip, - tvu: vec![tvu], - tvu_forwards: vec![], - tpu: vec![], - tpu_forwards: vec![], - broadcast, - repair, - retransmit_sockets: vec![retransmit], - serve_repair, - storage: Some(storage), - ip_echo: None, - }, - } - } pub fn new_localhost_with_pubkey(pubkey: &Pubkey) -> Self { let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); let tpu = UdpSocket::bind("127.0.0.1:0").unwrap(); @@ -2012,7 +1916,7 @@ impl Node { let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()]; let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let storage = UdpSocket::bind("0.0.0.0:0").unwrap(); + let unused = UdpSocket::bind("0.0.0.0:0").unwrap(); let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap(); let info = ContactInfo { id: *pubkey, @@ -2022,7 +1926,7 @@ impl Node { repair: 
repair.local_addr().unwrap(), tpu: tpu.local_addr().unwrap(), tpu_forwards: tpu_forwards.local_addr().unwrap(), - storage_addr: storage.local_addr().unwrap(), + unused: unused.local_addr().unwrap(), rpc: rpc_addr, rpc_pubsub: rpc_pubsub_addr, serve_repair: serve_repair.local_addr().unwrap(), @@ -2041,7 +1945,6 @@ impl Node { broadcast, repair, retransmit_sockets: vec![retransmit_socket], - storage: None, serve_repair, }, } @@ -2104,7 +2007,7 @@ impl Node { repair: SocketAddr::new(gossip_addr.ip(), repair_port), tpu: SocketAddr::new(gossip_addr.ip(), tpu_port), tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port), - storage_addr: socketaddr_any!(), + unused: socketaddr_any!(), rpc: socketaddr_any!(), rpc_pubsub: socketaddr_any!(), serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port), @@ -2124,32 +2027,11 @@ impl Node { broadcast, repair, retransmit_sockets, - storage: None, serve_repair, ip_echo: Some(ip_echo), }, } } - pub fn new_archiver_with_external_ip( - pubkey: &Pubkey, - gossip_addr: &SocketAddr, - port_range: PortRange, - bind_ip_addr: IpAddr, - ) -> Node { - let mut new = Self::new_with_external_ip(pubkey, gossip_addr, port_range, bind_ip_addr); - let (storage_port, storage_socket) = Self::bind(bind_ip_addr, port_range); - - new.info.storage_addr = SocketAddr::new(gossip_addr.ip(), storage_port); - new.sockets.storage = Some(storage_socket); - - let empty = socketaddr_any!(); - new.info.tpu = empty; - new.info.tpu_forwards = empty; - new.sockets.tpu = vec![]; - new.sockets.tpu_forwards = vec![]; - - new - } } fn report_time_spent(label: &str, time: &Duration, extra: &str) { @@ -2323,27 +2205,6 @@ mod tests { assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), port); } - #[test] - fn new_archiver_external_ip_test() { - // Can't use VALIDATOR_PORT_RANGE because if this test runs in parallel with others, the - // port returned by `bind_in_range()` might be snatched up before `Node::new_with_external_ip()` runs - let port_range = (VALIDATOR_PORT_RANGE.1 + 20, VALIDATOR_PORT_RANGE.1 + 30); - let ip = Ipv4Addr::from(0); - let node = Node::new_archiver_with_external_ip( - &Pubkey::new_rand(), - &socketaddr!(ip, 0), - port_range, - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - ); - - let ip = IpAddr::V4(ip); - check_socket(&node.sockets.storage.unwrap(), ip, port_range); - check_socket(&node.sockets.gossip, ip, port_range); - check_socket(&node.sockets.repair, ip, port_range); - - check_sockets(&node.sockets.tvu, ip, port_range); - } - //test that all cluster_info objects only generate signed messages //when constructed with keypairs #[test] diff --git a/core/src/contact_info.rs b/core/src/contact_info.rs index 93fe68333a..a93407e447 100644 --- a/core/src/contact_info.rs +++ b/core/src/contact_info.rs @@ -25,8 +25,8 @@ pub struct ContactInfo { pub tpu: SocketAddr, /// address to forward unprocessed transactions to pub tpu_forwards: SocketAddr, - /// storage data address - pub storage_addr: SocketAddr, + /// unused address + pub unused: SocketAddr, /// address to which to send JSON-RPC requests pub rpc: SocketAddr, /// websocket for JSON-RPC push notifications @@ -95,7 +95,7 @@ impl Default for ContactInfo { repair: socketaddr_any!(), tpu: socketaddr_any!(), tpu_forwards: socketaddr_any!(), - storage_addr: socketaddr_any!(), + unused: socketaddr_any!(), rpc: socketaddr_any!(), rpc_pubsub: socketaddr_any!(), serve_repair: socketaddr_any!(), @@ -115,7 +115,7 @@ impl ContactInfo { repair: socketaddr!("127.0.0.1:1237"), tpu: socketaddr!("127.0.0.1:1238"), 
tpu_forwards: socketaddr!("127.0.0.1:1239"), - storage_addr: socketaddr!("127.0.0.1:1240"), + unused: socketaddr!("127.0.0.1:1240"), rpc: socketaddr!("127.0.0.1:1241"), rpc_pubsub: socketaddr!("127.0.0.1:1242"), serve_repair: socketaddr!("127.0.0.1:1243"), @@ -137,7 +137,7 @@ impl ContactInfo { repair: addr, tpu: addr, tpu_forwards: addr, - storage_addr: addr, + unused: addr, rpc: addr, rpc_pubsub: addr, serve_repair: addr, @@ -171,7 +171,7 @@ impl ContactInfo { repair, tpu, tpu_forwards, - storage_addr: "0.0.0.0:0".parse().unwrap(), + unused: "0.0.0.0:0".parse().unwrap(), rpc, rpc_pubsub, serve_repair, @@ -249,7 +249,7 @@ mod tests { assert!(ci.rpc.ip().is_unspecified()); assert!(ci.rpc_pubsub.ip().is_unspecified()); assert!(ci.tpu.ip().is_unspecified()); - assert!(ci.storage_addr.ip().is_unspecified()); + assert!(ci.unused.ip().is_unspecified()); assert!(ci.serve_repair.ip().is_unspecified()); } #[test] @@ -261,7 +261,7 @@ mod tests { assert!(ci.rpc.ip().is_multicast()); assert!(ci.rpc_pubsub.ip().is_multicast()); assert!(ci.tpu.ip().is_multicast()); - assert!(ci.storage_addr.ip().is_multicast()); + assert!(ci.unused.ip().is_multicast()); assert!(ci.serve_repair.ip().is_multicast()); } #[test] @@ -274,7 +274,7 @@ mod tests { assert!(ci.rpc.ip().is_unspecified()); assert!(ci.rpc_pubsub.ip().is_unspecified()); assert!(ci.tpu.ip().is_unspecified()); - assert!(ci.storage_addr.ip().is_unspecified()); + assert!(ci.unused.ip().is_unspecified()); assert!(ci.serve_repair.ip().is_unspecified()); } #[test] @@ -287,7 +287,7 @@ mod tests { assert_eq!(ci.tpu_forwards.port(), 13); assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT); assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT); - assert!(ci.storage_addr.ip().is_unspecified()); + assert!(ci.unused.ip().is_unspecified()); assert_eq!(ci.serve_repair.port(), 16); } diff --git a/core/src/gossip_service.rs b/core/src/gossip_service.rs index c9d5d10f49..3d424909ee 100644 --- a/core/src/gossip_service.rs +++ b/core/src/gossip_service.rs @@ -63,11 +63,11 @@ impl GossipService { } } -/// Discover Validators and Archivers in a cluster +/// Discover Validators in a cluster pub fn discover_cluster( entrypoint: &SocketAddr, num_nodes: usize, -) -> std::io::Result<(Vec, Vec)> { +) -> std::io::Result> { discover( Some(entrypoint), Some(num_nodes), @@ -77,18 +77,18 @@ pub fn discover_cluster( None, 0, ) - .map(|(_all_peers, validators, archivers)| (validators, archivers)) + .map(|(_all_peers, validators)| validators) } pub fn discover( entrypoint: Option<&SocketAddr>, - num_nodes: Option, // num_nodes only counts validators and archivers, excludes spy nodes + num_nodes: Option, // num_nodes only counts validators, excludes spy nodes timeout: Option, find_node_by_pubkey: Option, find_node_by_gossip_addr: Option<&SocketAddr>, my_gossip_addr: Option<&SocketAddr>, my_shred_version: u16, -) -> std::io::Result<(Vec, Vec, Vec)> { +) -> std::io::Result<(Vec, Vec)> { let exit = Arc::new(AtomicBool::new(false)); let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version); @@ -102,7 +102,7 @@ pub fn discover( let _ip_echo_server = ip_echo.map(solana_net_utils::ip_echo_server); - let (met_criteria, secs, all_peers, tvu_peers, storage_peers) = spy( + let (met_criteria, secs, all_peers, tvu_peers) = spy( spy_ref.clone(), num_nodes, timeout, @@ -119,7 +119,7 @@ pub fn discover( secs, spy_ref.contact_info_trace() ); - return Ok((all_peers, tvu_peers, storage_peers)); + return Ok((all_peers, tvu_peers)); } if 
!tvu_peers.is_empty() { @@ -127,7 +127,7 @@ pub fn discover( "discover failed to match criteria by timeout...\n{}", spy_ref.contact_info_trace() ); - return Ok((all_peers, tvu_peers, storage_peers)); + return Ok((all_peers, tvu_peers)); } info!("discover failed...\n{}", spy_ref.contact_info_trace()); @@ -182,18 +182,11 @@ fn spy( timeout: Option, find_node_by_pubkey: Option, find_node_by_gossip_addr: Option<&SocketAddr>, -) -> ( - bool, - u64, - Vec, - Vec, - Vec, -) { +) -> (bool, u64, Vec, Vec) { let now = Instant::now(); let mut met_criteria = false; let mut all_peers: Vec = Vec::new(); let mut tvu_peers: Vec = Vec::new(); - let mut storage_peers: Vec = Vec::new(); let mut i = 1; while !met_criteria { if let Some(secs) = timeout { @@ -208,7 +201,6 @@ fn spy( .map(|x| x.0) .collect::>(); tvu_peers = spy_ref.all_tvu_peers().into_iter().collect::>(); - storage_peers = spy_ref.all_storage_peers(); let found_node_by_pubkey = if let Some(pubkey) = find_node_by_pubkey { all_peers.iter().any(|x| x.id == pubkey) @@ -224,7 +216,7 @@ fn spy( if let Some(num) = num_nodes { // Only consider validators and archives for `num_nodes` - let mut nodes: Vec<_> = tvu_peers.iter().chain(storage_peers.iter()).collect(); + let mut nodes: Vec<_> = tvu_peers.iter().collect(); nodes.sort(); nodes.dedup(); @@ -248,13 +240,7 @@ fn spy( )); i += 1; } - ( - met_criteria, - now.elapsed().as_secs(), - all_peers, - tvu_peers, - storage_peers, - ) + (met_criteria, now.elapsed().as_secs(), all_peers, tvu_peers) } /// Makes a spy or gossip node based on whether or not a gossip_addr was passed in @@ -314,21 +300,21 @@ mod tests { let spy_ref = Arc::new(cluster_info); - let (met_criteria, secs, _, tvu_peers, _) = spy(spy_ref.clone(), None, Some(1), None, None); + let (met_criteria, secs, _, tvu_peers) = spy(spy_ref.clone(), None, Some(1), None, None); assert_eq!(met_criteria, false); assert_eq!(secs, 1); assert_eq!(tvu_peers, spy_ref.tvu_peers()); // Find num_nodes - let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None); assert_eq!(met_criteria, true); - let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(2), None, None, None); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(2), None, None, None); assert_eq!(met_criteria, true); // Find specific node by pubkey - let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), None, None, Some(peer0), None); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), None, None, Some(peer0), None); assert_eq!(met_criteria, true); - let (met_criteria, _, _, _, _) = spy( + let (met_criteria, _, _, _) = spy( spy_ref.clone(), None, Some(0), @@ -338,11 +324,11 @@ mod tests { assert_eq!(met_criteria, false); // Find num_nodes *and* specific node by pubkey - let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(1), None, Some(peer0), None); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, Some(peer0), None); assert_eq!(met_criteria, true); - let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(3), Some(0), Some(peer0), None); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(3), Some(0), Some(peer0), None); assert_eq!(met_criteria, false); - let (met_criteria, _, _, _, _) = spy( + let (met_criteria, _, _, _) = spy( spy_ref.clone(), Some(1), Some(0), @@ -352,11 +338,11 @@ mod tests { assert_eq!(met_criteria, false); // Find specific node by gossip address - let (met_criteria, _, _, _, _) = + let (met_criteria, _, _, _) = spy(spy_ref.clone(), 
None, None, None, Some(&peer0_info.gossip)); assert_eq!(met_criteria, true); - let (met_criteria, _, _, _, _) = spy( + let (met_criteria, _, _, _) = spy( spy_ref.clone(), None, Some(0), diff --git a/core/src/lib.rs b/core/src/lib.rs index 87bbad1b6b..07a29e18d2 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -52,7 +52,6 @@ pub mod sigverify; pub mod sigverify_shreds; pub mod sigverify_stage; pub mod snapshot_packager_service; -pub mod storage_stage; pub mod tpu; pub mod transaction_status_service; pub mod tvu; diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 068173f20b..e86725ad5c 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -57,14 +57,11 @@ pub const MAX_DUPLICATE_WAIT_MS: usize = 10_000; pub const REPAIR_MS: u64 = 100; pub const MAX_ORPHANS: usize = 5; -pub enum RepairStrategy { - RepairRange(RepairSlotRange), - RepairAll { - bank_forks: Arc>, - completed_slots_receiver: CompletedSlotsReceiver, - epoch_schedule: EpochSchedule, - duplicate_slots_reset_sender: DuplicateSlotsResetSender, - }, +pub struct RepairInfo { + pub bank_forks: Arc>, + pub completed_slots_receiver: CompletedSlotsReceiver, + pub epoch_schedule: EpochSchedule, + pub duplicate_slots_reset_sender: DuplicateSlotsResetSender, } pub struct RepairSlotRange { @@ -97,7 +94,7 @@ impl RepairService { exit: Arc, repair_socket: Arc, cluster_info: Arc, - repair_strategy: RepairStrategy, + repair_info: RepairInfo, cluster_slots: Arc, ) -> Self { let t_repair = Builder::new() @@ -108,7 +105,7 @@ impl RepairService { &exit, &repair_socket, &cluster_info, - repair_strategy, + repair_info, &cluster_slots, ) }) @@ -122,84 +119,61 @@ impl RepairService { exit: &AtomicBool, repair_socket: &UdpSocket, cluster_info: &Arc, - repair_strategy: RepairStrategy, + repair_info: RepairInfo, cluster_slots: &Arc, ) { let serve_repair = ServeRepair::new(cluster_info.clone()); let id = cluster_info.id(); - if let RepairStrategy::RepairAll { .. } = repair_strategy { - Self::initialize_lowest_slot(id, blockstore, cluster_info); - } + Self::initialize_lowest_slot(id, blockstore, cluster_info); let mut repair_stats = RepairStats::default(); let mut last_stats = Instant::now(); let mut duplicate_slot_repair_statuses = HashMap::new(); - - if let RepairStrategy::RepairAll { - ref completed_slots_receiver, - .. - } = repair_strategy - { - Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver); - } + Self::initialize_epoch_slots( + blockstore, + cluster_info, + &repair_info.completed_slots_receiver, + ); loop { if exit.load(Ordering::Relaxed) { break; } let repairs = { - match repair_strategy { - RepairStrategy::RepairRange(ref repair_slot_range) => { - // Strategy used by archivers - Self::generate_repairs_in_range( - blockstore, - MAX_REPAIR_LENGTH, - repair_slot_range, - ) - } - - RepairStrategy::RepairAll { - ref completed_slots_receiver, - ref bank_forks, - ref duplicate_slots_reset_sender, - .. 
- } => { - let root_bank = bank_forks.read().unwrap().root_bank().clone(); - let new_root = root_bank.slot(); - let lowest_slot = blockstore.lowest_slot(); - Self::update_lowest_slot(&id, lowest_slot, &cluster_info); - Self::update_completed_slots(completed_slots_receiver, &cluster_info); - cluster_slots.update(new_root, cluster_info, bank_forks); - let new_duplicate_slots = Self::find_new_duplicate_slots( - &duplicate_slot_repair_statuses, - blockstore, - cluster_slots, - &root_bank, - ); - Self::process_new_duplicate_slots( - &new_duplicate_slots, - &mut duplicate_slot_repair_statuses, - cluster_slots, - &root_bank, - blockstore, - &serve_repair, - &duplicate_slots_reset_sender, - ); - Self::generate_and_send_duplicate_repairs( - &mut duplicate_slot_repair_statuses, - cluster_slots, - blockstore, - &serve_repair, - &mut repair_stats, - &repair_socket, - ); - Self::generate_repairs( - blockstore, - root_bank.slot(), - MAX_REPAIR_LENGTH, - &duplicate_slot_repair_statuses, - ) - } - } + let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone(); + let new_root = root_bank.slot(); + let lowest_slot = blockstore.lowest_slot(); + Self::update_lowest_slot(&id, lowest_slot, &cluster_info); + Self::update_completed_slots(&repair_info.completed_slots_receiver, &cluster_info); + cluster_slots.update(new_root, cluster_info, &repair_info.bank_forks); + let new_duplicate_slots = Self::find_new_duplicate_slots( + &duplicate_slot_repair_statuses, + blockstore, + cluster_slots, + &root_bank, + ); + Self::process_new_duplicate_slots( + &new_duplicate_slots, + &mut duplicate_slot_repair_statuses, + cluster_slots, + &root_bank, + blockstore, + &serve_repair, + &repair_info.duplicate_slots_reset_sender, + ); + Self::generate_and_send_duplicate_repairs( + &mut duplicate_slot_repair_statuses, + cluster_slots, + blockstore, + &serve_repair, + &mut repair_stats, + &repair_socket, + ); + Self::generate_repairs( + blockstore, + root_bank.slot(), + MAX_REPAIR_LENGTH, + &duplicate_slot_repair_statuses, + ) }; if let Ok(repairs) = repairs { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 83aa055a7b..faff4f58e8 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -45,7 +45,7 @@ use std::{ result, sync::{ atomic::{AtomicBool, Ordering}, - mpsc::{channel, Receiver, RecvTimeoutError, Sender}, + mpsc::{Receiver, RecvTimeoutError, Sender}, Arc, Mutex, RwLock, }, thread::{self, Builder, JoinHandle}, @@ -157,7 +157,7 @@ impl ReplayStage { cluster_slots: Arc, retransmit_slots_sender: RetransmitSlotsSender, duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver, - ) -> (Self, Receiver>>) { + ) -> Self { let ReplayStageConfig { my_pubkey, vote_account, @@ -172,7 +172,6 @@ impl ReplayStage { rewards_recorder_sender, } = config; - let (root_bank_sender, root_bank_receiver) = channel(); trace!("replay stage"); let mut tower = Tower::new(&my_pubkey, &vote_account, &bank_forks.read().unwrap()); @@ -377,7 +376,6 @@ impl ReplayStage { &cluster_info, &blockstore, &leader_schedule_cache, - &root_bank_sender, &lockouts_sender, &accounts_hash_sender, &latest_root_senders, @@ -497,13 +495,11 @@ impl ReplayStage { Ok(()) }) .unwrap(); - ( - Self { - t_replay, - commitment_service, - }, - root_bank_receiver, - ) + + Self { + t_replay, + commitment_service, + } } fn report_memory( @@ -852,7 +848,6 @@ impl ReplayStage { cluster_info: &Arc, blockstore: &Arc, leader_schedule_cache: &Arc, - root_bank_sender: &Sender>>, lockouts_sender: &Sender, accounts_hash_sender: &Option, 
latest_root_senders: &[Sender], @@ -905,10 +900,6 @@ impl ReplayStage { } }); info!("new root {}", new_root); - if let Err(e) = root_bank_sender.send(rooted_banks) { - trace!("root_bank_sender failed: {:?}", e); - return Err(e.into()); - } } Self::update_commitment_cache( diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index a1a7e22312..f3c0fbac81 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -4,7 +4,7 @@ use crate::{ cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT}, cluster_slots::ClusterSlots, repair_service::DuplicateSlotsResetSender, - repair_service::RepairStrategy, + repair_service::RepairInfo, result::{Error, Result}, window_service::{should_retransmit_and_persist, WindowService}, }; @@ -353,7 +353,7 @@ impl RetransmitStage { retransmit_receiver, ); - let repair_strategy = RepairStrategy::RepairAll { + let repair_info = RepairInfo { bank_forks, completed_slots_receiver, epoch_schedule, @@ -367,7 +367,7 @@ impl RetransmitStage { retransmit_sender, repair_socket, exit, - repair_strategy, + repair_info, &leader_schedule_cache.clone(), move |id, shred, working_bank, last_root| { let is_connected = cfg diff --git a/core/src/rpc.rs b/core/src/rpc.rs index 75c7942439..2e9e0c7664 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -6,7 +6,6 @@ use crate::{ contact_info::ContactInfo, non_circulating_supply::calculate_non_circulating_supply, rpc_error::RpcCustomError, - storage_stage::StorageState, validator::ValidatorExit, }; use bincode::serialize; @@ -73,7 +72,6 @@ pub struct JsonRpcRequestProcessor { block_commitment_cache: Arc>, blockstore: Arc, config: JsonRpcConfig, - storage_state: StorageState, validator_exit: Arc>>, } @@ -111,7 +109,6 @@ impl JsonRpcRequestProcessor { bank_forks: Arc>, block_commitment_cache: Arc>, blockstore: Arc, - storage_state: StorageState, validator_exit: Arc>>, ) -> Self { JsonRpcRequestProcessor { @@ -119,7 +116,6 @@ impl JsonRpcRequestProcessor { bank_forks, block_commitment_cache, blockstore, - storage_state, validator_exit, } } @@ -375,31 +371,6 @@ impl JsonRpcRequestProcessor { }) } - fn get_storage_turn_rate(&self) -> Result { - Ok(self.storage_state.get_storage_turn_rate()) - } - - fn get_storage_turn(&self) -> Result { - Ok(RpcStorageTurn { - blockhash: self.storage_state.get_storage_blockhash().to_string(), - slot: self.storage_state.get_slot(), - }) - } - - fn get_slots_per_segment(&self, commitment: Option) -> Result { - Ok(self.bank(commitment)?.slots_per_segment()) - } - - fn get_storage_pubkeys_for_slot(&self, slot: Slot) -> Result> { - let pubkeys: Vec = self - .storage_state - .get_pubkeys_for_slot(slot, &self.bank_forks) - .iter() - .map(|pubkey| pubkey.to_string()) - .collect(); - Ok(pubkeys) - } - pub fn set_log_filter(&self, filter: String) -> Result<()> { if self.config.enable_set_log_filter { solana_logger::setup_with(&filter); @@ -879,22 +850,6 @@ pub trait RpcSol { commitment: Option, ) -> Result; - #[rpc(meta, name = "getStorageTurnRate")] - fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "getStorageTurn")] - fn get_storage_turn(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "getSlotsPerSegment")] - fn get_slots_per_segment( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; - - #[rpc(meta, name = "getStoragePubkeysForSlot")] - fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result>; - #[rpc(meta, name = "validatorExit")] fn validator_exit(&self, 
meta: Self::Metadata) -> Result; @@ -1419,39 +1374,6 @@ impl RpcSol for RpcSolImpl { .get_vote_accounts(commitment) } - fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result { - meta.request_processor - .read() - .unwrap() - .get_storage_turn_rate() - } - - fn get_storage_turn(&self, meta: Self::Metadata) -> Result { - meta.request_processor.read().unwrap().get_storage_turn() - } - - fn get_slots_per_segment( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - meta.request_processor - .read() - .unwrap() - .get_slots_per_segment(commitment) - } - - fn get_storage_pubkeys_for_slot( - &self, - meta: Self::Metadata, - slot: Slot, - ) -> Result> { - meta.request_processor - .read() - .unwrap() - .get_storage_pubkeys_for_slot(slot) - } - fn validator_exit(&self, meta: Self::Metadata) -> Result { meta.request_processor.read().unwrap().validator_exit() } @@ -1736,7 +1658,6 @@ pub mod tests { bank_forks.clone(), block_commitment_cache.clone(), blockstore, - StorageState::default(), validator_exit, ))); let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default())); @@ -1785,7 +1706,6 @@ pub mod tests { bank_forks, block_commitment_cache, blockstore, - StorageState::default(), validator_exit, ); thread::spawn(move || { @@ -2524,7 +2444,6 @@ pub mod tests { new_bank_forks().0, block_commitment_cache, blockstore, - StorageState::default(), validator_exit, ); Arc::new(RwLock::new(request_processor)) @@ -2621,7 +2540,6 @@ pub mod tests { new_bank_forks().0, block_commitment_cache, blockstore, - StorageState::default(), validator_exit, ); assert_eq!(request_processor.validator_exit(), Ok(false)); @@ -2644,7 +2562,6 @@ pub mod tests { new_bank_forks().0, block_commitment_cache, blockstore, - StorageState::default(), validator_exit, ); assert_eq!(request_processor.validator_exit(), Ok(true)); @@ -2726,7 +2643,6 @@ pub mod tests { bank_forks, block_commitment_cache, blockstore, - StorageState::default(), validator_exit, ); assert_eq!( diff --git a/core/src/rpc_service.rs b/core/src/rpc_service.rs index b361847be8..483c2f9fa9 100644 --- a/core/src/rpc_service.rs +++ b/core/src/rpc_service.rs @@ -1,8 +1,7 @@ //! The `rpc_service` module implements the Solana JSON RPC service. 
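
// An illustrative sketch of the client-visible effect of dropping the storage
// RPC methods above (getStorageTurnRate, getStorageTurn, getSlotsPerSegment,
// getStoragePubkeysForSlot): the methods are simply no longer registered, so a
// JSON-RPC server should answer them with the standard "Method not found"
// error (-32601). This assumes nothing beyond ordinary JSON-RPC 2.0 behavior
// and no solana client crate; it only shows the raw request a caller would
// have sent.
fn main() {
    let request = r#"{"jsonrpc":"2.0","id":1,"method":"getStorageTurn","params":[]}"#;
    // POSTing `request` to the validator's RPC port after this change yields
    // something like:
    // {"jsonrpc":"2.0","error":{"code":-32601,"message":"Method not found"},"id":1}
    println!("{}", request);
}
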
use crate::{ - cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, - storage_stage::StorageState, validator::ValidatorExit, + cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, validator::ValidatorExit, }; use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::{ @@ -252,7 +251,6 @@ impl JsonRpcService { cluster_info: Arc, genesis_hash: Hash, ledger_path: &Path, - storage_state: StorageState, validator_exit: Arc>>, trusted_validators: Option>, ) -> Self { @@ -263,7 +261,6 @@ impl JsonRpcService { bank_forks, block_commitment_cache, blockstore, - storage_state, validator_exit.clone(), ))); @@ -394,7 +391,6 @@ mod tests { cluster_info, Hash::default(), &PathBuf::from("farf"), - StorageState::default(), validator_exit, None, ); diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index b19d5e3b17..9fecab2c05 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -602,7 +602,7 @@ mod tests { repair: socketaddr!("127.0.0.1:1237"), tpu: socketaddr!("127.0.0.1:1238"), tpu_forwards: socketaddr!("127.0.0.1:1239"), - storage_addr: socketaddr!("127.0.0.1:1240"), + unused: socketaddr!("127.0.0.1:1240"), rpc: socketaddr!("127.0.0.1:1241"), rpc_pubsub: socketaddr!("127.0.0.1:1242"), serve_repair: socketaddr!("127.0.0.1:1243"), @@ -680,7 +680,7 @@ mod tests { repair: socketaddr!([127, 0, 0, 1], 1237), tpu: socketaddr!([127, 0, 0, 1], 1238), tpu_forwards: socketaddr!([127, 0, 0, 1], 1239), - storage_addr: socketaddr!([127, 0, 0, 1], 1240), + unused: socketaddr!([127, 0, 0, 1], 1240), rpc: socketaddr!([127, 0, 0, 1], 1241), rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242), serve_repair: serve_repair_addr, @@ -708,7 +708,7 @@ mod tests { repair: socketaddr!([127, 0, 0, 1], 1237), tpu: socketaddr!([127, 0, 0, 1], 1238), tpu_forwards: socketaddr!([127, 0, 0, 1], 1239), - storage_addr: socketaddr!([127, 0, 0, 1], 1240), + unused: socketaddr!([127, 0, 0, 1], 1240), rpc: socketaddr!([127, 0, 0, 1], 1241), rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242), serve_repair: serve_repair_addr2, diff --git a/core/src/storage_stage.rs b/core/src/storage_stage.rs deleted file mode 100644 index a637797f43..0000000000 --- a/core/src/storage_stage.rs +++ /dev/null @@ -1,740 +0,0 @@ -// A stage that handles generating the keys used to encrypt the ledger and sample it -// for storage mining. Archivers submit storage proofs, validator then bundles them -// to submit its proof for mining to be rewarded. 
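
// A hedged, self-contained sketch of the sampling scheme this deleted stage
// implemented (condensed from `process_turn` further down in this file): a
// signature over the turn's blockhash seeds a ChaCha RNG, so every node that
// derives the same signature selects the same ledger sample offsets. Assumes
// the rand 0.7-era `rand`/`rand_chacha` APIs used by the removed code.
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;

const NUM_STORAGE_SAMPLES: usize = 4; // same constant as the deleted stage

fn deterministic_samples(signature_bytes: &[u8]) -> Vec<u64> {
    // The first 32 signature bytes become the RNG seed, as in the deleted code.
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&signature_bytes[..32]);
    let mut rng = ChaChaRng::from_seed(seed);
    (0..NUM_STORAGE_SAMPLES).map(|_| rng.gen_range(0, 10)).collect()
}

fn main() {
    let fake_signature = [7u8; 64]; // stand-in for a real ed25519 signature
    println!("samples: {:?}", deterministic_samples(&fake_signature));
}
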
- -use crate::{ - cluster_info::ClusterInfo, - commitment::BlockCommitmentCache, - contact_info::ContactInfo, - result::{Error, Result}, -}; -use rand::{Rng, SeedableRng}; -use rand_chacha::ChaChaRng; -use solana_chacha_cuda::chacha_cuda::chacha_cbc_encrypt_file_many_keys; -use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore}; -use solana_runtime::{bank::Bank, storage_utils::archiver_accounts}; -use solana_sdk::{ - account::Account, - account_utils::StateMut, - clock::{get_segment_from_slot, Slot}, - hash::Hash, - instruction::Instruction, - message::Message, - pubkey::Pubkey, - signature::{Keypair, Signature, Signer}, - transaction::Transaction, -}; -use solana_storage_program::{ - storage_contract::{Proof, ProofStatus, StorageContract}, - storage_instruction, - storage_instruction::proof_validation, -}; -use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY; -use std::{ - cmp, - collections::HashMap, - io, - mem::size_of, - net::UdpSocket, - sync::atomic::{AtomicBool, Ordering}, - sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender}, - sync::{Arc, RwLock}, - thread::{self, sleep, Builder, JoinHandle}, - time::{Duration, Instant}, -}; - -// Block of hash answers to validate against -// Vec of [ledger blocks] x [keys] -type StorageResults = Vec; -type StorageKeys = Vec; -type ArchiverMap = Vec>>; - -#[derive(Default)] -pub struct StorageStateInner { - pub storage_results: StorageResults, - pub storage_keys: StorageKeys, - archiver_map: ArchiverMap, - storage_blockhash: Hash, - slot: Slot, - slots_per_segment: u64, - slots_per_turn: u64, -} - -// Used to track root slots in storage stage -#[derive(Default)] -struct StorageSlots { - last_root: u64, - slot_count: u64, - pending_root_banks: Vec>, -} - -#[derive(Clone, Default)] -pub struct StorageState { - pub state: Arc>, -} - -pub struct StorageStage { - t_storage_mining_verifier: JoinHandle<()>, - t_storage_create_accounts: JoinHandle<()>, -} - -pub const SLOTS_PER_TURN_TEST: u64 = 2; -// TODO: some way to dynamically size NUM_IDENTITIES -const NUM_IDENTITIES: usize = 1024; -pub const NUM_STORAGE_SAMPLES: usize = 4; -const KEY_SIZE: usize = 64; - -type InstructionSender = Sender; - -impl StorageState { - pub fn new(hash: &Hash, slots_per_turn: u64, slots_per_segment: u64) -> Self { - let storage_keys = vec![0u8; KEY_SIZE * NUM_IDENTITIES]; - let storage_results = vec![Hash::default(); NUM_IDENTITIES]; - let archiver_map = vec![]; - - let state = StorageStateInner { - storage_keys, - storage_results, - archiver_map, - slots_per_turn, - slot: 0, - slots_per_segment, - storage_blockhash: *hash, - }; - - StorageState { - state: Arc::new(RwLock::new(state)), - } - } - - pub fn get_storage_blockhash(&self) -> Hash { - self.state.read().unwrap().storage_blockhash - } - - pub fn get_storage_turn_rate(&self) -> u64 { - self.state.read().unwrap().slots_per_turn - } - - pub fn get_slot(&self) -> u64 { - self.state.read().unwrap().slot - } - - pub fn get_pubkeys_for_slot( - &self, - slot: Slot, - bank_forks: &Arc>, - ) -> Vec { - // TODO: keep track of age? 
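
// A small sketch of how the stage indexed into the fixed-size identity tables
// allocated in `StorageState::new` above (mirrors
// `get_identity_index_from_signature` near the end of this file): the first
// four signature bytes are read as a little-endian integer, then masked with
// NUM_IDENTITIES - 1. The mask is only equivalent to `% NUM_IDENTITIES`
// because 1024 is a power of two.
const NUM_IDENTITIES: usize = 1024;

fn identity_index(signature_bytes: &[u8]) -> usize {
    let raw = (signature_bytes[0] as usize)
        | ((signature_bytes[1] as usize) << 8)
        | ((signature_bytes[2] as usize) << 16)
        | ((signature_bytes[3] as usize) << 24);
    raw & (NUM_IDENTITIES - 1)
}

fn main() {
    assert!(NUM_IDENTITIES.is_power_of_two()); // precondition for the mask trick
    println!("{}", identity_index(&[0xff, 0x01, 0x02, 0x03]));
}
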
- const MAX_PUBKEYS_TO_RETURN: usize = 5; - let index = - get_segment_from_slot(slot, self.state.read().unwrap().slots_per_segment) as usize; - let archiver_map = &self.state.read().unwrap().archiver_map; - let working_bank = bank_forks.read().unwrap().working_bank(); - let accounts = archiver_accounts(&working_bank); - if index < archiver_map.len() { - //perform an account owner lookup - let mut slot_archivers = archiver_map[index] - .keys() - .filter_map(|account_id| { - accounts.get(account_id).and_then(|account| { - if let Ok(StorageContract::ArchiverStorage { owner, .. }) = account.state() - { - Some(owner) - } else { - None - } - }) - }) - .collect::>(); - slot_archivers.truncate(MAX_PUBKEYS_TO_RETURN); - slot_archivers - } else { - vec![] - } - } -} - -impl StorageStage { - #[allow(clippy::too_many_arguments)] - pub fn new( - storage_state: &StorageState, - bank_receiver: Receiver>>, - blockstore: Option>, - keypair: &Arc, - storage_keypair: &Arc, - exit: &Arc, - bank_forks: &Arc>, - cluster_info: &Arc, - block_commitment_cache: Arc>, - ) -> Self { - let (instruction_sender, instruction_receiver) = channel(); - - let t_storage_mining_verifier = { - let slots_per_turn = storage_state.state.read().unwrap().slots_per_turn; - let storage_state_inner = storage_state.state.clone(); - let exit = exit.clone(); - let storage_keypair = storage_keypair.clone(); - Builder::new() - .name("solana-storage-mining-verify-stage".to_string()) - .spawn(move || { - let mut current_key = 0; - let mut storage_slots = StorageSlots::default(); - loop { - if let Some(ref some_blockstore) = blockstore { - if let Err(e) = Self::process_entries( - &storage_keypair, - &storage_state_inner, - &bank_receiver, - &some_blockstore, - &mut storage_slots, - &mut current_key, - slots_per_turn, - &instruction_sender, - ) { - match e { - Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => { - break; - } - Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (), - _ => info!("Error from process_entries: {:?}", e), - } - } - } - if exit.load(Ordering::Relaxed) { - break; - } - } - }) - .unwrap() - }; - - let t_storage_create_accounts = { - let cluster_info = cluster_info.clone(); - let exit = exit.clone(); - let keypair = keypair.clone(); - let storage_keypair = storage_keypair.clone(); - let bank_forks = bank_forks.clone(); - Builder::new() - .name("solana-storage-create-accounts".to_string()) - .spawn(move || { - let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - - { - let working_bank = bank_forks.read().unwrap().working_bank(); - let storage_account = working_bank.get_account(&storage_keypair.pubkey()); - if storage_account.is_none() { - warn!("Storage account not found: {}", storage_keypair.pubkey()); - } - } - - loop { - match instruction_receiver.recv_timeout(Duration::from_secs(1)) { - Ok(instruction) => { - Self::send_transaction( - &bank_forks, - &cluster_info, - instruction, - &keypair, - &storage_keypair, - &transactions_socket, - &block_commitment_cache, - ) - .unwrap_or_else(|err| { - info!("failed to send storage transaction: {:?}", err) - }); - } - Err(e) => match e { - RecvTimeoutError::Disconnected => break, - RecvTimeoutError::Timeout => (), - }, - }; - - if exit.load(Ordering::Relaxed) { - break; - } - sleep(Duration::from_millis(100)); - } - }) - .unwrap() - }; - - StorageStage { - t_storage_mining_verifier, - t_storage_create_accounts, - } - } - - fn send_transaction( - bank_forks: &Arc>, - cluster_info: &ClusterInfo, - instruction: Instruction, - keypair: &Arc, - 
storage_keypair: &Arc, - transactions_socket: &UdpSocket, - block_commitment_cache: &Arc>, - ) -> io::Result<()> { - let working_bank = bank_forks.read().unwrap().working_bank(); - let blockhash = working_bank.confirmed_last_blockhash().0; - let keypair_balance = working_bank.get_balance(&keypair.pubkey()); - - if keypair_balance == 0 { - warn!("keypair account balance empty: {}", keypair.pubkey(),); - } else { - debug!( - "keypair account balance: {}: {}", - keypair.pubkey(), - keypair_balance - ); - } - if working_bank - .get_account(&storage_keypair.pubkey()) - .is_none() - { - warn!( - "storage account does not exist: {}", - storage_keypair.pubkey() - ); - } - - let signer_keys = vec![keypair.as_ref(), storage_keypair.as_ref()]; - let message = Message::new_with_payer(&[instruction], Some(&signer_keys[0].pubkey())); - let transaction = Transaction::new(&signer_keys, message, blockhash); - // try sending the transaction upto 5 times - for _ in 0..5 { - transactions_socket.send_to( - &bincode::serialize(&transaction).unwrap(), - cluster_info.my_contact_info().tpu, - )?; - sleep(Duration::from_millis(100)); - if Self::poll_for_signature_confirmation( - bank_forks, - block_commitment_cache, - &transaction.signatures[0], - 0, - ) - .is_ok() - { - break; - }; - } - Ok(()) - } - - fn poll_for_signature_confirmation( - bank_forks: &Arc>, - block_commitment_cache: &Arc>, - signature: &Signature, - min_confirmed_blocks: usize, - ) -> Result<()> { - let mut now = Instant::now(); - let mut confirmed_blocks = 0; - loop { - let working_bank = bank_forks.read().unwrap().working_bank(); - let response = working_bank.get_signature_status_slot(signature); - if let Some((slot, status)) = response { - let confirmations = if working_bank.src.roots().contains(&slot) { - MAX_LOCKOUT_HISTORY + 1 - } else { - let r_block_commitment_cache = block_commitment_cache.read().unwrap(); - r_block_commitment_cache - .get_confirmation_count(slot) - .unwrap_or(0) - }; - if status.is_ok() { - if confirmed_blocks != confirmations { - now = Instant::now(); - confirmed_blocks = confirmations; - } - if confirmations >= min_confirmed_blocks { - break; - } - } - }; - if now.elapsed().as_secs() > 5 { - return Err(Error::from(io::Error::new( - io::ErrorKind::Other, - "signature not found", - ))); - } - sleep(Duration::from_millis(250)); - } - Ok(()) - } - - fn process_turn( - storage_keypair: &Arc, - state: &Arc>, - blockstore: &Arc, - blockhash: Hash, - slot: Slot, - slots_per_segment: u64, - instruction_sender: &InstructionSender, - total_proofs: usize, - ) -> Result<()> { - let mut seed = [0u8; 32]; - let signature = storage_keypair.sign_message(&blockhash.as_ref()); - - let ix = storage_instruction::advertise_recent_blockhash( - &storage_keypair.pubkey(), - blockhash, - get_segment_from_slot(slot, slots_per_segment), - ); - instruction_sender.send(ix)?; - - seed.copy_from_slice(&signature.as_ref()[..32]); - - let mut rng = ChaChaRng::from_seed(seed); - - { - let mut w_state = state.write().unwrap(); - w_state.slot = slot; - w_state.storage_blockhash = blockhash; - } - - if total_proofs == 0 { - return Ok(()); - } - - // Regenerate the answers - let num_segments = get_segment_from_slot(slot, slots_per_segment) as usize; - if num_segments == 0 { - info!("Ledger has 0 segments!"); - return Ok(()); - } - // TODO: what if the validator does not have this segment - let segment = signature.as_ref()[0] as usize % num_segments; - - debug!( - "storage verifying: segment: {} identities: {}", - segment, NUM_IDENTITIES, - ); - - let mut 
samples = vec![]; - for _ in 0..NUM_STORAGE_SAMPLES { - samples.push(rng.gen_range(0, 10)); - } - debug!("generated samples: {:?}", samples); - - // TODO: cuda required to generate the reference values - // but if it is missing, then we need to take care not to - // process storage mining results. - if solana_perf::perf_libs::api().is_some() { - // Lock the keys, since this is the IV memory, - // it will be updated in-place by the encryption. - // Should be overwritten by the proof signatures which replace the - // key values by the time it runs again. - - let mut statew = state.write().unwrap(); - - match chacha_cbc_encrypt_file_many_keys( - blockstore, - segment as u64, - statew.slots_per_segment, - &mut statew.storage_keys, - &samples, - ) { - Ok(hashes) => { - debug!("Success! encrypted ledger segment: {}", segment); - statew.storage_results.copy_from_slice(&hashes); - } - Err(e) => { - info!("error encrypting file: {:?}", e); - return Err(e.into()); - } - } - } - Ok(()) - } - - fn collect_proofs( - slot: Slot, - slots_per_segment: u64, - account_id: Pubkey, - account: Account, - storage_state: &Arc>, - current_key_idx: &mut usize, - ) -> usize { - let mut proofs_collected = 0; - if let Ok(StorageContract::ArchiverStorage { proofs, .. }) = account.state() { - //convert slot to segment - let segment = get_segment_from_slot(slot, slots_per_segment); - if let Some(proofs) = proofs.get(&segment) { - for proof in proofs.iter() { - { - // TODO do this only once per account and segment? and maybe do it somewhere else - debug!( - "generating storage_keys from storage txs current_key_idx: {}", - *current_key_idx - ); - let storage_keys = &mut storage_state.write().unwrap().storage_keys; - storage_keys[*current_key_idx..*current_key_idx + size_of::()] - .copy_from_slice(proof.signature.as_ref()); - *current_key_idx += size_of::(); - *current_key_idx %= storage_keys.len(); - } - - let mut statew = storage_state.write().unwrap(); - if statew.archiver_map.len() < segment as usize { - statew.archiver_map.resize(segment as usize, HashMap::new()); - } - let proof_segment_index = proof.segment_index as usize; - if proof_segment_index < statew.archiver_map.len() { - // TODO randomly select and verify the proof first - // Copy the submitted proof - statew.archiver_map[proof_segment_index] - .entry(account_id) - .or_default() - .push(proof.clone()); - proofs_collected += 1; - } - } - debug!("storage proof: slot: {}", slot); - } - } - proofs_collected - } - - fn process_entries( - storage_keypair: &Arc, - storage_state: &Arc>, - bank_receiver: &Receiver>>, - blockstore: &Arc, - storage_slots: &mut StorageSlots, - current_key_idx: &mut usize, - slots_per_turn: u64, - instruction_sender: &InstructionSender, - ) -> Result<()> { - let timeout = Duration::new(1, 0); - storage_slots - .pending_root_banks - .append(&mut bank_receiver.recv_timeout(timeout)?); - storage_slots - .pending_root_banks - .sort_unstable_by(|a, b| b.slot().cmp(&a.slot())); - // check if any rooted slots were missed leading up to this one and bump slot count and process proofs for each missed root - while let Some(bank) = storage_slots.pending_root_banks.pop() { - if bank.slot() > storage_slots.last_root { - storage_slots.slot_count += 1; - storage_slots.last_root = bank.slot(); - if storage_slots.slot_count % slots_per_turn == 0 { - // load all the archiver accounts in the bank. 
collect all their proofs at the current slot - let archiver_accounts = archiver_accounts(bank.as_ref()); - // find proofs, and use them to update - // the storage_keys with their signatures - let mut total_proofs = 0; - for (account_id, account) in archiver_accounts.into_iter() { - total_proofs += Self::collect_proofs( - bank.slot(), - bank.slots_per_segment(), - account_id, - account, - storage_state, - current_key_idx, - ); - } - - // TODO un-ignore this result and be sure to drain all pending proofs - let _ignored = Self::process_turn( - &storage_keypair, - &storage_state, - &blockstore, - bank.last_blockhash(), - bank.slot(), - bank.slots_per_segment(), - instruction_sender, - total_proofs, - ); - Self::submit_verifications( - get_segment_from_slot(bank.slot(), bank.slots_per_segment()), - &storage_state, - &storage_keypair, - instruction_sender, - )? - } - } - } - Ok(()) - } - - fn submit_verifications( - current_segment: u64, - storage_state: &Arc>, - storage_keypair: &Arc, - ix_sender: &Sender, - ) -> Result<()> { - // bundle up mining submissions from archivers - // and submit them in a tx to the leader to get rewarded. - let mut w_state = storage_state.write().unwrap(); - let mut max_proof_mask = 0; - let proof_mask_limit = storage_instruction::proof_mask_limit(); - let instructions: Vec<_> = w_state - .archiver_map - .iter_mut() - .enumerate() - .flat_map(|(_, proof_map)| { - let checked_proofs = proof_map - .iter_mut() - .filter_map(|(id, proofs)| { - if !proofs.is_empty() { - if (proofs.len() as u64) >= proof_mask_limit { - proofs.clear(); - None - } else { - max_proof_mask = cmp::max(max_proof_mask, proofs.len()); - Some(( - *id, - proofs - .drain(..) - .map(|_| ProofStatus::Valid) - .collect::>(), - )) - } - } else { - None - } - }) - .collect::>(); - - if !checked_proofs.is_empty() { - let max_accounts_per_ix = - storage_instruction::validation_account_limit(max_proof_mask); - let ixs = checked_proofs - .chunks(max_accounts_per_ix as usize) - .map(|checked_proofs| { - proof_validation( - &storage_keypair.pubkey(), - current_segment, - checked_proofs.to_vec(), - ) - }) - .collect::>(); - Some(ixs) - } else { - None - } - }) - .flatten() - .collect(); - let res: std::result::Result<_, _> = instructions - .into_iter() - .map(|ix| { - sleep(Duration::from_millis(100)); - ix_sender.send(ix) - }) - .collect(); - res?; - Ok(()) - } - - pub fn join(self) -> thread::Result<()> { - self.t_storage_create_accounts.join().unwrap(); - self.t_storage_mining_verifier.join() - } -} - -pub fn test_cluster_info(id: &Pubkey) -> Arc { - let contact_info = ContactInfo::new_localhost(id, 0); - let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); - Arc::new(cluster_info) -} - -pub fn get_identity_index_from_signature(key: &Signature) -> usize { - let rkey = key.as_ref(); - let mut res: usize = (rkey[0] as usize) - | ((rkey[1] as usize) << 8) - | ((rkey[2] as usize) << 16) - | ((rkey[3] as usize) << 24); - res &= NUM_IDENTITIES - 1; - res -} - -#[cfg(test)] -mod tests { - use super::*; - use rayon::prelude::*; - use solana_ledger::{ - genesis_utils::{create_genesis_config, GenesisConfigInfo}, - get_tmp_ledger_path, - }; - use solana_runtime::bank::Bank; - use solana_sdk::{ - hash::Hasher, - signature::{Keypair, Signer}, - }; - use std::{ - cmp::{max, min}, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - mpsc::channel, - Arc, RwLock, - }, - }; - - #[test] - fn test_storage_stage_none_ledger() { - let keypair = Arc::new(Keypair::new()); - let storage_keypair = 
Arc::new(Keypair::new()); - let exit = Arc::new(AtomicBool::new(false)); - - let cluster_info = test_cluster_info(&keypair.pubkey()); - let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new(&genesis_config)); - let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank.clone()], 0))); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let block_commitment_cache = Arc::new(RwLock::new( - BlockCommitmentCache::default_with_blockstore(blockstore), - )); - let (_slot_sender, slot_receiver) = channel(); - let storage_state = StorageState::new( - &bank.last_blockhash(), - SLOTS_PER_TURN_TEST, - bank.slots_per_segment(), - ); - let storage_stage = StorageStage::new( - &storage_state, - slot_receiver, - None, - &keypair, - &storage_keypair, - &exit.clone(), - &bank_forks, - &cluster_info, - block_commitment_cache, - ); - exit.store(true, Ordering::Relaxed); - storage_stage.join().unwrap(); - } - - #[test] - fn test_signature_distribution() { - // See that signatures have an even-ish distribution.. - let mut hist = Arc::new(vec![]); - for _ in 0..NUM_IDENTITIES { - Arc::get_mut(&mut hist).unwrap().push(AtomicUsize::new(0)); - } - let hasher = Hasher::default(); - { - let hist = hist.clone(); - (0..(32 * NUM_IDENTITIES)) - .into_par_iter() - .for_each(move |_| { - let keypair = Keypair::new(); - let hash = hasher.clone().result(); - let signature = keypair.sign_message(&hash.as_ref()); - let ix = get_identity_index_from_signature(&signature); - hist[ix].fetch_add(1, Ordering::Relaxed); - }); - } - - let mut hist_max = 0; - let mut hist_min = NUM_IDENTITIES; - for x in hist.iter() { - let val = x.load(Ordering::Relaxed); - hist_max = max(val, hist_max); - hist_min = min(val, hist_min); - } - info!("min: {} max: {}", hist_min, hist_max); - assert_ne!(hist_min, 0); - } -} diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 4e92639da9..d1a0908a3f 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -18,7 +18,6 @@ use crate::{ shred_fetch_stage::ShredFetchStage, sigverify_shreds::ShredSigVerifier, sigverify_stage::SigVerifyStage, - storage_stage::{StorageStage, StorageState}, }; use crossbeam_channel::unbounded; use solana_ledger::leader_schedule_cache::LeaderScheduleCache; @@ -50,7 +49,6 @@ pub struct Tvu { replay_stage: ReplayStage, ledger_cleanup_service: Option, accounts_background_service: AccountsBackgroundService, - storage_stage: StorageStage, accounts_hash_verifier: AccountsHashVerifier, } @@ -81,12 +79,10 @@ impl Tvu { pub fn new( vote_account: &Pubkey, authorized_voter_keypairs: Vec>, - storage_keypair: &Arc, bank_forks: &Arc>, cluster_info: &Arc, sockets: Sockets, blockstore: Arc, - storage_state: &StorageState, ledger_signal_receiver: Receiver, subscriptions: &Arc, poh_recorder: &Arc>, @@ -183,12 +179,12 @@ impl Tvu { leader_schedule_cache: leader_schedule_cache.clone(), latest_root_senders: vec![ledger_cleanup_slot_sender], accounts_hash_sender: Some(accounts_hash_sender), - block_commitment_cache: block_commitment_cache.clone(), + block_commitment_cache, transaction_status_sender, rewards_recorder_sender, }; - let (replay_stage, root_bank_receiver) = ReplayStage::new( + let replay_stage = ReplayStage::new( replay_stage_config, blockstore.clone(), bank_forks.clone(), @@ -212,18 +208,6 @@ impl Tvu { let accounts_background_service = AccountsBackgroundService::new(bank_forks.clone(), &exit); - let storage_stage = StorageStage::new( - storage_state, - 
root_bank_receiver, - Some(blockstore), - &keypair, - storage_keypair, - &exit, - &bank_forks, - &cluster_info, - block_commitment_cache, - ); - Tvu { fetch_stage, sigverify_stage, @@ -231,7 +215,6 @@ impl Tvu { replay_stage, ledger_cleanup_service, accounts_background_service, - storage_stage, accounts_hash_verifier, } } @@ -240,7 +223,6 @@ impl Tvu { self.retransmit_stage.join()?; self.fetch_stage.join()?; self.sigverify_stage.join()?; - self.storage_stage.join()?; if self.ledger_cleanup_service.is_some() { self.ledger_cleanup_service.unwrap().join()?; } @@ -289,7 +271,6 @@ pub mod tests { let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank, &blockstore, None); let vote_keypair = Keypair::new(); - let storage_keypair = Arc::new(Keypair::new()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let block_commitment_cache = Arc::new(RwLock::new( BlockCommitmentCache::default_with_blockstore(blockstore.clone()), @@ -299,7 +280,6 @@ pub mod tests { let tvu = Tvu::new( &vote_keypair.pubkey(), vec![Arc::new(vote_keypair)], - &storage_keypair, &bank_forks, &cref1, { @@ -311,7 +291,6 @@ pub mod tests { } }, blockstore, - &StorageState::default(), l_receiver, &Arc::new(RpcSubscriptions::new( &exit, diff --git a/core/src/validator.rs b/core/src/validator.rs index 371a5f7b75..8f732b3bf7 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -18,7 +18,6 @@ use crate::{ serve_repair_service::ServeRepairService, sigverify, snapshot_packager_service::SnapshotPackagerService, - storage_stage::StorageState, tpu::Tpu, transaction_status_service::TransactionStatusService, tvu::{Sockets, Tvu, TvuConfig}, @@ -36,7 +35,7 @@ use solana_ledger::{ use solana_metrics::datapoint_info; use solana_runtime::bank::Bank; use solana_sdk::{ - clock::{Slot, DEFAULT_SLOTS_PER_TURN}, + clock::Slot, epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, genesis_config::GenesisConfig, hash::Hash, @@ -63,7 +62,6 @@ pub struct ValidatorConfig { pub expected_genesis_hash: Option, pub expected_shred_version: Option, pub voting_disabled: bool, - pub storage_slots_per_turn: u64, pub account_paths: Vec, pub rpc_config: JsonRpcConfig, pub rpc_ports: Option<(u16, u16)>, // (API, PubSub) @@ -90,7 +88,6 @@ impl Default for ValidatorConfig { expected_genesis_hash: None, expected_shred_version: None, voting_disabled: false, - storage_slots_per_turn: DEFAULT_SLOTS_PER_TURN, max_ledger_shreds: None, account_paths: Vec::new(), rpc_config: JsonRpcConfig::default(), @@ -153,7 +150,6 @@ impl Validator { ledger_path: &Path, vote_account: &Pubkey, mut authorized_voter_keypairs: Vec>, - storage_keypair: &Arc, entrypoint_info_option: Option<&ContactInfo>, poh_verify: bool, config: &ValidatorConfig, @@ -227,13 +223,6 @@ impl Validator { } let cluster_info = Arc::new(ClusterInfo::new(node.info.clone(), keypair.clone())); - - let storage_state = StorageState::new( - &bank.last_blockhash(), - config.storage_slots_per_turn, - bank.slots_per_segment(), - ); - let blockstore = Arc::new(blockstore); let block_commitment_cache = Arc::new(RwLock::new( BlockCommitmentCache::default_with_blockstore(blockstore.clone()), @@ -264,7 +253,6 @@ impl Validator { cluster_info.clone(), genesis_config.hash(), ledger_path, - storage_state.clone(), validator_exit.clone(), config.trusted_validators.clone(), ), @@ -394,7 +382,6 @@ impl Validator { let tvu = Tvu::new( vote_account, authorized_voter_keypairs, - storage_keypair, &bank_forks, &cluster_info, Sockets { @@ -423,7 +410,6 @@ impl Validator { 
.collect(), }, blockstore.clone(), - &storage_state, ledger_signal_receiver, &subscriptions, &poh_recorder, @@ -715,7 +701,6 @@ impl TestValidator { let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let leader_voting_keypair = Arc::new(voting_keypair); - let storage_keypair = Arc::new(Keypair::new()); let config = ValidatorConfig { rpc_ports: Some((node.info.rpc.port(), node.info.rpc_pubsub.port())), ..ValidatorConfig::default() @@ -726,7 +711,6 @@ impl TestValidator { &ledger_path, &leader_voting_keypair.pubkey(), vec![leader_voting_keypair.clone()], - &storage_keypair, None, true, &config, @@ -882,7 +866,6 @@ mod tests { let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let voting_keypair = Arc::new(Keypair::new()); - let storage_keypair = Arc::new(Keypair::new()); let config = ValidatorConfig { rpc_ports: Some(( validator_node.info.rpc.port(), @@ -896,7 +879,6 @@ mod tests { &validator_ledger_path, &voting_keypair.pubkey(), vec![voting_keypair.clone()], - &storage_keypair, Some(&leader_node.info), true, &config, @@ -921,7 +903,6 @@ mod tests { let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); ledger_paths.push(validator_ledger_path.clone()); let vote_account_keypair = Arc::new(Keypair::new()); - let storage_keypair = Arc::new(Keypair::new()); let config = ValidatorConfig { rpc_ports: Some(( validator_node.info.rpc.port(), @@ -935,7 +916,6 @@ mod tests { &validator_ledger_path, &vote_account_keypair.pubkey(), vec![vote_account_keypair.clone()], - &storage_keypair, Some(&leader_node.info), true, &config, diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 5ae3b2aa40..84cfffe421 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -4,7 +4,7 @@ use crate::{ cluster_info::ClusterInfo, cluster_slots::ClusterSlots, - repair_service::{RepairService, RepairStrategy}, + repair_service::{RepairInfo, RepairService}, result::{Error, Result}, }; use crossbeam_channel::{ @@ -254,7 +254,7 @@ impl WindowService { retransmit: PacketSender, repair_socket: Arc, exit: &Arc, - repair_strategy: RepairStrategy, + repair_info: RepairInfo, leader_schedule_cache: &Arc, shred_filter: F, cluster_slots: Arc, @@ -265,17 +265,14 @@ impl WindowService { + std::marker::Send + std::marker::Sync, { - let bank_forks = match repair_strategy { - RepairStrategy::RepairRange(_) => None, - RepairStrategy::RepairAll { ref bank_forks, .. 
} => Some(bank_forks.clone()), - }; + let bank_forks = Some(repair_info.bank_forks.clone()); let repair_service = RepairService::new( blockstore.clone(), exit.clone(), repair_socket, cluster_info.clone(), - repair_strategy, + repair_info, cluster_slots, ); @@ -491,10 +488,6 @@ impl WindowService { #[cfg(test)] mod test { use super::*; - use crate::{ - cluster_info::ClusterInfo, contact_info::ContactInfo, repair_service::RepairSlotRange, - }; - use rand::thread_rng; use solana_ledger::shred::DataShredHeader; use solana_ledger::{ blockstore::{make_many_slot_entries, Blockstore}, @@ -503,21 +496,13 @@ mod test { get_tmp_ledger_path, shred::Shredder, }; - use solana_perf::packet::Packet; use solana_sdk::{ clock::Slot, epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, hash::Hash, signature::{Keypair, Signer}, }; - use std::{ - net::UdpSocket, - sync::atomic::{AtomicBool, Ordering}, - sync::mpsc::channel, - sync::Arc, - thread::sleep, - time::Duration, - }; + use std::sync::Arc; fn local_entries_to_shred( entries: &[Entry], @@ -620,71 +605,6 @@ mod test { ); } - fn make_test_window( - verified_receiver: CrossbeamReceiver>, - exit: Arc, - ) -> WindowService { - let blockstore_path = get_tmp_ledger_path!(); - let (blockstore, _, _) = Blockstore::open_with_signal(&blockstore_path) - .expect("Expected to be able to open database ledger"); - - let blockstore = Arc::new(blockstore); - let (retransmit_sender, _retransmit_receiver) = channel(); - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair( - ContactInfo::new_localhost(&Pubkey::default(), 0), - )); - let cluster_slots = Arc::new(ClusterSlots::default()); - let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap()); - let window = WindowService::new( - blockstore, - cluster_info, - verified_receiver, - retransmit_sender, - repair_sock, - &exit, - RepairStrategy::RepairRange(RepairSlotRange { start: 0, end: 0 }), - &Arc::new(LeaderScheduleCache::default()), - |_, _, _, _| true, - cluster_slots, - ); - window - } - - #[test] - fn test_recv_window() { - let (packet_sender, packet_receiver) = unbounded(); - let exit = Arc::new(AtomicBool::new(false)); - let window = make_test_window(packet_receiver, exit.clone()); - // send 5 slots worth of data to the window - let (shreds, _) = make_many_slot_entries(0, 5, 10); - let packets: Vec<_> = shreds - .into_iter() - .map(|mut s| { - let mut p = Packet::default(); - p.data.copy_from_slice(&mut s.payload); - p - }) - .collect(); - let mut packets = Packets::new(packets); - packet_sender.send(vec![packets.clone()]).unwrap(); - sleep(Duration::from_millis(500)); - - // add some empty packets to the data set. 
These should fail to deserialize - packets.packets.append(&mut vec![Packet::default(); 10]); - packets.packets.shuffle(&mut thread_rng()); - packet_sender.send(vec![packets.clone()]).unwrap(); - sleep(Duration::from_millis(500)); - - // send 1 empty packet that cannot deserialize into a shred - packet_sender - .send(vec![Packets::new(vec![Packet::default(); 1])]) - .unwrap(); - sleep(Duration::from_millis(500)); - - exit.store(true, Ordering::Relaxed); - window.join().unwrap(); - } - #[test] fn test_run_check_duplicate() { let blockstore_path = get_tmp_ledger_path!(); diff --git a/core/tests/storage_stage.rs b/core/tests/storage_stage.rs deleted file mode 100644 index 4594aa76af..0000000000 --- a/core/tests/storage_stage.rs +++ /dev/null @@ -1,261 +0,0 @@ -// Long-running storage_stage tests - -#[cfg(test)] -mod tests { - use log::*; - use solana_core::{ - commitment::BlockCommitmentCache, - storage_stage::{ - get_identity_index_from_signature, test_cluster_info, StorageStage, StorageState, - SLOTS_PER_TURN_TEST, - }, - }; - use solana_ledger::{ - bank_forks::BankForks, - blockstore::Blockstore, - blockstore_processor, create_new_tmp_ledger, entry, - genesis_utils::{create_genesis_config, GenesisConfigInfo}, - }; - use solana_runtime::bank::Bank; - use solana_sdk::{ - clock::DEFAULT_TICKS_PER_SLOT, - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::{Keypair, Signature, Signer}, - transaction::Transaction, - }; - use solana_storage_program::storage_instruction::{self, StorageAccountType}; - use std::{ - fs::remove_dir_all, - sync::{ - atomic::{AtomicBool, Ordering}, - mpsc::channel, - Arc, RwLock, - }, - thread::sleep, - time::Duration, - }; - - fn get_mining_result(storage_state: &StorageState, key: &Signature) -> Hash { - let idx = get_identity_index_from_signature(key); - storage_state.state.read().unwrap().storage_results[idx] - } - - #[test] - fn test_storage_stage_process_account_proofs() { - solana_logger::setup(); - let keypair = Arc::new(Keypair::new()); - let storage_keypair = Arc::new(Keypair::new()); - let archiver_keypair = Arc::new(Keypair::new()); - let exit = Arc::new(AtomicBool::new(false)); - - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. 
- } = create_genesis_config(1000); - genesis_config - .native_instruction_processors - .push(solana_storage_program::solana_storage_program!()); - let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - - let bank = Bank::new(&genesis_config); - let bank = Arc::new(bank); - let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank.clone()], 0))); - let block_commitment_cache = Arc::new(RwLock::new( - BlockCommitmentCache::default_with_blockstore(blockstore.clone()), - )); - let cluster_info = test_cluster_info(&keypair.pubkey()); - - let (bank_sender, bank_receiver) = channel(); - let storage_state = StorageState::new( - &bank.last_blockhash(), - SLOTS_PER_TURN_TEST, - bank.slots_per_segment(), - ); - let storage_stage = StorageStage::new( - &storage_state, - bank_receiver, - Some(blockstore.clone()), - &keypair, - &storage_keypair, - &exit.clone(), - &bank_forks, - &cluster_info, - block_commitment_cache, - ); - bank_sender.send(vec![bank.clone()]).unwrap(); - - // create accounts - let bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 1)); - let account_ixs = storage_instruction::create_storage_account( - &mint_keypair.pubkey(), - &Pubkey::new_rand(), - &archiver_keypair.pubkey(), - 1, - StorageAccountType::Archiver, - ); - let account_tx = Transaction::new_signed_instructions( - &[&mint_keypair, &archiver_keypair], - &account_ixs, - bank.last_blockhash(), - ); - bank.process_transaction(&account_tx).expect("create"); - - bank_sender.send(vec![bank.clone()]).unwrap(); - - let mut reference_keys; - { - let keys = &storage_state.state.read().unwrap().storage_keys; - reference_keys = vec![0; keys.len()]; - reference_keys.copy_from_slice(keys); - } - - let keypair = Keypair::new(); - - let mining_proof_ix = storage_instruction::mining_proof( - &archiver_keypair.pubkey(), - Hash::default(), - 0, - keypair.sign_message(b"test"), - bank.last_blockhash(), - ); - - let next_bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 2)); - //register ticks so the program reports a different segment - blockstore_processor::process_entries( - &next_bank, - &entry::create_ticks( - DEFAULT_TICKS_PER_SLOT * next_bank.slots_per_segment() + 1, - 0, - bank.last_blockhash(), - ), - true, - None, - ) - .unwrap(); - let message = Message::new_with_payer(&[mining_proof_ix], Some(&mint_keypair.pubkey())); - let mining_proof_tx = Transaction::new( - &[&mint_keypair, archiver_keypair.as_ref()], - message, - next_bank.last_blockhash(), - ); - next_bank - .process_transaction(&mining_proof_tx) - .expect("process txs"); - bank_sender.send(vec![next_bank]).unwrap(); - - for _ in 0..5 { - { - let keys = &storage_state.state.read().unwrap().storage_keys; - if keys[..] != *reference_keys.as_slice() { - break; - } - } - - sleep(Duration::new(1, 0)); - } - - debug!("joining..?"); - exit.store(true, Ordering::Relaxed); - storage_stage.join().unwrap(); - - { - let keys = &storage_state.state.read().unwrap().storage_keys; - assert_ne!(keys[..], *reference_keys); - } - - remove_dir_all(ledger_path).unwrap(); - } - - #[test] - fn test_storage_stage_process_banks() { - solana_logger::setup(); - let keypair = Arc::new(Keypair::new()); - let storage_keypair = Arc::new(Keypair::new()); - let exit = Arc::new(AtomicBool::new(false)); - - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(1000); - let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let slot = 1; - let bank = Arc::new(Bank::new(&genesis_config)); - let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank.clone()], 0))); - let block_commitment_cache = Arc::new(RwLock::new( - BlockCommitmentCache::default_with_blockstore(blockstore.clone()), - )); - - let cluster_info = test_cluster_info(&keypair.pubkey()); - let (bank_sender, bank_receiver) = channel(); - let storage_state = StorageState::new( - &bank.last_blockhash(), - SLOTS_PER_TURN_TEST, - bank.slots_per_segment(), - ); - let storage_stage = StorageStage::new( - &storage_state, - bank_receiver, - Some(blockstore.clone()), - &keypair, - &storage_keypair, - &exit.clone(), - &bank_forks, - &cluster_info, - block_commitment_cache, - ); - bank_sender.send(vec![bank.clone()]).unwrap(); - - let keypair = Keypair::new(); - let hash = Hash::default(); - let signature = keypair.sign_message(&hash.as_ref()); - - let mut result = get_mining_result(&storage_state, &signature); - - assert_eq!(result, Hash::default()); - - let mut last_bank = bank; - let rooted_banks = (slot..slot + last_bank.slots_per_segment() + 1) - .map(|i| { - let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i)); - blockstore_processor::process_entries( - &bank, - &entry::create_ticks(64, 0, bank.last_blockhash()), - true, - None, - ) - .expect("failed process entries"); - last_bank = bank; - last_bank.clone() - }) - .collect::<Vec<_>>(); - bank_sender.send(rooted_banks).unwrap(); - - if solana_perf::perf_libs::api().is_some() { - for _ in 0..5 { - result = get_mining_result(&storage_state, &signature); - if result != Hash::default() { - info!("found result = {:?} sleeping..", result); - break; - } - info!("result = {:?} sleeping..", result); - sleep(Duration::new(1, 0)); - } - } - - info!("joining..?"); - exit.store(true, Ordering::Relaxed); - storage_stage.join().unwrap(); - - if solana_perf::perf_libs::api().is_some() { - assert_ne!(result, Hash::default()); - } else { - assert_eq!(result, Hash::default()); - } - - remove_dir_all(ledger_path).unwrap(); - } -} diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index d9d30ea475..611cab342d 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -58,7 +58,6 @@ * [Fork Generation](cluster/fork-generation.md) * [Managing Forks](cluster/managing-forks.md) * [Turbine Block Propagation](cluster/turbine-block-propagation.md) - * [Ledger Replication](cluster/ledger-replication.md) * [Secure Vote Signing](cluster/vote-signing.md) * [Stake Delegation and Rewards](cluster/stake-delegation-and-rewards.md) * [Anatomy of a Validator](validator/README.md) @@ -75,14 +74,9 @@ * [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md) * [State-validation Protocol-based Rewards](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md) * [State-validation Transaction Fees](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) - * [Replication-validation Transaction Fees](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md) * [Validation Stake Delegation](implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md) - * [Replication-client 
Economics](implemented-proposals/ed_overview/ed_replication_client_economics/README.md) - * [Storage-replication Rewards](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md) - * [Replication-client Reward Auto-delegation](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md) * [Storage Rent Economics](implemented-proposals/ed_overview/ed_storage_rent_economics.md) * [Economic Sustainability](implemented-proposals/ed_overview/ed_economic_sustainability.md) - * [Attack Vectors](implemented-proposals/ed_overview/ed_attack_vectors.md) * [Economic Design MVP](implemented-proposals/ed_overview/ed_mvp.md) * [References](implemented-proposals/ed_overview/ed_references.md) * [Deterministic Transaction Fees](implemented-proposals/transaction-fees.md) @@ -102,7 +96,6 @@ * [Commitment](implemented-proposals/commitment.md) * [Snapshot Verification](implemented-proposals/snapshot-verification.md) * [Accepted Design Proposals](proposals/README.md) - * [Ledger Replication](proposals/ledger-replication-to-implement.md) * [Secure Vote Signing](proposals/vote-signing-to-implement.md) * [Cluster Test Framework](proposals/cluster-test-framework.md) * [Validator](proposals/validator-proposal.md) diff --git a/docs/src/cluster/README.md b/docs/src/cluster/README.md index 298e77744b..3c34106563 100644 --- a/docs/src/cluster/README.md +++ b/docs/src/cluster/README.md @@ -4,17 +4,17 @@ A Solana cluster is a set of validators working together to serve client transac ## Creating a Cluster -Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap validator_. The validator holding the bootstrap validator's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap validator to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster. +Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap validator_. The validator holding the bootstrap validator's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap validator to register as a _validator_. Additional validators then register with any registered member of the cluster. -A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until archiver nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy. +A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries. Once the validator observes a sufficient number of copies exist, it deletes its copy. ## Joining a Cluster -Validators and archivers enter the cluster via registration messages sent to its _control plane_. 
The control plane is implemented using a _gossip_ protocol, meaning that a node may register with any existing node, and expect its registration to propagate to all nodes in the cluster. The time it takes for all nodes to synchronize is proportional to the square of the number of nodes participating in the cluster. Algorithmically, that's considered very slow, but in exchange for that time, a node is assured that it eventually has all the same information as every other node, and that that information cannot be censored by any one node. +Validators enter the cluster via registration messages sent to its _control plane_. The control plane is implemented using a _gossip_ protocol, meaning that a node may register with any existing node, and expect its registration to propagate to all nodes in the cluster. The time it takes for all nodes to synchronize is proportional to the square of the number of nodes participating in the cluster. Algorithmically, that's considered very slow, but in exchange for that time, a node is assured that it eventually has all the same information as every other node, and that that information cannot be censored by any one node. ## Sending Transactions to a Cluster -Clients send transactions to any validator's Transaction Processing Unit \(TPU\) port. If the node is in the validator role, it forwards the transaction to the designated leader. If in the leader role, the node bundles incoming transactions, timestamps them creating an _entry_, and pushes them onto the cluster's _data plane_. Once on the data plane, the transactions are validated by validator nodes and replicated by archiver nodes, effectively appending them to the ledger. +Clients send transactions to any validator's Transaction Processing Unit \(TPU\) port. If the node is in the validator role, it forwards the transaction to the designated leader. If in the leader role, the node bundles incoming transactions, timestamps them creating an _entry_, and pushes them onto the cluster's _data plane_. Once on the data plane, the transactions are validated by validator nodes, effectively appending them to the ledger. ## Confirming Transactions diff --git a/docs/src/cluster/ledger-replication.md b/docs/src/cluster/ledger-replication.md deleted file mode 100644 index 34792ffd9e..0000000000 --- a/docs/src/cluster/ledger-replication.md +++ /dev/null @@ -1,269 +0,0 @@ -# Ledger Replication - -At full capacity on a 1gbps network Solana will generate 4 petabytes of data per year. To prevent the network from centralizing around validators that have to store the full data set, this protocol proposes a way for mining nodes to provide storage capacity for pieces of the data. - -The basic idea of Proof of Replication is to encrypt a dataset with a public symmetric key using CBC encryption, then hash the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as it's hashed. The simple solution is to periodically regenerate the hash based on a signed PoH value. This ensures that all the data is present during the generation of the proof and it also requires validators to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `number_of_proofs * data_size`. - -## Optimization with PoH - -Our improvement on this approach is to randomly sample the encrypted segments faster than it takes to encrypt, and record the hash of those samples into the PoH ledger. 
Thus the segments stay in the exact same order for every PoRep and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. The total space required for verification is `1_ledger_segment + 2_cbc_blocks * number_of_identities` with core count equal to `number_of_identities`. We use a 64-byte chacha CBC block size. - -## Network - -Validators for PoRep are the same validators that are verifying transactions. If an archiver can prove that a validator verified a fake PoRep, then the validator will not receive a reward for that storage epoch. - -Archivers are specialized _light clients_. They download a part of the ledger \(a.k.a. Segment\) and store it, and provide PoReps of storing the ledger. For each verified PoRep, archivers earn a reward of SOL from the mining pool. - -## Constraints - -We have the following constraints: - -* Verification requires generating the CBC blocks. That requires space of 2 - - blocks per identity, and 1 CUDA core per identity for the same dataset. So as - - many identities as possible should be batched at once, with as many proofs for those - - identities verified concurrently for the same dataset. - -* Validators will randomly sample the set of storage proofs down to the set that - - they can handle, and only the creators of those chosen proofs will be - - rewarded. The validator can run a benchmark whenever its hardware configuration - - changes to determine what rate it can validate storage proofs. - -## Validation and Replication Protocol - -### Constants - -1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The - - unit of storage for an archiver. - -2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which archivers - - regenerate their encryption keys and select a new dataset to store. - -3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof - - claim to be successfully rewarded. - -4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage - - mining proof claim has to contain to be valid for a reward. - -5. NUM\_STORAGE\_SAMPLES: Number of samples required for a storage mining - - proof. - -6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate - - encrypted state. - -7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or - - a "turn" of the PoRep game. - -### Validator behavior - -1. Validators join the network and begin looking for archiver accounts at each - - storage epoch/turn boundary. - -2. Every turn, Validators sign the PoH value at the boundary and use that signature - - to randomly pick proofs to verify from each storage account found in the turn boundary. - - This signed value is also submitted to the validator's storage account and will be used by - - archivers at a later stage to cross-verify. - -3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value - - is also served to Archivers via RPC interfaces. - -4. For a given turn N, all validations get locked out until turn N+3 \(a gap of 2 turns/epochs\). - - At that point all validations during that turn are available for reward collection. - -5. Any incorrect validations will be marked during the turn in between. - -### Archiver behavior - -1. Since an archiver is somewhat of a light client and not downloading all the - - ledger data, it has to rely on other validators and archivers for information. 
- - Any given validator may or may not be malicious and give incorrect information, although - - there are not any obvious attack vectors that this could accomplish besides having the - - archiver do extra wasted work. For many of the operations there are a number of options - - depending on how paranoid an archiver is: - - * \(a\) archiver can ask a validator - * \(b\) archiver can ask multiple validators - * \(c\) archiver can ask other archivers - * \(d\) archiver can subscribe to the full transaction stream and generate - - the information itself \(assuming the slot is recent enough\) - - * \(e\) archiver can subscribe to an abbreviated transaction stream to - - generate the information itself \(assuming the slot is recent enough\) - -2. An archiver obtains the PoH hash corresponding to the last turn with its slot. -3. The archiver signs the PoH hash with its keypair. That signature is the - - seed used to pick the segment to replicate and also the encryption key. The - - archiver mods the signature with the slot to determine which segment to - - replicate. - -4. The archiver retrieves the ledger by asking peer validators and - - archivers. See 6.5. - -5. The archiver then encrypts that segment with the key using the chacha algorithm - - in CBC mode with `NUM_CHACHA_ROUNDS` rounds of encryption. - -6. The archiver initializes a chacha rng with a signed recent PoH value as - - the seed. - -7. The archiver generates `NUM_STORAGE_SAMPLES` samples in the range of the - - entry size and samples the encrypted segment with sha256 for 32 bytes at each - - offset value. Sampling the state should be faster than generating the encrypted - - segment. - -8. The archiver sends a PoRep proof transaction which contains its sha state - - at the end of the sampling operation, its seed, and the samples it used to the - - current leader, and it is put onto the ledger. - -9. During a given turn the archiver should submit many proofs for the same segment - - and, based on the `RATIO_OF_FAKE_PROOFS`, some of those proofs must be fake. - -10. As the PoRep game enters the next turn, the archiver must submit a - - transaction with the mask of which proofs were fake during the last turn. This - - transaction will define the rewards for both archivers and validators. - -11. Finally, for a turn N, as the PoRep game enters turn N + 3, an archiver's proofs for - - turn N will be counted towards its rewards. - -### The PoRep Game - -The Proof of Replication game has 4 primary stages. For each "turn" multiple PoRep games can be in progress but each in a different stage. - -The 4 stages of the PoRep Game are as follows: - -1. Proof submission stage - * Archivers: submit as many proofs as possible during this stage - * Validators: No-op -2. Proof verification stage - * Archivers: No-op - * Validators: Select archivers and verify their proofs from the previous turn -3. Proof challenge stage - * Archivers: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\) - * Validators: No-op -4. Reward collection stage - * Archivers: Collect rewards for 3 turns ago - * Validators: Collect rewards for 3 turns ago - -For each turn of the PoRep game, both Validators and Archivers evaluate each stage. The stages are run as separate transactions on the storage program. - -### Finding who has a given block of ledger - -1. Validators monitor the turns in the PoRep game and look at the rooted bank - - at turn boundaries for any proofs. - -2. Validators maintain a map of ledger segments and corresponding archiver public keys. 
- - The map is updated when a Validator processes an archiver's proofs for a segment. - - The validator provides an RPC interface to access this map. Using this API, clients - - can map a segment to an archiver's network address \(correlating it via the cluster\_info table\). - - The clients can then send repair requests to the archiver to retrieve segments. - -3. Validators would need to invalidate this list every N turns. - -## Sybil attacks - -For any random seed, we force everyone to use a signature that is derived from a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity. - -Since there are many more client identities than encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT, we want to prevent a single human entity from storing all the replications of a single chunk of the ledger. - -Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore PoRep identities and blocks. Thus, to get a reward, archivers need to store the first block for free and the network can reward long lived client identities more than new ones. - -## Validator attacks - -* If a validator approves fake proofs, an archiver can easily out them by - - showing the initial state for the hash. - -* If a validator marks real proofs as fake, no on-chain computation can be done - - to distinguish who is correct. Rewards would have to rely on the results from - - multiple validators to catch bad actors and keep archivers from being denied rewards. - -* A validator could steal mining proof results for itself. The proofs are derived - - from a signature from an archiver; since the validator does not know the - - private key used to generate the encryption key, it cannot be the generator of - - the proof. - -## Reward incentives - -Fake proofs are easy to generate but difficult to verify. For this reason, PoRep proof transactions generated by archivers may require a higher fee than a normal transaction to represent the computational cost required by validators. - -Some percentage of fake proofs are also necessary to receive a reward from storage mining. - -## Notes - -* We can reduce the costs of verification of PoRep by using PoH, and actually - - make it feasible to verify a large number of proofs for a global dataset. - -* We can eliminate grinding by forcing everyone to sign the same PoH hash and - - use the signatures as the seed. - -* The game between validators and archivers is over random blocks and random - - encryption identities and random data samples. The goal of randomization is - - to prevent colluding groups from having overlap on data or validation. - -* Archiver clients fish for lazy validators by submitting fake proofs that - - they can prove are fake. - -* To defend against Sybil client identities that try to store the same block, we - - force the clients to store for multiple rounds before receiving a reward. - -* Validators should also get rewarded for validating submitted storage proofs - - as incentive for storing the ledger. 
They can only validate proofs if they - are storing that slice of the ledger. - diff --git a/docs/src/cluster/synchronization.md b/docs/src/cluster/synchronization.md index 6394de3fe6..cd51d27fe9 100644 --- a/docs/src/cluster/synchronization.md +++ b/docs/src/cluster/synchronization.md @@ -18,7 +18,7 @@ Another difference between PoH and VDFs is that a VDF is used only for tracking ## Relationship to Consensus Mechanisms -Proof of History is not a consensus mechanism, but it is used to improve the performance of Solana's Proof of Stake consensus. It is also used to improve the performance of the data plane and replication protocols. +Proof of History is not a consensus mechanism, but it is used to improve the performance of Solana's Proof of Stake consensus. It is also used to improve the performance of the data plane protocols. ## More on Proof of History diff --git a/docs/src/implemented-proposals/ed_overview/README.md b/docs/src/implemented-proposals/ed_overview/README.md index 165c2403c3..d792e7669a 100644 --- a/docs/src/implemented-proposals/ed_overview/README.md +++ b/docs/src/implemented-proposals/ed_overview/README.md @@ -2,15 +2,15 @@ **Subject to change.** -Solana’s crypto-economic system is designed to promote a healthy, long term self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients and replication-clients. Their contributions to the network, state validation and data storage respectively, and their requisite incentive mechanisms are discussed below. +Solana’s crypto-economic system is designed to promote a healthy, long term self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients. Their contributions to the network, state validation, and their requisite incentive mechanisms are discussed below. -The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are issuances from a global, protocol-defined, inflation rate. These rewards will constitute the total reward delivered to replication and validation clients, the remaining sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on predefined issuance schedule, will drive the majority of participant incentives to participate in the network. +The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are issuances from a global, protocol-defined, inflation rate. These rewards will constitute the total reward delivered to validation clients, with the remainder sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on a predefined issuance schedule, will drive the majority of participant incentives to participate in the network. -These protocol-based rewards, to be distributed to participating validation and replication clients, are to be a result of a global supply inflation rate, calculated per Solana epoch and distributed amongst the active validator set. As discussed further below, the per annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with monetary supply predictability which supports long term economic stability and security. 
+These protocol-based rewards, to be distributed to participating validation clients, are to be a result of a global supply inflation rate, calculated per Solana epoch and distributed amongst the active validator set. As discussed further below, the per annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with monetary supply predictability which supports long term economic stability and security. -Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction \(be it a state execution or proof-of-replication verification\). A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below. +Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction. A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below. -A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/README.md), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the section titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. [Replication-client Economics](ed_replication_client_economics/README.md) will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aide participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized. +A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/README.md), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), and [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md). 
Also, the section titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. ![](../../.gitbook/assets/economic_design_infl_230719.png) diff --git a/docs/src/implemented-proposals/ed_overview/ed_attack_vectors.md b/docs/src/implemented-proposals/ed_overview/ed_attack_vectors.md deleted file mode 100644 index 419720f3d5..0000000000 --- a/docs/src/implemented-proposals/ed_overview/ed_attack_vectors.md +++ /dev/null @@ -1,14 +0,0 @@ -# Attack Vectors - -**Subject to change.** - -## Colluding validation and replication clients - -A colluding validation-client may take the strategy to mark PoReps from non-colluding archiver nodes as invalid as an attempt to maximize the rewards for the colluding archiver nodes. In this case, it isn’t feasible for the offended-against archiver nodes to petition the network for resolution as this would result in a network-wide vote on each offending PoRep and create too much overhead for the network to progress adequately. Also, this mitigation attempt would still be vulnerable to a >= 51% staked colluder. - -Alternatively, transaction fees from submitted PoReps are pooled and distributed across validation-clients in proportion to the number of valid PoReps discounted by the number of invalid PoReps as voted by each validator-client. Thus invalid votes are directly dis-incentivized through this reward channel. Invalid votes that are revealed by archiver nodes as fishing PoReps will not be discounted from the payout PoRep count. - -Another collusion attack involves a validator-client who may take the strategy to ignore invalid PoReps from a colluding archiver and vote them as valid. In this case, colluding archiver-clients would not have to store the data while still receiving rewards for validated PoReps. Additionally, colluding validator nodes would also receive rewards for validating these PoReps. To mitigate this attack, validators must randomly sample PoReps corresponding to the ledger block they are validating, and because of this, there will be multiple validators that will receive the colluding archiver’s invalid submissions. These non-colluding validators will be incentivized to mark these PoReps as invalid as they have no way to determine whether the proposed invalid PoRep is actually a fishing PoRep, for which a confirmation vote would result in the validator’s stake being slashed. - -In this case, the proportion of time a colluding pair will be successful has an upper limit determined by the % of stake of the network claimed by the colluding validator. This also sets bounds to the value of such an attack. For example, if a colluding validator controls 10% of the total validator stake, transaction fees will be lost \(likely sent to the mining pool\) by the colluding archiver 90% of the time and so the attack vector is only profitable if the per-PoRep reward is at least 90% higher than the average PoRep transaction fee. 
While, probabilistically, some colluding archiver-client PoReps will find their way to colluding validation-clients, the network can also monitor rates of paired \(validator + archiver\) discrepancies in voting patterns and censor identified colluders in these cases. - diff --git a/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md b/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md index 5f8b06d055..329ab6957f 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md +++ b/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md @@ -4,13 +4,4 @@ Long term economic sustainability is one of the guiding principles of Solana’s economic design. While it is impossible to predict how decentralized economies will develop over time, especially economies with flexible decentralized governances, we can arrange economic components such that, under certain conditions, a sustainable economy may take shape in the long term. In the case of Solana’s network, these components take the form of token issuance \(via inflation\) and token burning. -The dominant remittances from the Solana mining pool are validator and archiver rewards. The disinflationary mechanism is a flat, protocol-specified and adjusted, % of each transaction fee. - -The Archiver rewards are to be delivered to archivers as a portion of the network inflation after successful PoRep validation. The per-PoRep reward amount is determined as a function of the total network storage redundancy at the time of the PoRep validation and the network goal redundancy. This function is likely to take the form of a discount from a base reward to be delivered when the network has achieved and maintained its goal redundancy. An example of such a reward function is shown in **Figure 1**. - -![](../../.gitbook/assets/porep_reward.png) - -**Figure 1**: Example PoRep reward design as a function of global network storage redundancy. - -In the example shown in **Figure 1**, multiple per PoRep base rewards are explored \(as a % of Tx Fee\) to be delivered when the global ledger replication redundancy meets 10X. When the global ledger replication redundancy is less than 10X, the base reward is discounted as a function of the square of the ratio of the actual ledger replication redundancy to the goal redundancy \(i.e. 10X\). - +The dominant remittances from the Solana mining pool are validator rewards. The disinflationary mechanism is a flat, protocol-specified and adjusted percentage of each transaction fee. diff --git a/docs/src/implemented-proposals/ed_overview/ed_mvp.md b/docs/src/implemented-proposals/ed_overview/ed_mvp.md index 77fb33e301..a57b3b42e6 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_mvp.md +++ b/docs/src/implemented-proposals/ed_overview/ed_mvp.md @@ -10,7 +10,3 @@ The preceding sections, outlined in the [Economic Design Overview](../README.md) * Mechanism by which validators are rewarded via network inflation. * Ability to delegate tokens to validator nodes * Validator set commission fees on interest from delegated tokens. -* Archivers to receive fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism \(i.e. PoRep reward as a function of total ledger redundancy\) to come later. -* Pooling of archiver PoRep transaction fees and weighted distribution to validators based on PoRep verification \(see [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md)\). 
It will be useful to test this protection against attacks on testnet. -* Nice-to-have: auto-delegation of archiver rewards to a validator. - diff --git a/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md b/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md deleted file mode 100644 index e61d34f265..0000000000 --- a/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Replication-client Economics - -**Subject to change.** - -Replication-clients should be rewarded for providing the network with storage space. Incentivization of the set of archivers provides data security through redundancy of the historical ledger. Replication nodes are rewarded in proportion to the amount of ledger data storage provided, as proved by successfully submitting Proofs-of-Replication to the cluster. These rewards are captured by generating and entering Proofs of Replication \(PoReps\) into the PoH stream which can be validated by Validation nodes as described in [Replication-validation Transaction Fees](../ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). - diff --git a/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md b/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md deleted file mode 100644 index 8d4f5e1c5c..0000000000 --- a/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md +++ /dev/null @@ -1,8 +0,0 @@ -# Replication-client Reward Auto-delegation - -**Subject to change.** - -The ability for Solana network participants to earn rewards by providing storage service is a unique on-boarding path that requires little hardware overhead and minimal upfront capital. It offers an avenue for individuals with extra storage space on their home laptops or PCs to contribute to the security of the network and become integrated into the Solana economy. - -To enhance this on-boarding ramp and facilitate further participation and investment in the Solana economy, replication-clients have the opportunity to auto-delegate their rewards to validation-clients of their choice. Much like the automatic reinvestment of stock dividends, in this scenario, an archiver-client can earn Solana tokens by providing some storage capacity to the network \(i.e. via submitting valid PoReps\), have the protocol-based rewards automatically assigned as delegation to a staked validator node of the archiver's choice and earn interest, less a fee, from the validation-client's network participation. 
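For the record, the auto-delegation flow removed above is essentially automatic dividend reinvestment. A toy Rust sketch of that reading follows; every name, rate, and structure here is hypothetical, since the feature never shipped:

```rust
// Toy model of the removed reward auto-delegation idea (hypothetical names).
// Storage rewards are restaked with a chosen validator, and interest then
// accrues on that stake, less the validator's fee.
struct AutoDelegation {
    delegated_stake: u64, // lamports auto-delegated to the chosen validator
}

impl AutoDelegation {
    // Auto-delegate an earned storage reward, like dividend reinvestment.
    fn credit_storage_reward(&mut self, reward: u64) {
        self.delegated_stake += reward;
    }

    // Interest for one period at the validator's rate, less the validator's fee.
    fn interest(&self, validator_rate: f64, fee_rate: f64) -> f64 {
        self.delegated_stake as f64 * validator_rate * (1.0 - fee_rate)
    }
}

fn main() {
    let mut d = AutoDelegation { delegated_stake: 0 };
    d.credit_storage_reward(1_000_000);
    // e.g. a 7% validator rate with an assumed 5% fee on that interest
    println!("{:.0} lamports of interest", d.interest(0.07, 0.05));
}
```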
- diff --git a/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md b/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md deleted file mode 100644 index 9deda4cbde..0000000000 --- a/docs/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md +++ /dev/null @@ -1,8 +0,0 @@ -# Storage-replication Rewards - -**Subject to change.** - -Archiver-clients download, encrypt and submit PoReps for ledger block sections. PoReps submitted to the PoH stream, and subsequently validated, function as evidence that the submitting archiver client is indeed storing the assigned ledger block sections on local hard drive space as a service to the network. Therefore, archiver clients should earn protocol rewards proportional to the amount of storage, and the number of successfully validated PoReps, that they are verifiably providing to the network. - -Additionally, archiver clients have the opportunity to capture a portion of slashed bounties \[TBD\] of dishonest validator clients. This can be accomplished by an archiver client submitting a verifiably false PoRep which a dishonest validator client receives and signs as valid. This reward incentive is to prevent lazy validators and minimize validator-archiver collusion attacks; more on this below. - diff --git a/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md b/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md index 0968e3531a..40da25e270 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md +++ b/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md @@ -1,6 +1,6 @@ ## Storage Rent Economics -Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory, account for the acute, transactional, costs of validating and adding that data to the ledger. At the same time, our compensation design for archivers (see [Replication-client Economics](ed_replication_client_economics/README.md)), in theory, accounts for the long term storage of the historical ledger. Unaccounted in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only to validators but also to the broader network as active state grows so does data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent. +Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory, account for the acute, transactional, costs of validating and adding that data to the ledger. Unaccounted in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only on validators but also on the broader network: as active state grows, so does data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent. 
Storage rent can be paid via one of two methods: diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md index ec5413ed06..b371a0c3fd 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md @@ -2,7 +2,7 @@ **Subject to change.** -Validator-clients are eligible to receive protocol-based \(i.e. inflation-based\) rewards issued via stake-based annual interest rates \(calculated per epoch\) by providing compute \(CPU+GPU\) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of total amount of circulating tokens. The network is expected to launch with an annual inflation rate around 15%, set to decrease by 15% per year until a long-term stable rate of 1-2% is reached. These issuances are to be split and distributed to participating validators and archivers, with around 90% of the issued tokens allocated for validator rewards. Because the network will be distributing a fixed amount of inflation rewards across the stake-weighted valdiator set, any individual validator's interest rate will be a function of the amount of staked SOL in relation to the circulating SOL. +Validator-clients are eligible to receive protocol-based \(i.e. inflation-based\) rewards issued via stake-based annual interest rates \(calculated per epoch\) by providing compute \(CPU+GPU\) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of the total amount of circulating tokens. The network is expected to launch with an annual inflation rate around 15%, set to decrease by 15% per year until a long-term stable rate of 1-2% is reached. These issuances are to be split and distributed to participating validators, with around 90% of the issued tokens allocated for validator rewards. Because the network will be distributing a fixed amount of inflation rewards across the stake-weighted validator set, any individual validator's interest rate will be a function of the amount of staked SOL in relation to the circulating SOL. -Additionally, validator clients may earn revenue through fees via state-validation transactions and Proof-of-Replication \(PoRep\) transactions. For clarity, we separately describe the design and motivation of these revenue distriubutions for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and rent, and PoRep-validation transaction fees. +Additionally, validator clients may earn revenue through fees via state-validation transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards, and state-validation transaction fees and rent. 
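The disinflationary schedule described in the surviving paragraph above (roughly 15% initial annual inflation, decaying 15% per year toward a 1-2% floor) can be made concrete with a small sketch. The 1.5% floor and the names below are illustrative assumptions, not protocol constants:

```rust
// Sketch of the disinflationary issuance schedule described above: a ~15%
// initial annual inflation rate, decaying by 15% per year, floored at a
// long-term rate in the 1-2% band (1.5% is assumed here for illustration).
const INITIAL_RATE: f64 = 0.15;
const YEARLY_DECAY: f64 = 0.15;
const LONG_TERM_RATE: f64 = 0.015; // assumed midpoint of the 1-2% band

fn annual_inflation_rate(year: u32) -> f64 {
    (INITIAL_RATE * (1.0 - YEARLY_DECAY).powi(year as i32)).max(LONG_TERM_RATE)
}

fn main() {
    for year in 0..12 {
        println!("year {:2}: {:.2}%", year, annual_inflation_rate(year) * 100.0);
    }
}
```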
diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md deleted file mode 100644 index a15044890a..0000000000 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md +++ /dev/null @@ -1,11 +0,0 @@ -# Replication-validation Transaction Fees - -**Subject to change.** - -As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by archiver-clients. In this case, validators are providing compute \(CPU/GPU\) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH ledger block. - -While replication-clients are incentivized and rewarded through a protocol-based rewards schedule \(see [Replication-client Economics](../ed_replication_client_economics/README.md)\), validator-clients will be incentivized to include and validate PoReps in PoH through collection of transaction fees associated with the submitted PoReps and distribution of protocol rewards proportional to the validated PoReps. As will be described in detail in Section 3.1, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy \(e.g. 10x data redundancy\). - -The validation of PoReps by validation-clients is computationally more expensive than state-validation \(detailed in the [Economic Sustainability](../ed_economic_sustainability.md) section\), thus the transaction fees are expected to be proportionally higher. - -There are various attack vectors available for colluding validation and replication clients, also described in detail in [Economic Sustainability](../ed_economic_sustainability.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archiver's challenge. The PoRep challenge game is described in [Ledger Replication](../../../cluster/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\). 
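The distribution rule the deleted proposal describes, validator rewards proportional to validated PoReps less those mismatching the archiver's challenge, could have been tallied roughly as follows. This is a sketch of the removed design, with hypothetical types and names; no such accounting ever shipped:

```rust
// Hypothetical per-epoch accounting of the deleted rule: each validator's
// share of the reward pool is proportional to its validated PoReps minus
// those that mismatched an archiver's challenge mask.
struct PorepTally {
    validated: u64,  // PoReps this validator marked valid
    mismatched: u64, // of those, how many the challenge mask contradicted
}

fn porep_reward_shares(pool_lamports: u64, tallies: &[PorepTally]) -> Vec<u64> {
    let credit = |t: &PorepTally| t.validated.saturating_sub(t.mismatched);
    let total: u64 = tallies.iter().map(credit).sum();
    tallies
        .iter()
        .map(|t| {
            if total == 0 {
                0
            } else {
                pool_lamports * credit(t) / total
            }
        })
        .collect()
}

fn main() {
    let tallies = [
        PorepTally { validated: 90, mismatched: 10 },
        PorepTally { validated: 20, mismatched: 0 },
    ];
    // Credits are 80 and 20, so a 1000-lamport pool splits 800/200.
    assert_eq!(porep_reward_shares(1_000, &tallies), vec![800, 200]);
}
```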
diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md index ea48ef4d41..0e4e9835bc 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md @@ -4,10 +4,10 @@ Validator-clients have two functional roles in the Solana network: -* Validate \(vote\) the current global state of that PoH along with any Proofs-of-Replication \(see [Replication Client Economics](../ed_replication_client_economics/README.md)\) that they are eligible to validate. -* Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and Proofs-of-Replication and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity. +* Validate \(vote\) the current global state of that PoH. +* Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity. -Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). PoRep transaction fees are also collected by the leader client and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch an archiver's challenge. \(see [Replication-client Transaction Fees](ed_vce_replication_validation_transaction_fees.md)\) +Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of: @@ -25,7 +25,7 @@ At any given point in time, a specific validator's interest rate can be determin ![](../../../.gitbook/assets/p_ex_supply.png) -**Figure 2:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in **Figure 1**. 
Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2% which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for state-validation and ledger storage replication \(PoReps\) are not accounted for here. Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and archiver nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of % circulating token supply that is staked is shown in **Figure 3**. +**Figure 2:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in **Figure 1**. Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2% which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for state-validation are not accounted for here. Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validator nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of % circulating token supply that is staked is shown in **Figure 3**. 
![](../../../.gitbook/assets/p_ex_interest.png) diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md index 2a0fa38a84..0024519b82 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md @@ -9,7 +9,7 @@ Each transaction sent through the network, to be processed by the current leader * open avenues for a transaction market to incentivize validation-client to collect and process submitted transactions in their function as leader, * and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below. -Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above, and replication-clients, as discussed below. +Many current blockchain economies \(e.g. Bitcoin, Ethereum\) rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol-derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above. Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](../../transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical gas usage. In this way, the protocol can use the minimum fee to target a desired hardware utilization. By monitoring a protocol specified gas usage with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual gas usage per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level.
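The congestion-driven adjustment described in the retained doc text above is a simple feedback loop. The following is a minimal sketch, not part of this patch: the function name `adjust_min_fee`, the 5% step size, and the plain `u64` gas units are illustrative assumptions, not the runtime's actual fee API.

```rust
/// Illustrative sketch of the congestion-driven minimum-fee loop: nudge the
/// protocol minimum fee until observed per-block gas usage settles near a
/// protocol-specified target. Names and constants here are assumptions.
fn adjust_min_fee(min_fee: u64, observed_gas_per_block: u64, target_gas_per_block: u64) -> u64 {
    // Step each adjustment by 5% of the current fee (at least 1 unit); a
    // real protocol would tune this so the fee converges on the target
    // instead of oscillating around it.
    let step = (min_fee / 20).max(1);
    if observed_gas_per_block > target_gas_per_block {
        // Usage above target: raise the minimum fee to damp demand.
        min_fee.saturating_add(step)
    } else if observed_gas_per_block < target_gas_per_block {
        // Usage below target: lower the fee, keeping it above zero.
        min_fee.saturating_sub(step).max(1)
    } else {
        min_fee
    }
}
```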
diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md index 9490875102..186440613c 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md @@ -21,12 +21,7 @@ Running a Solana validation-client required relatively modest upfront hardware c **Table 2** example high-end hardware setup for running a Solana client. -Despite the low-barrier to entry as a validation-client, from a capital investment perspective, as in any developing economy, there will be much opportunity and need for trusted validation services as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a true decentralized, permissionless network, these interested parties still have two options to become involved in the Solana network/economy: +Despite the low barrier to entry as a validation-client, from a capital investment perspective, as in any developing economy, there will be much opportunity and need for trusted validation services as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a true decentralized, permissionless network, these interested parties can become involved in the Solana network/economy by delegating previously acquired tokens to a reliable validation node to earn a portion of the interest generated. -1. Delegation of previously acquired tokens with a reliable validation node to earn a portion of interest generated -2. Provide local storage space as a replication-client and receive rewards by submitting Proof-of-Replication \(see [Replication-client Economics](../ed_replication_client_economics/README.md)\). - - a. This participant has the additional option to directly delegate their earned storage rewards \([Replication-client Reward Auto-delegation](../ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md)\) - -Delegation of tokens to validation-clients, via option 1, provides a way for passive Solana token holders to become part of the active Solana economy and earn interest rates proportional to the interest rate generated by the delegated validation-client. Additionally, this feature intends to create a healthy validation-client market, with potential validation-client nodes competing to build reliable, transparent and profitable delegation services. +Delegation of tokens to validation-clients provides a way for passive Solana token holders to become part of the active Solana economy and earn interest rates proportional to the interest rate generated by the delegated validation-client. Additionally, this feature intends to create a healthy validation-client market, with potential validation-client nodes competing to build reliable, transparent and profitable delegation services.
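Since delegation is now the sole on-ramp the doc describes, a compact sketch may help reviewers picture it. This is not code from this patch: the helper name `delegate_to_validator` is hypothetical, the stake account is assumed to already exist and be funded, and the `stake_instruction::delegate_stake` signature is taken to match what this era of the stake program exposes (treat it as an assumption).

```rust
use solana_sdk::{
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    transaction::Transaction,
};
use solana_stake_program::stake_instruction;

/// Hypothetical helper, sketch only: delegate an existing, funded stake
/// account to a chosen validator's vote account. Assumes `staker` is the
/// stake account's authorized staker and pays the transaction fee.
fn delegate_to_validator(
    staker: &Keypair,
    stake_pubkey: &Pubkey,
    vote_pubkey: &Pubkey,
    recent_blockhash: Hash,
) -> Transaction {
    let instruction =
        stake_instruction::delegate_stake(stake_pubkey, &staker.pubkey(), vote_pubkey);
    let message = Message::new_with_payer(&[instruction], Some(&staker.pubkey()));
    let signers = vec![staker];
    Transaction::new(&signers, message, recent_blockhash)
}
```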
diff --git a/docs/src/proposals/ledger-replication-to-implement.md b/docs/src/proposals/ledger-replication-to-implement.md deleted file mode 100644 index b228d9dc99..0000000000 --- a/docs/src/proposals/ledger-replication-to-implement.md +++ /dev/null @@ -1,137 +0,0 @@ -# Ledger Replication - -Replication behavior yet to be implemented. - -## Storage epoch - -The storage epoch should be the number of slots which results in around 100GB-1TB of ledger to be generated for archivers to store. Archivers will start storing ledger when a given fork has a high probability of not being rolled back. - -## Validator behavior - -1. Every NUM\_KEY\_ROTATION\_TICKS it also validates samples received from - - archivers. It signs the PoH hash at that point and uses the following - - algorithm with the signature as the input: - - * The low 5 bits of the first byte of the signature creates an index into - - another starting byte of the signature. - - * The validator then looks at the set of storage proofs where the byte of - - the proof's sha state vector starting from the low byte matches exactly - - with the chosen byte\(s\) of the signature. - - * If the set of proofs is larger than the validator can handle, then it - - increases to matching 2 bytes in the signature. - - * Validator continues to increase the number of matching bytes until a - - workable set is found. - - * It then creates a mask of valid proofs and fake proofs and sends it to - - the leader. This is a storage proof confirmation transaction. - -2. After a lockout period of NUM\_SECONDS\_STORAGE\_LOCKOUT seconds, the - - validator then submits a storage proof claim transaction which then causes the - - distribution of the storage reward if no challenges were seen for the proof to - - the validators and archivers party to the proofs. - -## Archiver behavior - -1. The archiver then generates another set of offsets which it submits a fake - - proof with an incorrect sha state. It can be proven to be fake by providing the - - seed for the hash result. - - * A fake proof should consist of an archiver hash of a signature of a PoH - - value. That way when the archiver reveals the fake proof, it can be - - verified on chain. - -2. The archiver monitors the ledger, if it sees a fake proof integrated, it - - creates a challenge transaction and submits it to the current leader. The - - transacation proves the validator incorrectly validated a fake storage proof. - - The archiver is rewarded and the validator's staking balance is slashed or - - frozen. - -## Storage proof contract logic - -Each archiver and validator will have their own storage account. The validator's account would be separate from their gossip id similiar to their vote account. These should be implemented as two programs one which handles the validator as the keysigner and one for the archiver. In that way when the programs reference other accounts, they can check the program id to ensure it is a validator or archiver account they are referencing. - -### SubmitMiningProof - -```text -SubmitMiningProof { - slot: u64, - sha_state: Hash, - signature: Signature, -}; -keys = [archiver_keypair] -``` - -Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, the sha\_state the result of the archiver using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. 
The list of proofs from the current storage epoch should be saved in the account state, and then transfered to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given archiver should only submit proofs for one segment. - -The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots in which a significant portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving a AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state. - -The program should do a signature verify check on the signature, public key from the transaction submitter and the message of the previous storage epoch PoH value. - -### ProofValidation - -```text -ProofValidation { - proof_mask: Vec<ProofStatus>, -} -keys = [validator_keypair, archiver_keypair(s) (unsigned)] -``` - -A validator will submit this transaction to indicate that a set of proofs for a given segment are valid/not-valid or skipped where the validator did not look at it. The keypairs for the archivers that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs are generated in the previous epoch. The sampling of the storage proofs should be verified ensuring that the correct proofs are skipped by the validator according to the logic outlined in the validator behavior of sampling. - -The included archiver keys will indicate the the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account. - -### ClaimStorageReward - -```text -ClaimStorageReward { -} -keys = [validator_keypair or archiver_keypair, validator/archiver_keypairs (unsigned)] -``` - -Archivers and validators will use this transaction to get paid tokens from a program state where SubmitStorageProof, ProofValidation and ChallengeProofValidations are in a state where proofs have been submitted and validated and there are no ChallengeProofValidations referencing those proofs. For a validator, it should reference the archiver keypairs to which it has validated proofs in the relevant epoch. And for an archiver it should reference validator keypairs for which it has validated and wants to be rewarded. - -### ChallengeProofValidation - -```text -ChallengeProofValidation { - proof_index: u64, - hash_seed_value: Vec<u8>, -} -keys = [archiver_keypair, validator_keypair] -``` - -This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information maybe via RPC call to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state see that a ProofValidation is submitted in the previous storage epoch and hash the hash\_seed\_value and see that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid.
If so, then it will save the challenge to the list of challenges that it has in its state. - -### AdvertiseStorageRecentBlockhash - -```text -AdvertiseStorageRecentBlockhash { - hash: Hash, - slot: u64, -} -``` - -Validators and archivers will submit this to indicate that a new storage epoch has passed and that the storage proofs which are current proofs should now be for the previous epoch. Other transactions should check to see that the epoch that they are referencing is accurate according to current chain state. - diff --git a/docs/src/terminology.md b/docs/src/terminology.md index 1f9ecccd45..e695c87556 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -88,10 +88,6 @@ See [Proof of History](terminology.md#proof-of-history). The time, i.e. number of [slots](terminology.md#slot), for which a [leader schedule](terminology.md#leader-schedule) is valid. -## fake storage proof - -A proof which has the same format as a storage proof, but the sha state is actually from hashing a known ledger value which the storage client can reveal and is also easily verifiable by the network on-chain. - ## fee account The fee account in the transaction is the account pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance. @@ -118,7 +114,7 @@ A digital fingerprint of a sequence of bytes. ## inflation -An increase in token supply over time used to fund rewards for validation and replication and to fund continued development of Solana. +An increase in token supply over time used to fund rewards for validation and to fund continued development of Solana. ## instruction @@ -144,10 +140,6 @@ A sequence of [validator](terminology.md#validator) [public keys](terminology.md A list of [entries](terminology.md#entry) containing [transactions](terminology.md#transaction) signed by [clients](terminology.md#client). -## ledger segment - -Portion of the ledger which is downloaded by the archiver where storage proof data is derived. - ## ledger vote A [hash](terminology.md#hash) of the [validator's state](terminology.md#bank-state) at a given [tick height](terminology.md#tick-height). It comprises a validator's affirmation that a [block](terminology.md#block) it has received has been verified, as well as a promise not to vote for a conflicting [block](terminology.md#block) \(i.e. [fork](terminology.md#fork)\) for a specific amount of time, the [lockout](terminology.md#lockout) period. @@ -204,10 +196,6 @@ A stack of proofs, each which proves that some data existed before the proof was The public key of a [keypair](terminology.md#keypair). -## archiver - -Storage mining client, stores some part of the ledger enumerated in blocks and submits storage proofs to the chain. Not a validator. - ## root A [block](terminology.md#block) or [slot](terminology.md#slot) that has reached maximum [lockout](terminology.md#lockout) on a validator. The root is the highest block that is an ancestor of all active forks on a validator. All ancestor blocks of a root are also transitively a root. Blocks that are not an ancestor and not a descendant of the root are excluded from consideration for consensus and can be discarded. 
@@ -236,26 +224,6 @@ The [native token](terminology.md#native-token) tracked by a [cluster](terminolo Tokens forfeit to the [cluster](terminology.md#cluster) if malicious [validator](terminology.md#validator) behavior can be proven. -## storage proof - -A set of sha hash state which is constructed by sampling the encrypted version of the stored ledger segment at certain offsets. - -## storage proof challenge - -A transaction from an archiver that verifiably proves that a validator confirmed a fake proof. - -## storage proof claim - -A transaction from a validator which is after the timeout period given from the storage proof confirmation and which no successful challenges have been observed which rewards the parties of the storage proofs and confirmations. - -## storage proof confirmation - -A transaction by a validator which indicates the set of real and fake proofs submitted by a storage miner. The transaction would contain a list of proof hash values and a bit which says if this hash is valid or fake. - -## storage validation capacity - -The number of keys and samples that a validator can verify each storage epoch. - ## supermajority 2/3 of a [cluster](terminology.md#cluster). diff --git a/docs/src/validator/blockstore.md b/docs/src/validator/blockstore.md index 1c51648c47..d199340f34 100644 --- a/docs/src/validator/blockstore.md +++ b/docs/src/validator/blockstore.md @@ -85,6 +85,3 @@ Replay stage uses Blockstore APIs to find the longest chain of entries it can ha ## Pruning Blockstore Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote for can be pruned, expunged. - -Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. 
- diff --git a/dos/src/main.rs index 0d43e3113c..132b9c94cf 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -148,7 +148,7 @@ fn main() { let data_type = value_t_or_exit!(matches, "data_type", String); info!("Finding cluster entry: {:?}", entrypoint_addr); - let (nodes, _validators, _archivers) = discover( + let (nodes, _validators) = discover( Some(&entrypoint_addr), None, Some(60), diff --git a/genesis-programs/Cargo.toml b/genesis-programs/Cargo.toml index 2cb1e9d6f1..521d7cfbc0 100644 --- a/genesis-programs/Cargo.toml +++ b/genesis-programs/Cargo.toml @@ -15,7 +15,6 @@ solana-budget-program = { path = "../programs/budget", version = "1.2.0" } solana-exchange-program = { path = "../programs/exchange", version = "1.2.0" } solana-runtime = { path = "../runtime", version = "1.2.0" } solana-sdk = { path = "../sdk", version = "1.2.0" } -solana-storage-program = { path = "../programs/storage", version = "1.2.0" } solana-vest-program = { path = "../programs/vest", version = "1.2.0" } [lib] diff --git a/genesis-programs/src/lib.rs b/genesis-programs/src/lib.rs index 2d2c18169f..da598e8a5f 100644 --- a/genesis-programs/src/lib.rs +++ b/genesis-programs/src/lib.rs @@ -10,8 +10,6 @@ extern crate solana_budget_program; #[macro_use] extern crate solana_exchange_program; #[macro_use] -extern crate solana_storage_program; -#[macro_use] extern crate solana_vest_program; use log::*; @@ -50,7 +48,6 @@ pub fn get_programs(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(String, Pubkey)>> { diff --git a/genesis/src/main.rs b/genesis/src/main.rs fn main() -> Result<(), Box<dyn error::Error>> { ); } - if operating_mode == OperatingMode::Development { - solana_storage_program::rewards_pools::add_genesis_accounts(&mut genesis_config); - } solana_stake_program::add_genesis_accounts(&mut genesis_config); if let Some(files) = matches.values_of("primordial_accounts_file") { @@ -763,8 +760,6 @@ mod tests { ) .expect("genesis"); - solana_storage_program::rewards_pools::add_genesis_accounts(&mut genesis_config); - remove_file(path).unwrap(); // Test total number of accounts is correct diff --git a/gossip/src/main.rs b/gossip/src/main.rs index d1d52fc79c..e68706c696 100644 --- a/gossip/src/main.rs +++ b/gossip/src/main.rs @@ -215,7 +215,7 @@ fn main() -> Result<(), Box<dyn error::Error>> { }), ); - let (_all_peers, validators, _archivers) = discover( + let (_all_peers, validators) = discover( entrypoint_addr.as_ref(), num_nodes, timeout, @@ -263,7 +263,7 @@ fn main() -> Result<(), Box<dyn error::Error>> { let entrypoint_addr = parse_entrypoint(&matches); let timeout = value_t_or_exit!(matches, "timeout", u64); let shred_version = value_t_or_exit!(matches, "shred_version", u16); - let (_all_peers, validators, _archivers) = discover( + let (_all_peers, validators) = discover( entrypoint_addr.as_ref(), Some(1), Some(timeout), @@ -304,7 +304,7 @@ fn main() -> Result<(), Box<dyn error::Error>> { .unwrap() .parse::<Pubkey>() .unwrap(); - let (_all_peers, validators, _archivers) = discover( + let (_all_peers, validators) = discover( entrypoint_addr.as_ref(), None, None, diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 62d2af85c5..abd9357140 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -12,7 +12,6 @@ homepage = "https://solana.com/" itertools = "0.9.0" log = "0.4.8" rand = "0.7.0" -solana-archiver-lib = { path = "../archiver-lib", version = "1.2.0" } solana-config-program = { path = "../programs/config", version = "1.2.0" } solana-core = { path = "../core", version = "1.2.0" } solana-client = { path = "../client", version = "1.2.0" } @@ -25,7 +24,6 @@ solana-logger = { path = "../logger", version =
"1.2.0" } solana-runtime = { path = "../runtime", version = "1.2.0" } solana-sdk = { path = "../sdk", version = "1.2.0" } solana-stake-program = { path = "../programs/stake", version = "1.2.0" } -solana-storage-program = { path = "../programs/storage", version = "1.2.0" } solana-vest-program = { path = "../programs/vest", version = "1.2.0" } solana-vote-program = { path = "../programs/vote", version = "1.2.0" } tempfile = "3.1.0" diff --git a/local-cluster/src/cluster.rs b/local-cluster/src/cluster.rs index 1a83b05b1c..6b507cf1f8 100644 --- a/local-cluster/src/cluster.rs +++ b/local-cluster/src/cluster.rs @@ -10,7 +10,6 @@ use std::sync::Arc; pub struct ValidatorInfo { pub keypair: Arc, pub voting_keypair: Arc, - pub storage_keypair: Arc, pub ledger_path: PathBuf, pub contact_info: ContactInfo, } diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index 913e77c93d..0b47ef48df 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -45,7 +45,7 @@ pub fn spend_and_verify_all_nodes( nodes: usize, ignore_nodes: HashSet, ) { - let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); + let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); assert!(cluster_nodes.len() >= nodes); for ingress_node in &cluster_nodes { if ignore_nodes.contains(&ingress_node.id) { @@ -126,7 +126,7 @@ pub fn send_many_transactions( } pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) { - let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); + let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); assert!(cluster_nodes.len() >= nodes); for node in &cluster_nodes { let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); @@ -197,7 +197,7 @@ pub fn kill_entry_and_spend_and_verify_rest( slot_millis: u64, ) { solana_logger::setup(); - let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); + let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); assert!(cluster_nodes.len() >= nodes); let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE); // sleep long enough to make sure we are in epoch 3 diff --git a/local-cluster/src/lib.rs b/local-cluster/src/lib.rs index b6d76272ad..c84926f844 100644 --- a/local-cluster/src/lib.rs +++ b/local-cluster/src/lib.rs @@ -1,6 +1,3 @@ pub mod cluster; pub mod cluster_tests; pub mod local_cluster; - -#[macro_use] -extern crate solana_storage_program; diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 180e29ce81..960db5dbfb 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -1,7 +1,6 @@ use crate::cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo}; use itertools::izip; use log::*; -use solana_archiver_lib::archiver::Archiver; use solana_client::thin_client::{create_client, ThinClient}; use solana_core::{ cluster_info::{Node, VALIDATOR_PORT_RANGE}, @@ -15,60 +14,35 @@ use solana_ledger::{ }; use solana_sdk::{ client::SyncClient, - clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_TICKS_PER_SLOT}, + clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT}, commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, genesis_config::{GenesisConfig, OperatingMode}, - message::Message, poh_config::PohConfig, pubkey::Pubkey, signature::{Keypair, Signer}, system_transaction, 
transaction::Transaction, - transport::Result as TransportResult, }; use solana_stake_program::{ config as stake_config, stake_instruction, stake_state::{Authorized, Lockup, StakeState}, }; -use solana_storage_program::{ - storage_contract, - storage_instruction::{self, StorageAccountType}, -}; use solana_vote_program::{ vote_instruction, vote_state::{VoteInit, VoteState}, }; use std::{ collections::HashMap, - fs::remove_dir_all, io::{Error, ErrorKind, Result}, iter, - path::PathBuf, sync::Arc, }; -pub struct ArchiverInfo { - pub archiver_storage_pubkey: Pubkey, - pub ledger_path: PathBuf, -} - -impl ArchiverInfo { - fn new(storage_pubkey: Pubkey, ledger_path: PathBuf) -> Self { - Self { - archiver_storage_pubkey: storage_pubkey, - ledger_path, - } - } -} - #[derive(Clone, Debug)] pub struct ClusterConfig { /// The validator config that should be applied to every node in the cluster pub validator_configs: Vec<ValidatorConfig>, - /// Number of archivers in the cluster - /// Note- archivers will timeout if ticks_per_slot is much larger than the default 8 - pub num_archivers: usize, /// Number of nodes that are unstaked and not voting (a.k.a listening) pub num_listeners: u64, /// The specific pubkeys of each node if specified @@ -79,7 +53,6 @@ pub struct ClusterConfig { pub cluster_lamports: u64, pub ticks_per_slot: u64, pub slots_per_epoch: u64, - pub slots_per_segment: u64, pub stakers_slot_offset: u64, pub native_instruction_processors: Vec<(String, Pubkey)>, pub operating_mode: OperatingMode, @@ -90,14 +63,12 @@ impl Default for ClusterConfig { fn default() -> Self { ClusterConfig { validator_configs: vec![], - num_archivers: 0, num_listeners: 0, validator_keys: None, node_stakes: vec![], cluster_lamports: 0, ticks_per_slot: DEFAULT_TICKS_PER_SLOT, slots_per_epoch: DEFAULT_DEV_SLOTS_PER_EPOCH, - slots_per_segment: DEFAULT_SLOTS_PER_SEGMENT, stakers_slot_offset: DEFAULT_DEV_SLOTS_PER_EPOCH, native_instruction_processors: vec![], operating_mode: OperatingMode::Development, @@ -113,8 +84,6 @@ pub struct LocalCluster { pub entry_point_info: ContactInfo, pub validators: HashMap<Pubkey, ClusterValidatorInfo>, pub genesis_config: GenesisConfig, - archivers: Vec<Archiver>, - pub archiver_infos: HashMap<Pubkey, ArchiverInfo>, } impl LocalCluster { @@ -159,7 +128,6 @@ impl LocalCluster { config.node_stakes[0], ); genesis_config.ticks_per_slot = config.ticks_per_slot; - genesis_config.slots_per_segment = config.slots_per_segment; genesis_config.epoch_schedule = EpochSchedule::custom(config.slots_per_epoch, config.stakers_slot_offset, true); genesis_config.operating_mode = config.operating_mode; @@ -171,11 +139,7 @@ impl LocalCluster { solana_genesis_programs::get_programs(genesis_config.operating_mode, 0) .unwrap_or_else(|| vec![]) } - OperatingMode::Development => { - genesis_config - .native_instruction_processors - .push(solana_storage_program!()); - } + _ => (), } genesis_config.inflation = @@ -185,12 +149,6 @@ impl LocalCluster { .native_instruction_processors .extend_from_slice(&config.native_instruction_processors); - let storage_keypair = Keypair::new(); - genesis_config.add_account( - storage_keypair.pubkey(), - storage_contract::create_validator_storage_account(leader_pubkey, 1), - ); - // Replace staking config genesis_config.add_account( stake_config::id(), @@ -205,7 +163,6 @@ impl LocalCluster { let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let leader_contact_info = leader_node.info.clone(); - let leader_storage_keypair = Arc::new(storage_keypair); let leader_voting_keypair = Arc::new(voting_keypair); let mut leader_config =
config.validator_configs[0].clone(); leader_config.rpc_ports = Some(( @@ -218,7 +175,6 @@ impl LocalCluster { &leader_ledger_path, &leader_voting_keypair.pubkey(), vec![leader_voting_keypair.clone()], - &leader_storage_keypair, None, true, &leader_config, @@ -229,7 +185,6 @@ impl LocalCluster { let leader_info = ValidatorInfo { keypair: leader_keypair.clone(), voting_keypair: leader_voting_keypair, - storage_keypair: leader_storage_keypair, ledger_path: leader_ledger_path, contact_info: leader_contact_info.clone(), }; @@ -246,9 +201,7 @@ impl LocalCluster { funding_keypair: mint_keypair, entry_point_info: leader_contact_info, validators, - archivers: vec![], genesis_config, - archiver_infos: HashMap::new(), }; for (stake, validator_config, key) in izip!( @@ -273,15 +226,7 @@ impl LocalCluster { ) .unwrap(); - for _ in 0..config.num_archivers { - cluster.add_archiver(); - } - - discover_cluster( - &cluster.entry_point_info.gossip, - config.node_stakes.len() + config.num_archivers as usize, - ) - .unwrap(); + discover_cluster(&cluster.entry_point_info.gossip, config.node_stakes.len()).unwrap(); cluster } @@ -301,10 +246,6 @@ impl LocalCluster { v.join().expect("Validator join failed"); } } - - while let Some(archiver) = self.archivers.pop() { - archiver.close(); - } } pub fn add_validator( @@ -320,7 +261,6 @@ impl LocalCluster { // Must have enough tokens to fund vote account and set delegate let voting_keypair = Keypair::new(); - let storage_keypair = Arc::new(Keypair::new()); let validator_pubkey = validator_keypair.pubkey(); let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey()); let contact_info = validator_node.info.clone(); @@ -330,7 +270,7 @@ impl LocalCluster { // setup as a listener info!("listener {} ", validator_pubkey,); } else { - // Give the validator some lamports to setup vote and storage accounts + // Give the validator some lamports to setup vote accounts let validator_balance = Self::transfer_with_client( &client, &self.funding_keypair, @@ -349,9 +289,6 @@ impl LocalCluster { stake, ) .unwrap(); - - Self::setup_storage_account(&client, &storage_keypair, &validator_keypair, false) - .unwrap(); } let mut config = validator_config.clone(); @@ -366,7 +303,6 @@ impl LocalCluster { &ledger_path, &voting_keypair.pubkey(), vec![voting_keypair.clone()], - &storage_keypair, Some(&self.entry_point_info), true, &config, @@ -377,7 +313,6 @@ impl LocalCluster { ValidatorInfo { keypair: validator_keypair, voting_keypair, - storage_keypair, ledger_path, contact_info, }, @@ -389,56 +324,8 @@ impl LocalCluster { validator_pubkey } - fn add_archiver(&mut self) { - let archiver_keypair = Arc::new(Keypair::new()); - let archiver_pubkey = archiver_keypair.pubkey(); - let storage_keypair = Arc::new(Keypair::new()); - let storage_pubkey = storage_keypair.pubkey(); - let client = create_client( - self.entry_point_info.client_facing_addr(), - VALIDATOR_PORT_RANGE, - ); - - // Give the archiver some lamports to setup its storage accounts - Self::transfer_with_client( - &client, - &self.funding_keypair, - &archiver_keypair.pubkey(), - 42, - ); - let archiver_node = Node::new_localhost_archiver(&archiver_pubkey); - - Self::setup_storage_account(&client, &storage_keypair, &archiver_keypair, true).unwrap(); - - let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_config); - let archiver = Archiver::new( - &archiver_ledger_path, - archiver_node, - self.entry_point_info.clone(), - archiver_keypair, - storage_keypair, - CommitmentConfig::recent(), - ) - 
.unwrap_or_else(|err| panic!("Archiver::new() failed: {:?}", err)); - - self.archivers.push(archiver); - self.archiver_infos.insert( - archiver_pubkey, - ArchiverInfo::new(storage_pubkey, archiver_ledger_path), - ); - } - fn close(&mut self) { self.close_preserve_ledgers(); - for ledger_path in self - .validators - .values() - .map(|f| &f.info.ledger_path) - .chain(self.archiver_infos.values().map(|info| &info.ledger_path)) - { - remove_dir_all(&ledger_path) - .unwrap_or_else(|_| panic!("Unable to remove {:?}", ledger_path)); - } } pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 { @@ -601,40 +488,6 @@ impl LocalCluster { )), } } - - /// Sets up the storage account for validators/archivers and assumes the funder is the owner - fn setup_storage_account( - client: &ThinClient, - storage_keypair: &Keypair, - from_keypair: &Arc<Keypair>, - archiver: bool, - ) -> TransportResult<()> { - let storage_account_type = if archiver { - StorageAccountType::Archiver - } else { - StorageAccountType::Validator - }; - let message = Message::new_with_payer( - &storage_instruction::create_storage_account( - &from_keypair.pubkey(), - &from_keypair.pubkey(), - &storage_keypair.pubkey(), - 1, - storage_account_type, - ), - Some(&from_keypair.pubkey()), - ); - - let signer_keys = vec![from_keypair.as_ref(), &storage_keypair]; - let blockhash = client - .get_recent_blockhash_with_commitment(CommitmentConfig::recent()) - .unwrap() - .0; - let mut transaction = Transaction::new(&signer_keys, message, blockhash); - client - .retry_transfer(&from_keypair, &mut transaction, 10) - .map(|_signature| ()) - } } impl Cluster for LocalCluster { @@ -686,7 +539,6 @@ impl Cluster for LocalCluster { &validator_info.ledger_path, &validator_info.voting_keypair.pubkey(), vec![validator_info.voting_keypair.clone()], - &validator_info.storage_keypair, entry_point_info, true, &cluster_validator_info.config, @@ -716,7 +568,6 @@ impl Drop for LocalCluster { #[cfg(test)] mod test { use super::*; - use solana_core::storage_stage::SLOTS_PER_TURN_TEST; use solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH; #[test] @@ -725,7 +576,6 @@ mod test { let num_nodes = 1; let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3); assert_eq!(cluster.validators.len(), num_nodes); - assert_eq!(cluster.archivers.len(), 0); } #[test] @@ -733,12 +583,9 @@ mod test { solana_logger::setup(); let mut validator_config = ValidatorConfig::default(); validator_config.rpc_config.enable_validator_exit = true; - validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST; const NUM_NODES: usize = 1; - let num_archivers = 1; let config = ClusterConfig { validator_configs: vec![ValidatorConfig::default(); NUM_NODES], - num_archivers, node_stakes: vec![3; NUM_NODES], cluster_lamports: 100, ticks_per_slot: 8, @@ -748,6 +595,5 @@ mod test { }; let cluster = LocalCluster::new(&config); assert_eq!(cluster.validators.len(), NUM_NODES); - assert_eq!(cluster.archivers.len(), num_archivers); } } diff --git a/local-cluster/tests/archiver.rs b/local-cluster/tests/archiver.rs deleted file mode 100644 index de219af0a6..0000000000 --- a/local-cluster/tests/archiver.rs +++ /dev/null @@ -1,195 +0,0 @@ -use log::*; -use serial_test_derive::serial; -use solana_archiver_lib::archiver::Archiver; -use solana_client::thin_client::create_client; -use solana_core::{ - cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}, - contact_info::ContactInfo, - gossip_service::discover_cluster, - serve_repair::ServeRepair, -
storage_stage::SLOTS_PER_TURN_TEST, - validator::ValidatorConfig, -}; -use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger, get_tmp_ledger_path}; -use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster}; -use solana_sdk::{ - commitment_config::CommitmentConfig, - genesis_config::create_genesis_config, - signature::{Keypair, Signer}, -}; -use std::{fs::remove_dir_all, sync::Arc}; - -/// Start the cluster with the given configuration and wait till the archivers are discovered -/// Then download shreds from one of them. -fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) { - solana_logger::setup(); - info!("starting archiver test"); - - let mut validator_config = ValidatorConfig::default(); - let slots_per_segment = 8; - validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST; - let config = ClusterConfig { - validator_configs: vec![validator_config; num_nodes], - num_archivers, - node_stakes: vec![100; num_nodes], - cluster_lamports: 10_000, - // keep a low slot/segment count to speed up the test - slots_per_segment, - ..ClusterConfig::default() - }; - let cluster = LocalCluster::new(&config); - - let (cluster_nodes, cluster_archivers) = - discover_cluster(&cluster.entry_point_info.gossip, num_nodes + num_archivers).unwrap(); - assert_eq!( - cluster_nodes.len() + cluster_archivers.len(), - num_nodes + num_archivers - ); - let mut archiver_count = 0; - let mut archiver_info = ContactInfo::default(); - for node in &cluster_archivers { - info!("storage: {:?} rpc: {:?}", node.storage_addr, node.rpc); - if ContactInfo::is_valid_address(&node.storage_addr) { - archiver_count += 1; - archiver_info = node.clone(); - } - } - assert_eq!(archiver_count, num_archivers); - - let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair( - cluster_nodes[0].clone(), - )); - let serve_repair = ServeRepair::new(cluster_info); - let path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&path).unwrap()); - Archiver::download_from_archiver( - &serve_repair, - &archiver_info, - &blockstore, - slots_per_segment, - ) - .unwrap(); -} - -#[test] -#[ignore] -#[serial] -fn test_archiver_startup_1_node() { - run_archiver_startup_basic(1, 1); -} - -#[test] -#[ignore] -#[serial] -fn test_archiver_startup_2_nodes() { - run_archiver_startup_basic(2, 1); -} - -#[test] -#[serial] -fn test_archiver_startup_leader_hang() { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - solana_logger::setup(); - info!("starting archiver test"); - - let leader_ledger_path = std::path::PathBuf::from("archiver_test_leader_ledger"); - let (genesis_config, _mint_keypair) = create_genesis_config(10_000); - let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - - { - let archiver_keypair = Arc::new(Keypair::new()); - let storage_keypair = Arc::new(Keypair::new()); - - info!("starting archiver node"); - let archiver_node = Node::new_localhost_with_pubkey(&archiver_keypair.pubkey()); - - let fake_gossip = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0); - let leader_info = ContactInfo::new_gossip_entry_point(&fake_gossip); - - let archiver_res = Archiver::new( - &archiver_ledger_path, - archiver_node, - leader_info, - archiver_keypair, - storage_keypair, - CommitmentConfig::recent(), - ); - - assert!(archiver_res.is_err()); - } - - let _ignored = Blockstore::destroy(&leader_ledger_path); - let _ignored = Blockstore::destroy(&archiver_ledger_path); - let _ignored = remove_dir_all(&leader_ledger_path); - let _ignored = 
remove_dir_all(&archiver_ledger_path); -} - -#[test] -#[serial] -fn test_archiver_startup_ledger_hang() { - solana_logger::setup(); - info!("starting archiver test"); - let mut validator_config = ValidatorConfig::default(); - validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST; - let cluster = LocalCluster::new_with_equal_stakes(2, 10_000, 100); - - info!("starting archiver node"); - let bad_keys = Arc::new(Keypair::new()); - let storage_keypair = Arc::new(Keypair::new()); - let mut archiver_node = Node::new_localhost_with_pubkey(&bad_keys.pubkey()); - - // Pass bad TVU sockets to prevent successful ledger download - archiver_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()]; - let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&cluster.genesis_config); - - let archiver_res = Archiver::new( - &archiver_ledger_path, - archiver_node, - cluster.entry_point_info.clone(), - bad_keys, - storage_keypair, - CommitmentConfig::recent(), - ); - - assert!(archiver_res.is_err()); -} - -#[test] -#[serial] -fn test_account_setup() { - let num_nodes = 1; - let num_archivers = 1; - let mut validator_config = ValidatorConfig::default(); - validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST; - let config = ClusterConfig { - validator_configs: vec![ValidatorConfig::default(); num_nodes], - num_archivers, - node_stakes: vec![100; num_nodes], - cluster_lamports: 10_000, - ..ClusterConfig::default() - }; - let cluster = LocalCluster::new(&config); - - let _ = discover_cluster( - &cluster.entry_point_info.gossip, - num_nodes + num_archivers as usize, - ) - .unwrap(); - // now check that the cluster actually has accounts for the archiver. - let client = create_client( - cluster.entry_point_info.client_facing_addr(), - VALIDATOR_PORT_RANGE, - ); - cluster.archiver_infos.iter().for_each(|(_, value)| { - assert_eq!( - client - .poll_get_balance_with_commitment( - &value.archiver_storage_pubkey, - CommitmentConfig::recent() - ) - .unwrap(), - 1 - ); - }); -} diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 34f4fda9c5..b814d96aa3 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -266,7 +266,7 @@ fn run_cluster_partition( ); let mut cluster = LocalCluster::new(&config); - let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap(); + let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap(); info!("PARTITION_TEST sleeping until partition starting condition",); loop { @@ -338,7 +338,7 @@ fn run_cluster_partition( assert!(alive_node_contact_infos.len() > 0); info!("PARTITION_TEST discovering nodes"); - let (cluster_nodes, _) = discover_cluster( + let cluster_nodes = discover_cluster( &alive_node_contact_infos[0].gossip, alive_node_contact_infos.len(), ) @@ -461,7 +461,7 @@ fn test_forwarding() { }; let cluster = LocalCluster::new(&config); - let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap(); + let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap(); assert!(cluster_nodes.len() >= 2); let leader_pubkey = cluster.entry_point_info.id; @@ -525,7 +525,7 @@ fn test_listener_startup() { ..ClusterConfig::default() }; let cluster = LocalCluster::new(&config); - let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap(); + let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap(); 
assert_eq!(cluster_nodes.len(), 4); } @@ -542,7 +542,7 @@ fn test_stable_operating_mode() { ..ClusterConfig::default() }; let cluster = LocalCluster::new(&config); - let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap(); + let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap(); assert_eq!(cluster_nodes.len(), 1); let client = create_client( @@ -571,13 +571,7 @@ fn test_stable_operating_mode() { } // Programs that are not available at epoch 0 - for program_id in [ - &solana_sdk::bpf_loader::id(), - &solana_storage_program::id(), - &solana_vest_program::id(), - ] - .iter() - { + for program_id in [&solana_sdk::bpf_loader::id(), &solana_vest_program::id()].iter() { assert_eq!( ( program_id, @@ -719,7 +713,7 @@ fn test_consistency_halt() { let mut cluster = LocalCluster::new(&config); sleep(Duration::from_millis(5000)); - let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap(); + let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap(); info!("num_nodes: {}", cluster_nodes.len()); // Add a validator with the leader as trusted, it should halt when it detects @@ -747,7 +741,6 @@ fn test_consistency_halt() { assert_eq!( discover_cluster(&cluster.entry_point_info.gossip, num_nodes) .unwrap() - .0 .len(), num_nodes ); @@ -762,11 +755,11 @@ fn test_consistency_halt() { break; } Ok(nodes) => { - if nodes.0.len() < 2 { + if nodes.len() < 2 { encountered_error = true; break; } - info!("checking cluster for fewer nodes.. {:?}", nodes.0.len()); + info!("checking cluster for fewer nodes.. {:?}", nodes.len()); } } let client = cluster @@ -962,7 +955,7 @@ fn test_snapshots_blockstore_floor() { // Start up a new node from a snapshot let validator_stake = 5; - let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap(); + let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap(); let mut trusted_validators = HashSet::new(); trusted_validators.insert(cluster_nodes[0].id); validator_snapshot_test_config diff --git a/multinode-demo/archiver-x.sh b/multinode-demo/archiver-x.sh deleted file mode 100755 index 442c37d060..0000000000 --- a/multinode-demo/archiver-x.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# -# Start a dynamically-configured archiver -# - -here=$(dirname "$0") -exec "$here"/archiver.sh --label x$$ "$@" diff --git a/multinode-demo/archiver.sh b/multinode-demo/archiver.sh deleted file mode 100755 index 778cc0e01e..0000000000 --- a/multinode-demo/archiver.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash -# -# A thin wrapper around `solana-archiver` that automatically provisions the -# archiver's identity and/or storage keypair if not provided by the caller. 
-# -set -e - -here=$(dirname "$0") -# shellcheck source=multinode-demo/common.sh -source "$here"/common.sh - -entrypoint=127.0.0.0:8001 -label= - -while [[ -n $1 ]]; do - if [[ ${1:0:1} = - ]]; then - if [[ $1 = --entrypoint ]]; then - entrypoint=$2 - args+=("$1" "$2") - shift 2 - elif [[ $1 = --identity ]]; then - identity=$2 - [[ -r $identity ]] || { - echo "$identity does not exist" - exit 1 - } - args+=("$1" "$2") - shift 2 - elif [[ $1 = --label ]]; then - label="-$2" - shift 2 - elif [[ $1 = --ledger ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --storage-keypair ]]; then - storage_keypair=$2 - [[ -r $storage_keypair ]] || { - echo "$storage_keypair does not exist" - exit 1 - } - args+=("$1" "$2") - shift 2 - else - echo "Unknown argument: $1" - $solana_archiver --help - exit 1 - fi - else - echo "Unknown argument: $1" - $solana_archiver --help - exit 1 - fi -done - -: "${identity:="$SOLANA_ROOT"/farf/archiver-identity"$label".json}" -: "${storage_keypair:="$SOLANA_ROOT"/farf/archiver-storage-keypair"$label".json}" -ledger="$SOLANA_ROOT"/farf/archiver-ledger"$label" - -rpc_url=$($solana_gossip rpc-url --entrypoint "$entrypoint") - -if [[ ! -r $identity ]]; then - $solana_keygen new --no-passphrase -so "$identity" - - # See https://github.com/solana-labs/solana/issues/4344 - $solana_cli --keypair "$identity" --url "$rpc_url" airdrop 1 -fi -identity_pubkey=$($solana_keygen pubkey "$identity") - -if [[ ! -r $storage_keypair ]]; then - $solana_keygen new --no-passphrase -so "$storage_keypair" - - $solana_cli --keypair "$identity" --url "$rpc_url" \ - create-archiver-storage-account "$identity_pubkey" "$storage_keypair" -fi - -default_arg --entrypoint "$entrypoint" -default_arg --identity "$identity" -default_arg --storage-keypair "$storage_keypair" -default_arg --ledger "$ledger" - -set -x -# shellcheck disable=SC2086 # Don't want to double quote $solana_archiver -exec $solana_archiver "${args[@]}" diff --git a/multinode-demo/common.sh b/multinode-demo/common.sh index cfe5d9f1d5..e1f1544d21 100644 --- a/multinode-demo/common.sh +++ b/multinode-demo/common.sh @@ -59,7 +59,6 @@ solana_gossip=$(solana_program gossip) solana_keygen=$(solana_program keygen) solana_ledger_tool=$(solana_program ledger-tool) solana_cli=$(solana_program) -solana_archiver=$(solana_program archiver) export RUST_BACKTRACE=1 diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 9c02c3c7b4..5809e2fe8f 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -78,10 +78,6 @@ while [[ -n $1 ]]; do vote_account=$2 args+=("$1" "$2") shift 2 - elif [[ $1 = --storage-keypair ]]; then - storage_keypair=$2 - args+=("$1" "$2") - shift 2 elif [[ $1 = --init-complete-file ]]; then args+=("$1" "$2") shift 2 @@ -214,7 +210,6 @@ faucet_address="${gossip_entrypoint%:*}":9900 : "${identity:=$ledger_dir/identity.json}" : "${vote_account:=$ledger_dir/vote-account.json}" -: "${storage_keypair:=$ledger_dir/storage-keypair.json}" default_arg --entrypoint "$gossip_entrypoint" if ((airdrops_enabled)); then @@ -223,7 +218,6 @@ fi default_arg --identity "$identity" default_arg --vote-account "$vote_account" -default_arg --storage-keypair "$storage_keypair" default_arg --ledger "$ledger_dir" default_arg --log - default_arg --enable-rpc-exit @@ -279,12 +273,6 @@ setup_validator_accounts() { fi echo "Validator vote account configured" - if ! 
wallet storage-account "$storage_keypair"; then - echo "Creating validator storage account" - wallet create-validator-storage-account "$identity" "$storage_keypair" || return $? - fi - echo "Validator storage account configured" - echo "Validator identity account balance:" wallet balance || return $? @@ -295,7 +283,6 @@ rpc_url=$($solana_gossip rpc-url --entrypoint "$gossip_entrypoint" --any) [[ -r "$identity" ]] || $solana_keygen new --no-passphrase -so "$identity" [[ -r "$vote_account" ]] || $solana_keygen new --no-passphrase -so "$vote_account" -[[ -r "$storage_keypair" ]] || $solana_keygen new --no-passphrase -so "$storage_keypair" setup_validator_accounts "$node_sol" diff --git a/net/common.sh b/net/common.sh index 9df146e9af..fbe534576a 100644 --- a/net/common.sh +++ b/net/common.sh @@ -37,9 +37,6 @@ clientIpListZone=() blockstreamerIpList=() blockstreamerIpListPrivate=() blockstreamerIpListZone=() -archiverIpList=() -archiverIpListPrivate=() -archiverIpListZone=() buildSshOptions() { sshOptions=( diff --git a/net/gce.sh b/net/gce.sh index a9dbcafbf9..21a6987076 100755 --- a/net/gce.sh +++ b/net/gce.sh @@ -16,7 +16,6 @@ gce) gpuBootstrapLeaderMachineType="$cpuBootstrapLeaderMachineType --accelerator count=1,type=nvidia-tesla-p100" clientMachineType="--custom-cpu 16 --custom-memory 20GB" blockstreamerMachineType="--machine-type n1-standard-8" - archiverMachineType="--custom-cpu 4 --custom-memory 16GB" selfDestructHours=8 ;; ec2) @@ -31,7 +30,6 @@ ec2) gpuBootstrapLeaderMachineType=p2.xlarge clientMachineType=c5.2xlarge blockstreamerMachineType=m5.4xlarge - archiverMachineType=c5.xlarge selfDestructHours=0 ;; azure) @@ -42,7 +40,6 @@ azure) gpuBootstrapLeaderMachineType=Standard_NC12 clientMachineType=Standard_D16s_v3 blockstreamerMachineType=Standard_D16s_v3 - archiverMachineType=Standard_D4s_v3 selfDestructHours=0 ;; colo) @@ -53,7 +50,6 @@ colo) gpuBootstrapLeaderMachineType=1 clientMachineType=0 blockstreamerMachineType=0 - archiverMachineType=0 selfDestructHours=0 ;; *) @@ -64,11 +60,9 @@ esac prefix=testnet-dev-${USER//[^A-Za-z0-9]/} additionalValidatorCount=2 clientNodeCount=0 -archiverNodeCount=0 blockstreamer=false validatorBootDiskSizeInGb=500 clientBootDiskSizeInGb=75 -archiverBootDiskSizeInGb=500 validatorAdditionalDiskSizeInGb= externalNodes=false failOnValidatorBootupFailure=true @@ -123,7 +117,6 @@ Manage testnet instances create-specific options: -n [number] - Number of additional validators (default: $additionalValidatorCount) -c [number] - Number of client nodes (default: $clientNodeCount) - -r [number] - Number of archiver nodes (default: $archiverNodeCount) -u - Include a Blockstreamer (default: $blockstreamer) -P - Use public network IP addresses (default: $publicNetwork) -g - Enable GPU and automatically set validator machine types to $gpuBootstrapLeaderMachineType @@ -151,7 +144,7 @@ Manage testnet instances Only supported on GCE. --dedicated - Use dedicated instances for additional validators (by default preemptible instances are used to reduce - cost). Note that the bootstrap validator, archiver, + cost). Note that the bootstrap validator, blockstreamer and client nodes are always dedicated. Set this flag on colo to prevent your testnet from being pre-empted by nightly test automation. 
--self-destruct-hours [number] @@ -252,9 +245,6 @@ while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt "${shortArgs[@]}"; do c) clientNodeCount=$OPTARG ;; - r) - archiverNodeCount=$OPTARG - ;; z) containsZone "$OPTARG" "${zones[@]}" || zones+=("$OPTARG") ;; @@ -607,16 +597,6 @@ EOF cloud_ForEachInstance recordInstanceIp true blockstreamerIpList } - if ! $externalNodes; then - echo "archiverIpList=()" >> "$configFile" - echo "archiverIpListPrivate=()" >> "$configFile" - fi - echo "Looking for archiver instances..." - cloud_FindInstances "$prefix-archiver" - [[ ${#instances[@]} -eq 0 ]] || { - cloud_ForEachInstance recordInstanceIp true archiverIpList - } - echo "Wrote $configFile" $metricsWriteDatapoint "testnet-deploy net-config-complete=1" } @@ -696,7 +676,6 @@ create) Bootstrap validator = $bootstrapLeaderMachineType (GPU=$enableGpu) Additional validators = $additionalValidatorCount x $validatorMachineType Client(s) = $clientNodeCount x $clientMachineType - Archivers(s) = $archiverNodeCount x $archiverMachineType Blockstreamer = $blockstreamer ======================================================================================== @@ -900,12 +879,6 @@ EOF "$startupScript" "$blockstreamerAddress" "$bootDiskType" "" "$maybePreemptible" "$sshPrivateKey" fi - if [[ $archiverNodeCount -gt 0 ]]; then - cloud_CreateInstances "$prefix" "$prefix-archiver" "$archiverNodeCount" \ - false "$archiverMachineType" "${zones[0]}" "$archiverBootDiskSizeInGb" \ - "$startupScript" "" "" "" "$maybePreemptible" "$sshPrivateKey" - fi - $metricsWriteDatapoint "testnet-deploy net-create-complete=1" prepareInstancesAndWriteConfigFile @@ -929,7 +902,6 @@ info) echo "NET_NUM_VALIDATORS=${#validatorIpList[@]}" echo "NET_NUM_CLIENTS=${#clientIpList[@]}" echo "NET_NUM_BLOCKSTREAMERS=${#blockstreamerIpList[@]}" - echo "NET_NUM_ARCHIVERS=${#archiverIpList[@]}" else printNode "Node Type" "Public IP" "Private IP" "Zone" echo "-------------------+-----------------+-----------------+--------------" @@ -976,18 +948,6 @@ info) done fi - if [[ ${#archiverIpList[@]} -gt 0 ]]; then - for i in $(seq 0 $(( ${#archiverIpList[@]} - 1)) ); do - ipAddress=${archiverIpList[$i]} - ipAddressPrivate=${archiverIpListPrivate[$i]} - zone=${archiverIpListZone[$i]} - if $evalInfo; then - echo "NET_ARCHIVER${i}_IP=$ipAddress" - else - printNode archiver "$ipAddress" "$ipAddressPrivate" "$zone" - fi - done - fi ;; status) cloud_StatusAll diff --git a/net/net.sh b/net/net.sh index 000cf9f941..1c04de5ce1 100755 --- a/net/net.sh +++ b/net/net.sh @@ -260,7 +260,7 @@ startBootstrapLeader() { $deployMethod \ bootstrap-validator \ $entrypointIp \ - $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \ + $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]})) \ \"$RUST_LOG\" \ $skipSetup \ $failOnValidatorBootupFailure \ @@ -329,7 +329,7 @@ startNode() { $deployMethod \ $nodeType \ $entrypointIp \ - $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \ + $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]})) \ \"$RUST_LOG\" \ $skipSetup \ $failOnValidatorBootupFailure \ @@ -461,13 +461,10 @@ getNodeType() { nodeIndex=0 # <-- global nodeType=validator # <-- global - for ipAddress in "${validatorIpList[@]}" b "${blockstreamerIpList[@]}" r "${archiverIpList[@]}"; do + for ipAddress in "${validatorIpList[@]}" b "${blockstreamerIpList[@]}"; do if [[ $ipAddress = b ]]; then nodeType=blockstreamer continue - elif [[ $ipAddress = r ]]; then - nodeType=archiver - continue fi if [[ $ipAddress = "$nodeAddress" 
]]; then @@ -543,7 +540,7 @@ deploy() { $metricsWriteDatapoint "testnet-deploy net-start-begin=1" declare bootstrapLeader=true - for nodeAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}" "${archiverIpList[@]}"; do + for nodeAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}"; do nodeType= nodeIndex= getNodeType @@ -623,7 +620,7 @@ deploy() { echo echo "--- Deployment Successful" echo "Bootstrap validator deployment took $bootstrapNodeDeployTime seconds" - echo "Additional validator deployment (${#validatorIpList[@]} validators, ${#blockstreamerIpList[@]} blockstreamer nodes, ${#archiverIpList[@]} archivers) took $additionalNodeDeployTime seconds" + echo "Additional validator deployment (${#validatorIpList[@]} validators, ${#blockstreamerIpList[@]} blockstreamer nodes) took $additionalNodeDeployTime seconds" echo "Client deployment (${#clientIpList[@]} instances) took $clientDeployTime seconds" echo "Network start logs in $netLogDir" } @@ -662,7 +659,7 @@ stop() { declare loopCount=0 pids=() - for ipAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}" "${archiverIpList[@]}" "${clientIpList[@]}"; do + for ipAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}" "${clientIpList[@]}"; do stopNode "$ipAddress" false # Stagger additional node stop time to avoid too many concurrent ssh @@ -1018,9 +1015,6 @@ logs) for ipAddress in "${blockstreamerIpList[@]}"; do fetchRemoteLog "$ipAddress" validator done - for ipAddress in "${archiverIpList[@]}"; do - fetchRemoteLog "$ipAddress" validator - done ;; netem) if [[ -n $netemConfigFile ]]; then diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index c3e5ad04a3..d32f6b575a 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -389,31 +389,6 @@ EOF multinode-demo/delegate-stake.sh "${args[@]}" "$internalNodesStakeLamports" fi ;; - archiver) - if [[ $deployMethod != skip ]]; then - net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/ - fi - - args=( - --entrypoint "$entrypointIp:8001" - ) - - if [[ $airdropsEnabled != true ]]; then - # If this ever becomes a problem, we need to provide the `--identity` - # argument to an existing system account with lamports in it - echo "Error: archivers not supported without airdrops" - exit 1 - fi - -cat >> ~/solana/on-reboot < validator.log.\$now 2>&1 & - pid=\$! 
- oom_score_adj "\$pid" 1000 - disown -EOF - ~/solana/on-reboot - sleep 1 - ;; *) echo "Error: unknown node type: $nodeType" exit 1 diff --git a/net/ssh.sh b/net/ssh.sh index 34b6fdb865..b86357356b 100755 --- a/net/ssh.sh +++ b/net/ssh.sh @@ -72,15 +72,6 @@ else done fi echo -echo Archivers: -if [[ ${#archiverIpList[@]} -eq 0 ]]; then - echo " None" -else - for ipAddress in "${archiverIpList[@]}"; do - printNode validator "$ipAddress" - done -fi -echo echo "Use |scp.sh| to transfer files to and from nodes" echo diff --git a/perf/src/perf_libs.rs b/perf/src/perf_libs.rs index 0891068198..cef92a94a0 100644 --- a/perf/src/perf_libs.rs +++ b/perf/src/perf_libs.rs @@ -57,26 +57,6 @@ pub struct Api<'a> { ) -> u32, >, - pub chacha_cbc_encrypt_many_sample: Symbol< - 'a, - unsafe extern "C" fn( - input: *const u8, - sha_state: *mut u8, - in_len: usize, - keys: *const u8, - ivec: *mut u8, - num_keys: u32, - samples: *const u64, - num_samples: u32, - starting_block: u64, - time_us: *mut f32, - ), - >, - - pub chacha_init_sha_state: Symbol<'a, unsafe extern "C" fn(sha_state: *mut u8, num_keys: u32)>, - pub chacha_end_sha_state: - Symbol<'a, unsafe extern "C" fn(sha_state_in: *const u8, out: *mut u8, num_keys: u32)>, - pub poh_verify_many: Symbol< 'a, unsafe extern "C" fn( diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 105f23c514..2eb9adf300 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -1657,7 +1657,6 @@ dependencies = [ "solana-rayon-threadlimit 1.2.0", "solana-sdk 1.2.0", "solana-stake-program 1.2.0", - "solana-storage-program 1.2.0", "solana-vote-program 1.2.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1727,21 +1726,6 @@ dependencies = [ "thiserror 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "solana-storage-program" -version = "1.2.0" -dependencies = [ - "bincode 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num-derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.110 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.110 (registry+https://github.com/rust-lang/crates.io-index)", - "solana-logger 1.2.0", - "solana-sdk 1.2.0", -] - [[package]] name = "solana-vote-program" version = "1.2.0" diff --git a/programs/bpf/rust/sysval/src/lib.rs b/programs/bpf/rust/sysval/src/lib.rs index ff68371f09..59f3a22bce 100644 --- a/programs/bpf/rust/sysval/src/lib.rs +++ b/programs/bpf/rust/sysval/src/lib.rs @@ -3,7 +3,7 @@ extern crate solana_sdk; use solana_sdk::{ account_info::AccountInfo, - clock::{get_segment_from_slot, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_SLOTS_PER_SEGMENT}, + clock::DEFAULT_SLOTS_PER_EPOCH, entrypoint, entrypoint::ProgramResult, info, @@ -27,10 +27,6 @@ fn process_instruction( sysvar::clock::id().log(); let clock = Clock::from_account_info(&accounts[2]).expect("clock"); assert_eq!(clock.slot, DEFAULT_SLOTS_PER_EPOCH + 1); - assert_eq!( - clock.segment, - get_segment_from_slot(clock.slot, DEFAULT_SLOTS_PER_SEGMENT) - ); // Fees info!("Fees identifier:"); diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index b467cbc86e..06395857e4 100644 --- 
a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -439,7 +439,7 @@ mod tests { RefCell::new(if sysvar::clock::check_id(&meta.pubkey) { sysvar::clock::Clock::default().create_account(1) } else if sysvar::rewards::check_id(&meta.pubkey) { - sysvar::rewards::create_account(1, 0.0, 0.0) + sysvar::rewards::create_account(1, 0.0) } else if sysvar::stake_history::check_id(&meta.pubkey) { sysvar::stake_history::create_account(1, &StakeHistory::default()) } else if config::check_id(&meta.pubkey) { @@ -680,7 +680,7 @@ mod tests { KeyedAccount::new( &sysvar::rewards::id(), false, - &RefCell::new(sysvar::rewards::create_account(1, 0.0, 0.0)) + &RefCell::new(sysvar::rewards::create_account(1, 0.0)) ), KeyedAccount::new( &sysvar::stake_history::id(), @@ -719,7 +719,7 @@ mod tests { KeyedAccount::new( &sysvar::rewards::id(), false, - &RefCell::new(sysvar::rewards::create_account(1, 0.0, 0.0)) + &RefCell::new(sysvar::rewards::create_account(1, 0.0)) ), ], &serialize(&StakeInstruction::Deactivate).unwrap(), diff --git a/programs/storage/Cargo.toml b/programs/storage/Cargo.toml deleted file mode 100644 index 0ccacc01c4..0000000000 --- a/programs/storage/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "solana-storage-program" -version = "1.2.0" -description = "Solana Storage program" -authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/solana" -license = "Apache-2.0" -homepage = "https://solana.com/" -edition = "2018" - -[dependencies] -bincode = "1.2.1" -log = "0.4.8" -rand = "0.7.0" -num-derive = "0.3" -num-traits = "0.2" -serde = "1.0.110" -serde_derive = "1.0.103" -solana-logger = { path = "../../logger", version = "1.2.0" } -solana-sdk = { path = "../../sdk", version = "1.2.0" } - -[dev-dependencies] -assert_matches = "1.3.0" - -[lib] -crate-type = ["lib", "cdylib"] -name = "solana_storage_program" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/storage/src/lib.rs b/programs/storage/src/lib.rs deleted file mode 100644 index eac526f005..0000000000 --- a/programs/storage/src/lib.rs +++ /dev/null @@ -1,12 +0,0 @@ -pub mod rewards_pools; -pub mod storage_contract; -pub mod storage_instruction; -pub mod storage_processor; - -use crate::storage_processor::process_instruction; - -solana_sdk::declare_program!( - "Storage111111111111111111111111111111111111", - solana_storage_program, - process_instruction -); diff --git a/programs/storage/src/rewards_pools.rs b/programs/storage/src/rewards_pools.rs deleted file mode 100644 index 313a60e6ab..0000000000 --- a/programs/storage/src/rewards_pools.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! rewards_pools -//! * initialize genesis with rewards pools -//! * keep track of rewards -//! 
* own mining pools - -use crate::storage_contract::create_rewards_pool; -use rand::{thread_rng, Rng}; -use solana_sdk::genesis_config::GenesisConfig; -use solana_sdk::hash::{hash, Hash}; -use solana_sdk::pubkey::Pubkey; - -// base rewards pool ID -solana_sdk::declare_id!("StorageMiningPoo111111111111111111111111111"); - -// to cut down on collisions for redemptions, we make multiple accounts -pub const NUM_REWARDS_POOLS: usize = 32; - -pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig) -> u64 { - let mut pubkey = id(); - - for _i in 0..NUM_REWARDS_POOLS { - genesis_config.add_rewards_pool(pubkey, create_rewards_pool()); - pubkey = Pubkey::new(hash(pubkey.as_ref()).as_ref()); - } - 0 // didn't consume any lamports -} - -pub fn random_id() -> Pubkey { - let mut id = Hash::new(id().as_ref()); - - for _i in 0..thread_rng().gen_range(0, NUM_REWARDS_POOLS) { - id = hash(id.as_ref()); - } - - Pubkey::new(id.as_ref()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test() { - let mut genesis_config = GenesisConfig::default(); - add_genesis_accounts(&mut genesis_config); - - for _i in 0..NUM_REWARDS_POOLS { - assert!(genesis_config.rewards_pools.get(&random_id()).is_some()) - } - } -} diff --git a/programs/storage/src/storage_contract.rs b/programs/storage/src/storage_contract.rs deleted file mode 100644 index baa4d4bbaf..0000000000 --- a/programs/storage/src/storage_contract.rs +++ /dev/null @@ -1,642 +0,0 @@ -use crate::storage_instruction::StorageAccountType; -use log::*; -use num_derive::FromPrimitive; -use serde_derive::{Deserialize, Serialize}; -use solana_sdk::{ - account::{Account, KeyedAccount}, - account_utils::StateMut, - clock::Epoch, - hash::Hash, - instruction::InstructionError, - pubkey::Pubkey, - signature::Signature, - sysvar, -}; -use std::collections::BTreeMap; - -// Todo Tune this for actual use cases when PoRep is feature complete -pub const STORAGE_ACCOUNT_SPACE: u64 = 1024 * 8; -pub const MAX_PROOFS_PER_SEGMENT: usize = 80; - -#[derive(Default, Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct Credits { - // current epoch - epoch: Epoch, - // currently pending credits - pub current_epoch: Epoch, - // credits ready to be claimed - pub redeemable: u64, -} - -impl Credits { - pub fn update_epoch(&mut self, current_epoch: Epoch) { - if self.epoch != current_epoch { - self.epoch = current_epoch; - self.redeemable += self.current_epoch; - self.current_epoch = 0; - } - } -} - -#[derive(Debug, Clone, PartialEq, FromPrimitive)] -pub enum StorageError { - InvalidSegment, - InvalidBlockhash, - InvalidProofMask, - DuplicateProof, - RewardPoolDepleted, - InvalidOwner, - ProofLimitReached, -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub enum ProofStatus { - Skipped, - Valid, - NotValid, -} - -impl Default for ProofStatus { - fn default() -> Self { - ProofStatus::Skipped - } -} - -#[derive(Default, Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct Proof { - /// The encryption key the archiver used (also used to generate offsets) - pub signature: Signature, - /// A "recent" blockhash used to generate the seed - pub blockhash: Hash, - /// The resulting sampled state - pub sha_state: Hash, - /// The segment this proof is for - pub segment_index: u64, -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum StorageContract { - Uninitialized, // Must be first (aka, 0) - - ValidatorStorage { - owner: Pubkey, - // Most recently advertised segment - segment: u64, - // Most recently advertised blockhash - hash: Hash, - // 
Lockouts and Rewards are per segment per archiver. It needs to remain this way until - // the challenge stage is added. - lockout_validations: BTreeMap>>, - // Used to keep track of ongoing credits - credits: Credits, - }, - - ArchiverStorage { - owner: Pubkey, - // TODO what to do about duplicate proofs across segments? - Check the blockhashes - // Map of Proofs per segment, in a Vec - proofs: BTreeMap>, - // Map of Rewards per segment, in a BTreeMap based on the validator account that verified - // the proof. This can be used for challenge stage when its added - validations: BTreeMap>>, - // Used to keep track of ongoing credits - credits: Credits, - }, - - RewardsPool, -} - -// utility function, used by Bank, tests, genesis -pub fn create_validator_storage_account(owner: Pubkey, lamports: u64) -> Account { - let mut storage_account = Account::new(lamports, STORAGE_ACCOUNT_SPACE as usize, &crate::id()); - - storage_account - .set_state(&StorageContract::ValidatorStorage { - owner, - segment: 0, - hash: Hash::default(), - lockout_validations: BTreeMap::new(), - credits: Credits::default(), - }) - .expect("set_state"); - - storage_account -} - -pub struct StorageAccount<'a> { - pub(crate) id: Pubkey, - account: &'a mut Account, -} - -impl<'a> StorageAccount<'a> { - pub fn new(id: Pubkey, account: &'a mut Account) -> Self { - Self { id, account } - } - - pub fn initialize_storage( - &mut self, - owner: Pubkey, - account_type: StorageAccountType, - ) -> Result<(), InstructionError> { - let storage_contract = &mut self.account.state()?; - if let StorageContract::Uninitialized = storage_contract { - *storage_contract = match account_type { - StorageAccountType::Archiver => StorageContract::ArchiverStorage { - owner, - proofs: BTreeMap::new(), - validations: BTreeMap::new(), - credits: Credits::default(), - }, - StorageAccountType::Validator => StorageContract::ValidatorStorage { - owner, - segment: 0, - hash: Hash::default(), - lockout_validations: BTreeMap::new(), - credits: Credits::default(), - }, - }; - self.account.set_state(storage_contract) - } else { - Err(InstructionError::AccountAlreadyInitialized) - } - } - - pub fn submit_mining_proof( - &mut self, - sha_state: Hash, - segment_index: u64, - signature: Signature, - blockhash: Hash, - clock: sysvar::clock::Clock, - ) -> Result<(), InstructionError> { - let mut storage_contract = &mut self.account.state()?; - if let StorageContract::ArchiverStorage { - proofs, - validations, - credits, - .. 
- } = &mut storage_contract - { - let current_segment = clock.segment; - - // clean up the account - // TODO check for time correctness - storage seems to run at a delay of about 3 - *proofs = proofs - .iter() - .filter(|(segment, _)| **segment >= current_segment.saturating_sub(5)) - .map(|(segment, proofs)| (*segment, proofs.clone())) - .collect(); - *validations = validations - .iter() - .filter(|(segment, _)| **segment >= current_segment.saturating_sub(10)) - .map(|(segment, rewards)| (*segment, rewards.clone())) - .collect(); - - if segment_index >= current_segment { - // attempt to submit proof for unconfirmed segment - return Err(InstructionError::Custom( - StorageError::InvalidSegment as u32, - )); - } - - debug!( - "Mining proof submitted with contract {:?} segment_index: {}", - sha_state, segment_index - ); - - // TODO check that this blockhash is valid and recent - // if !is_valid(&blockhash) { - // // proof isn't using a recent blockhash - // return Err(InstructionError::Custom(InvalidBlockhash as u32)); - // } - - let proof = Proof { - sha_state, - signature, - blockhash, - segment_index, - }; - // store the proofs in the "current" segment's entry in the hash map. - let segment_proofs = proofs.entry(current_segment).or_default(); - if segment_proofs.contains(&proof) { - // do not accept duplicate proofs - return Err(InstructionError::Custom( - StorageError::DuplicateProof as u32, - )); - } - if segment_proofs.len() >= MAX_PROOFS_PER_SEGMENT { - // do not accept more than MAX_PROOFS_PER_SEGMENT - return Err(InstructionError::Custom( - StorageError::ProofLimitReached as u32, - )); - } - credits.update_epoch(clock.epoch); - segment_proofs.push(proof); - self.account.set_state(storage_contract) - } else { - Err(InstructionError::InvalidArgument) - } - } - - pub fn advertise_storage_recent_blockhash( - &mut self, - hash: Hash, - segment: u64, - clock: sysvar::clock::Clock, - ) -> Result<(), InstructionError> { - let mut storage_contract = &mut self.account.state()?; - if let StorageContract::ValidatorStorage { - segment: state_segment, - hash: state_hash, - lockout_validations, - credits, - .. - } = &mut storage_contract - { - debug!("advertise new segment: {} orig: {}", segment, clock.segment); - if segment < *state_segment || segment > clock.segment { - return Err(InstructionError::Custom( - StorageError::InvalidSegment as u32, - )); - } - - *state_segment = segment; - *state_hash = hash; - - // storage epoch updated, move the lockout_validations to credits - let (_num_valid, total_validations) = count_valid_proofs(&lockout_validations); - lockout_validations.clear(); - credits.update_epoch(clock.epoch); - credits.current_epoch += total_validations; - self.account.set_state(storage_contract) - } else { - Err(InstructionError::InvalidArgument) - } - } - - pub fn proof_validation( - &mut self, - me: &Pubkey, - clock: sysvar::clock::Clock, - segment_index: u64, - proofs_per_account: Vec>, - archiver_accounts: &mut [StorageAccount], - ) -> Result<(), InstructionError> { - let mut storage_contract = &mut self.account.state()?; - if let StorageContract::ValidatorStorage { - segment: state_segment, - lockout_validations, - .. 
- } = &mut storage_contract - { - if segment_index > *state_segment { - return Err(InstructionError::Custom( - StorageError::InvalidSegment as u32, - )); - } - - let accounts = archiver_accounts - .iter_mut() - .enumerate() - .filter_map(|(i, account)| { - account.account.state().ok().map(|contract| match contract { - StorageContract::ArchiverStorage { - proofs: account_proofs, - .. - } => { - //TODO do this better - if let Some(segment_proofs) = - account_proofs.get(&segment_index).cloned() - { - if proofs_per_account - .get(i) - .filter(|proofs| proofs.len() == segment_proofs.len()) - .is_some() - { - Some(account) - } else { - None - } - } else { - None - } - } - _ => None, - }) - }) - .flatten() - .collect::>(); - - if accounts.len() != proofs_per_account.len() { - // don't have all the accounts to validate the proofs_per_account against - return Err(InstructionError::Custom( - StorageError::InvalidProofMask as u32, - )); - } - - let stored_proofs: Vec<_> = proofs_per_account - .into_iter() - .zip(accounts.into_iter()) - .filter_map(|(checked_proofs, account)| { - if store_validation_result(me, &clock, account, segment_index, &checked_proofs) - .is_ok() - { - Some((account.id, checked_proofs)) - } else { - None - } - }) - .collect(); - - // allow validators to store successful validations - stored_proofs - .into_iter() - .for_each(|(archiver_account_id, proof_mask)| { - lockout_validations - .entry(segment_index) - .or_default() - .insert(archiver_account_id, proof_mask); - }); - - self.account.set_state(storage_contract) - } else { - Err(InstructionError::InvalidArgument) - } - } - - pub fn claim_storage_reward( - &mut self, - rewards_pool: &KeyedAccount, - clock: sysvar::clock::Clock, - rewards: sysvar::rewards::Rewards, - owner: &mut StorageAccount, - ) -> Result<(), InstructionError> { - let mut storage_contract = &mut self.account.state()?; - - if let StorageContract::ValidatorStorage { - owner: account_owner, - credits, - .. - } = &mut storage_contract - { - if owner.id != *account_owner { - return Err(InstructionError::Custom(StorageError::InvalidOwner as u32)); - } - - credits.update_epoch(clock.epoch); - check_redeemable(credits, rewards.storage_point_value, rewards_pool, owner)?; - - self.account.set_state(storage_contract) - } else if let StorageContract::ArchiverStorage { - owner: account_owner, - validations, - credits, - .. - } = &mut storage_contract - { - if owner.id != *account_owner { - return Err(InstructionError::Custom(StorageError::InvalidOwner as u32)); - } - credits.update_epoch(clock.epoch); - let (num_validations, _total_proofs) = count_valid_proofs(&validations); - credits.current_epoch += num_validations; - validations.clear(); - check_redeemable(credits, rewards.storage_point_value, rewards_pool, owner)?; - - self.account.set_state(storage_contract) - } else { - Err(InstructionError::InvalidArgument) - } - } -} - -fn check_redeemable( - credits: &mut Credits, - storage_point_value: f64, - rewards_pool: &KeyedAccount, - owner: &mut StorageAccount, -) -> Result<(), InstructionError> { - let rewards = (credits.redeemable as f64 * storage_point_value) as u64; - if rewards_pool.lamports()? 
< rewards { - Err(InstructionError::Custom( - StorageError::RewardPoolDepleted as u32, - )) - } else { - if rewards >= 1 { - rewards_pool.try_account_ref_mut()?.lamports -= rewards; - owner.account.lamports += rewards; - //clear credits - credits.redeemable = 0; - } - Ok(()) - } -} - -pub fn create_rewards_pool() -> Account { - Account::new_data(std::u64::MAX, &StorageContract::RewardsPool, &crate::id()).unwrap() -} - -/// Store the result of a proof validation into the archiver account -fn store_validation_result( - me: &Pubkey, - clock: &sysvar::clock::Clock, - storage_account: &mut StorageAccount, - segment: u64, - proof_mask: &[ProofStatus], -) -> Result<(), InstructionError> { - let mut storage_contract = storage_account.account.state()?; - match &mut storage_contract { - StorageContract::ArchiverStorage { - proofs, - validations, - credits, - .. - } => { - if !proofs.contains_key(&segment) { - return Err(InstructionError::InvalidAccountData); - } - - if proofs.get(&segment).unwrap().len() != proof_mask.len() { - return Err(InstructionError::InvalidAccountData); - } - - let (recorded_validations, _) = count_valid_proofs(&validations); - let entry = validations.entry(segment).or_default(); - if !entry.contains_key(me) { - entry.insert(*me, proof_mask.to_vec()); - } - let (total_validations, _) = count_valid_proofs(&validations); - credits.update_epoch(clock.epoch); - credits.current_epoch += total_validations - recorded_validations; - } - _ => return Err(InstructionError::InvalidAccountData), - } - storage_account.account.set_state(&storage_contract) -} - -fn count_valid_proofs( - validations: &BTreeMap>>, -) -> (u64, u64) { - let proofs = validations - .iter() - .flat_map(|(_, proofs)| { - proofs - .iter() - .flat_map(|(_, proofs)| proofs) - .collect::>() - }) - .collect::>(); - let mut num = 0; - for proof in proofs.iter() { - if let ProofStatus::Valid = proof { - num += 1; - } - } - (num, proofs.len() as u64) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{id, rewards_pools}; - use std::{cell::RefCell, collections::BTreeMap}; - - #[test] - fn test_account_data() { - solana_logger::setup(); - let mut account = Account::default(); - account.data.resize(STORAGE_ACCOUNT_SPACE as usize, 0); - let storage_account = StorageAccount::new(Pubkey::default(), &mut account); - // pretend it's a validator op code - let mut contract = storage_account.account.state().unwrap(); - if let StorageContract::ValidatorStorage { .. } = contract { - assert!(true) - } - if let StorageContract::ArchiverStorage { .. } = &mut contract { - panic!("Contract should not decode into two types"); - } - - contract = StorageContract::ValidatorStorage { - owner: Pubkey::default(), - segment: 0, - hash: Hash::default(), - lockout_validations: BTreeMap::new(), - credits: Credits::default(), - }; - storage_account.account.set_state(&contract).unwrap(); - if let StorageContract::ArchiverStorage { .. } = contract { - panic!("Wrong contract type"); - } - contract = StorageContract::ArchiverStorage { - owner: Pubkey::default(), - proofs: BTreeMap::new(), - validations: BTreeMap::new(), - credits: Credits::default(), - }; - storage_account.account.set_state(&contract).unwrap(); - if let StorageContract::ValidatorStorage { .. 
} = contract { - panic!("Wrong contract type"); - } - } - - #[test] - fn test_process_validation() { - let mut account = StorageAccount { - id: Pubkey::default(), - account: &mut Account { - owner: id(), - ..Account::default() - }, - }; - let segment_index = 0; - let proof = Proof { - segment_index, - ..Proof::default() - }; - - // account has no space - store_validation_result( - &Pubkey::default(), - &sysvar::clock::Clock::default(), - &mut account, - segment_index, - &vec![ProofStatus::default(); 1], - ) - .unwrap_err(); - - account - .account - .data - .resize(STORAGE_ACCOUNT_SPACE as usize, 0); - let storage_contract = &mut account.account.state().unwrap(); - if let StorageContract::Uninitialized = storage_contract { - let mut proofs = BTreeMap::new(); - proofs.insert(0, vec![proof.clone()]); - *storage_contract = StorageContract::ArchiverStorage { - owner: Pubkey::default(), - proofs, - validations: BTreeMap::new(), - credits: Credits::default(), - }; - }; - account.account.set_state(storage_contract).unwrap(); - - // proof is valid - store_validation_result( - &Pubkey::default(), - &sysvar::clock::Clock::default(), - &mut account, - segment_index, - &vec![ProofStatus::Valid], - ) - .unwrap(); - - // proof failed verification but we should still be able to store it - store_validation_result( - &Pubkey::default(), - &sysvar::clock::Clock::default(), - &mut account, - segment_index, - &vec![ProofStatus::NotValid], - ) - .unwrap(); - } - - #[test] - fn test_redeemable() { - let mut credits = Credits { - epoch: 0, - current_epoch: 0, - redeemable: 100, - }; - let mut owner_account = Account { - lamports: 1, - ..Account::default() - }; - let mut rewards_pool = RefCell::new(create_rewards_pool()); - let pool_id = rewards_pools::id(); - let keyed_pool_account = KeyedAccount::new(&pool_id, false, &mut rewards_pool); - let mut owner = StorageAccount { - id: Pubkey::default(), - account: &mut owner_account, - }; - - // check that redeeming from depleted pools fails - keyed_pool_account.account.borrow_mut().lamports = 0; - assert_eq!( - check_redeemable(&mut credits, 1.0, &keyed_pool_account, &mut owner), - Err(InstructionError::Custom( - StorageError::RewardPoolDepleted as u32, - )) - ); - assert_eq!(owner.account.lamports, 1); - - keyed_pool_account.account.borrow_mut().lamports = 200; - assert_eq!( - check_redeemable(&mut credits, 1.0, &keyed_pool_account, &mut owner), - Ok(()) - ); - // check that the owner's balance increases - assert_eq!(owner.account.lamports, 101); - } -} diff --git a/programs/storage/src/storage_instruction.rs b/programs/storage/src/storage_instruction.rs deleted file mode 100644 index d5213686f2..0000000000 --- a/programs/storage/src/storage_instruction.rs +++ /dev/null @@ -1,189 +0,0 @@ -use crate::storage_contract::{ProofStatus, STORAGE_ACCOUNT_SPACE}; -use crate::{id, rewards_pools}; -use serde_derive::{Deserialize, Serialize}; -use solana_sdk::hash::Hash; -use solana_sdk::instruction::{AccountMeta, Instruction}; -use solana_sdk::pubkey::Pubkey; -use solana_sdk::signature::Signature; -use solana_sdk::system_instruction; -use solana_sdk::sysvar::{clock, rewards}; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Copy)] -pub enum StorageAccountType { - Archiver, - Validator, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum StorageInstruction { - /// Initialize the account as a validator or archiver - /// - /// Expects 1 Account: - /// 0 - Account to be initialized - InitializeStorage { - owner: Pubkey, - account_type: StorageAccountType, - 
}, - - SubmitMiningProof { - sha_state: Hash, - segment_index: u64, - signature: Signature, - blockhash: Hash, - }, - AdvertiseStorageRecentBlockhash { - hash: Hash, - segment: u64, - }, - /// Redeem storage reward credits - /// - /// Expects 1 Account: - /// 0 - Storage account with credits to redeem - /// 1 - Clock Syscall to figure out the clock epoch - /// 2 - Archiver account to credit - this account *must* be the owner - /// 3 - MiningPool account to redeem credits from - /// 4 - Rewards Syscall to figure out point values - ClaimStorageReward, - ProofValidation { - /// The segment during which this proof was generated - segment: u64, - /// A Vec of proof masks per keyed archiver account loaded by the instruction - proofs: Vec>, - }, -} - -fn get_ratios() -> (u64, u64) { - // max number bytes available for account metas and proofs - // The maximum transaction size is == `PACKET_DATA_SIZE` (1232 bytes) - // There are approx. 900 bytes left over after the storage instruction is wrapped into - // a signed transaction. - static MAX_BYTES: u64 = 900; - let account_meta_size: u64 = - bincode::serialized_size(&AccountMeta::new(Pubkey::new_rand(), false)).unwrap_or(0); - let proof_size: u64 = bincode::serialized_size(&ProofStatus::default()).unwrap_or(0); - - // the ratio between account meta size and a single proof status - let ratio = (account_meta_size + proof_size - 1) / proof_size; - let bytes = (MAX_BYTES + ratio - 1) / ratio; - (ratio, bytes) -} - -/// Returns how many accounts and their proofs will fit in a single proof validation tx -/// -/// # Arguments -/// -/// * `proof_mask_max` - The largest proof mask across all accounts intended for submission -/// -pub fn validation_account_limit(proof_mask_max: usize) -> u64 { - let (ratio, bytes) = get_ratios(); - // account_meta_count * (ratio + proof_mask_max) = bytes - bytes / (ratio + proof_mask_max as u64) -} - -pub fn proof_mask_limit() -> u64 { - let (ratio, bytes) = get_ratios(); - bytes - ratio -} - -pub fn create_storage_account( - from_pubkey: &Pubkey, - storage_owner: &Pubkey, - storage_pubkey: &Pubkey, - lamports: u64, - account_type: StorageAccountType, -) -> Vec { - vec![ - system_instruction::create_account( - from_pubkey, - storage_pubkey, - lamports, - STORAGE_ACCOUNT_SPACE, - &id(), - ), - Instruction::new( - id(), - &StorageInstruction::InitializeStorage { - owner: *storage_owner, - account_type, - }, - vec![AccountMeta::new(*storage_pubkey, false)], - ), - ] -} - -pub fn mining_proof( - storage_pubkey: &Pubkey, - sha_state: Hash, - segment_index: u64, - signature: Signature, - blockhash: Hash, -) -> Instruction { - let storage_instruction = StorageInstruction::SubmitMiningProof { - sha_state, - segment_index, - signature, - blockhash, - }; - let account_metas = vec![ - AccountMeta::new(*storage_pubkey, true), - AccountMeta::new(clock::id(), false), - ]; - Instruction::new(id(), &storage_instruction, account_metas) -} - -pub fn advertise_recent_blockhash( - storage_pubkey: &Pubkey, - storage_hash: Hash, - segment: u64, -) -> Instruction { - let storage_instruction = StorageInstruction::AdvertiseStorageRecentBlockhash { - hash: storage_hash, - segment, - }; - let account_metas = vec![ - AccountMeta::new(*storage_pubkey, true), - AccountMeta::new(clock::id(), false), - ]; - Instruction::new(id(), &storage_instruction, account_metas) -} - -pub fn proof_validation( - storage_pubkey: &Pubkey, - segment: u64, - checked_proofs: Vec<(Pubkey, Vec)>, -) -> Instruction { - let mut account_metas = vec![ - 
AccountMeta::new(*storage_pubkey, true), - AccountMeta::new(clock::id(), false), - ]; - let mut proofs = vec![]; - checked_proofs.into_iter().for_each(|(id, p)| { - proofs.push(p); - account_metas.push(AccountMeta::new(id, false)) - }); - let storage_instruction = StorageInstruction::ProofValidation { segment, proofs }; - Instruction::new(id(), &storage_instruction, account_metas) -} - -pub fn claim_reward(owner_pubkey: &Pubkey, storage_pubkey: &Pubkey) -> Instruction { - let storage_instruction = StorageInstruction::ClaimStorageReward; - let account_metas = vec![ - AccountMeta::new(*storage_pubkey, false), - AccountMeta::new(clock::id(), false), - AccountMeta::new(rewards::id(), false), - AccountMeta::new(rewards_pools::random_id(), false), - AccountMeta::new(*owner_pubkey, false), - ]; - Instruction::new(id(), &storage_instruction, account_metas) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn check_size() { - // check that if there's 50 proof per account, only 1 account can fit in a single tx - assert_eq!(validation_account_limit(50), 1); - } -} diff --git a/programs/storage/src/storage_processor.rs b/programs/storage/src/storage_processor.rs deleted file mode 100644 index c0be6124eb..0000000000 --- a/programs/storage/src/storage_processor.rs +++ /dev/null @@ -1,296 +0,0 @@ -//! storage program -//! Receive mining proofs from miners, validate the answers -//! and give reward for good proofs. -use crate::{storage_contract::StorageAccount, storage_instruction::StorageInstruction}; -use solana_sdk::{ - account::KeyedAccount, - instruction::InstructionError, - program_utils::limited_deserialize, - pubkey::Pubkey, - sysvar::{clock::Clock, rewards::Rewards, Sysvar}, -}; - -pub fn process_instruction( - _program_id: &Pubkey, - keyed_accounts: &[KeyedAccount], - data: &[u8], -) -> Result<(), InstructionError> { - solana_logger::setup(); - - let (me, rest) = keyed_accounts.split_at(1); - let me_unsigned = me[0].signer_key().is_none(); - let mut me_account = me[0].try_account_ref_mut()?; - let mut storage_account = StorageAccount::new(*me[0].unsigned_key(), &mut me_account); - - match limited_deserialize(data)? 
{ - StorageInstruction::InitializeStorage { - owner, - account_type, - } => { - if !rest.is_empty() { - return Err(InstructionError::InvalidArgument); - } - storage_account.initialize_storage(owner, account_type) - } - StorageInstruction::SubmitMiningProof { - sha_state, - segment_index, - signature, - blockhash, - } => { - if me_unsigned || rest.len() != 1 { - // This instruction must be signed by `me` - return Err(InstructionError::InvalidArgument); - } - let clock = Clock::from_keyed_account(&rest[0])?; - storage_account.submit_mining_proof( - sha_state, - segment_index, - signature, - blockhash, - clock, - ) - } - StorageInstruction::AdvertiseStorageRecentBlockhash { hash, segment } => { - if me_unsigned || rest.len() != 1 { - // This instruction must be signed by `me` - return Err(InstructionError::InvalidArgument); - } - let clock = Clock::from_keyed_account(&rest[0])?; - storage_account.advertise_storage_recent_blockhash(hash, segment, clock) - } - StorageInstruction::ClaimStorageReward => { - if rest.len() != 4 { - return Err(InstructionError::InvalidArgument); - } - let (clock, rest) = rest.split_at(1); - let (rewards, rest) = rest.split_at(1); - let (rewards_pools, owner) = rest.split_at(1); - - let rewards = Rewards::from_keyed_account(&rewards[0])?; - let clock = Clock::from_keyed_account(&clock[0])?; - let mut owner_account = owner[0].try_account_ref_mut()?; - let mut owner = StorageAccount::new(*owner[0].unsigned_key(), &mut owner_account); - - storage_account.claim_storage_reward(&rewards_pools[0], clock, rewards, &mut owner) - } - StorageInstruction::ProofValidation { segment, proofs } => { - if rest.is_empty() { - return Err(InstructionError::InvalidArgument); - } - - let (clock, rest) = rest.split_at(1); - if me_unsigned || rest.is_empty() { - // This instruction must be signed by `me` and `rest` cannot be empty - return Err(InstructionError::InvalidArgument); - } - let me_id = storage_account.id; - let clock = Clock::from_keyed_account(&clock[0])?; - let mut rest = rest - .iter() - .map(|keyed_account| Ok((keyed_account, keyed_account.try_account_ref_mut()?))) - .collect::, InstructionError>>()?; - let mut rest = rest - .iter_mut() - .map(|(keyed_account, account_ref)| { - StorageAccount::new(*keyed_account.unsigned_key(), account_ref) - }) - .collect::>(); - storage_account.proof_validation(&me_id, clock, segment, proofs, &mut rest) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - id, - storage_contract::STORAGE_ACCOUNT_SPACE, - storage_instruction::{self, StorageAccountType}, - }; - use log::*; - - use assert_matches::assert_matches; - use solana_sdk::{ - account::{create_keyed_accounts, Account, KeyedAccount}, - clock::DEFAULT_SLOTS_PER_SEGMENT, - hash::Hash, - instruction::{Instruction, InstructionError}, - signature::Signature, - sysvar::{ - clock::{self, Clock}, - Sysvar, - }, - }; - use std::cell::RefCell; - - fn test_instruction( - ix: &Instruction, - program_accounts: &[Account], - ) -> Result<(), InstructionError> { - let program_accounts: Vec<_> = program_accounts - .iter() - .map(|account| RefCell::new(account.clone())) - .collect(); - let keyed_accounts: Vec<_> = ix - .accounts - .iter() - .zip(program_accounts.iter()) - .map(|(account_meta, account)| { - KeyedAccount::new(&account_meta.pubkey, account_meta.is_signer, account) - }) - .collect(); - - let ret = process_instruction(&id(), &keyed_accounts, &ix.data); - info!("ret: {:?}", ret); - ret - } - - #[test] - fn test_proof_bounds() { - let account_owner = Pubkey::new_rand(); - let 
pubkey = Pubkey::new_rand(); - let mut account = Account { - data: vec![0; STORAGE_ACCOUNT_SPACE as usize], - ..Account::default() - }; - { - let mut storage_account = StorageAccount::new(pubkey, &mut account); - storage_account - .initialize_storage(account_owner, StorageAccountType::Archiver) - .unwrap(); - } - - let ix = storage_instruction::mining_proof( - &pubkey, - Hash::default(), - 0, - Signature::default(), - Hash::default(), - ); - // the proof is for segment 0, need to move the slot into segment 2 - let mut clock_account = Clock::default().create_account(1); - Clock::to_account( - &Clock { - slot: DEFAULT_SLOTS_PER_SEGMENT * 2, - segment: 2, - ..Clock::default() - }, - &mut clock_account, - ); - - assert_eq!(test_instruction(&ix, &[account, clock_account]), Ok(())); - } - - #[test] - fn test_storage_tx() { - let pubkey = Pubkey::new_rand(); - let accounts = [(&pubkey, &RefCell::new(Account::default()))]; - let keyed_accounts = create_keyed_accounts(&accounts); - assert!(process_instruction(&id(), &keyed_accounts, &[]).is_err()); - } - - #[test] - fn test_serialize_overflow() { - let pubkey = Pubkey::new_rand(); - let clock_id = clock::id(); - let mut keyed_accounts = Vec::new(); - let user_account = RefCell::new(Account::default()); - let clock_account = RefCell::new(Clock::default().create_account(1)); - keyed_accounts.push(KeyedAccount::new(&pubkey, true, &user_account)); - keyed_accounts.push(KeyedAccount::new(&clock_id, false, &clock_account)); - - let ix = storage_instruction::advertise_recent_blockhash(&pubkey, Hash::default(), 1); - - assert_eq!( - process_instruction(&id(), &keyed_accounts, &ix.data), - Err(InstructionError::InvalidAccountData) - ); - } - - #[test] - fn test_invalid_accounts_len() { - let pubkey = Pubkey::new_rand(); - let accounts = [Account::default()]; - - let ix = storage_instruction::mining_proof( - &pubkey, - Hash::default(), - 0, - Signature::default(), - Hash::default(), - ); - // move tick height into segment 1 - let mut clock_account = Clock::default().create_account(1); - Clock::to_account( - &Clock { - slot: 16, - segment: 1, - ..Clock::default() - }, - &mut clock_account, - ); - - assert!(test_instruction(&ix, &accounts).is_err()); - - let accounts = [Account::default(), clock_account, Account::default()]; - - assert!(test_instruction(&ix, &accounts).is_err()); - } - - #[test] - fn test_submit_mining_invalid_slot() { - solana_logger::setup(); - let pubkey = Pubkey::new_rand(); - let mut accounts = [Account::default(), Account::default()]; - accounts[0].data.resize(STORAGE_ACCOUNT_SPACE as usize, 0); - accounts[1].data.resize(STORAGE_ACCOUNT_SPACE as usize, 0); - - let ix = storage_instruction::mining_proof( - &pubkey, - Hash::default(), - 0, - Signature::default(), - Hash::default(), - ); - - // submitting a proof for a slot in the past, so this should fail - assert!(test_instruction(&ix, &accounts).is_err()); - } - - #[test] - fn test_submit_mining_ok() { - solana_logger::setup(); - let account_owner = Pubkey::new_rand(); - let pubkey = Pubkey::new_rand(); - let mut account = Account::default(); - account.data.resize(STORAGE_ACCOUNT_SPACE as usize, 0); - { - let mut storage_account = StorageAccount::new(pubkey, &mut account); - storage_account - .initialize_storage(account_owner, StorageAccountType::Archiver) - .unwrap(); - } - - let ix = storage_instruction::mining_proof( - &pubkey, - Hash::default(), - 0, - Signature::default(), - Hash::default(), - ); - // move slot into segment 1 - let mut clock_account = 
Clock::default().create_account(1); - Clock::to_account( - &Clock { - slot: DEFAULT_SLOTS_PER_SEGMENT, - segment: 1, - ..Clock::default() - }, - &mut clock_account, - ); - - assert_matches!(test_instruction(&ix, &[account, clock_account]), Ok(_)); - } -} diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 8ec3292d9f..cc99b6af70 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -34,7 +34,6 @@ solana-metrics = { path = "../metrics", version = "1.2.0" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.0" } solana-sdk = { path = "../sdk", version = "1.2.0" } solana-stake-program = { path = "../programs/stake", version = "1.2.0" } -solana-storage-program = { path = "../programs/storage", version = "1.2.0" } solana-vote-program = { path = "../programs/vote", version = "1.2.0" } tempfile = "3.1.0" thiserror = "1.0" diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 2bc794c465..6b95c9822b 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -429,10 +429,7 @@ impl Accounts { AccountAddressFilter::Exclude => !filter_by_address.contains(&pubkey), AccountAddressFilter::Include => filter_by_address.contains(&pubkey), }; - should_include_pubkey - && account.lamports != 0 - && !(account.lamports == std::u64::MAX - && account.owner == solana_storage_program::id()) + should_include_pubkey && account.lamports != 0 }) .map(|(pubkey, account, _slot)| (*pubkey, account.lamports)) { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7eb4258e7c..5b78afb4b7 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -19,8 +19,6 @@ use crate::{ }, stakes::Stakes, status_cache::{SlotDelta, StatusCache}, - storage_utils, - storage_utils::StorageAccounts, system_instruction_processor::{self, get_system_account_kind, SystemAccountKind}, transaction_batch::TransactionBatch, transaction_utils::OrderedIterator, @@ -37,8 +35,8 @@ use solana_metrics::{ use solana_sdk::{ account::Account, clock::{ - get_segment_from_slot, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, - DEFAULT_TICKS_PER_SECOND, MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, SECONDS_PER_DAY, + Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, + MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, SECONDS_PER_DAY, }, epoch_schedule::EpochSchedule, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -237,6 +235,13 @@ impl HashAgeKind { } } +#[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize)] +struct UnusedAccounts { + unused1: HashSet, + unused2: HashSet, + unused3: HashMap, +} + /// Manager for the state of all accounts and programs after processing its entries. #[derive(Default, Deserialize, Serialize)] pub struct Bank { @@ -303,8 +308,8 @@ pub struct Bank { /// The number of slots per year, used for inflation slots_per_year: f64, - /// The number of slots per Storage segment - slots_per_segment: u64, + /// Unused + unused: u64, /// Bank slot (i.e. 
block) slot: Slot, @@ -346,8 +351,8 @@ pub struct Bank { /// cache of vote_account and stake_account state for this fork stakes: RwLock, - /// cache of validator and archiver storage accounts for this fork - storage_accounts: RwLock, + /// unused + unused_accounts: RwLock, /// staked nodes on epoch boundaries, saved off when a bank.slot() is at /// a leader schedule calculation boundary @@ -466,7 +471,7 @@ impl Bank { ticks_per_slot: parent.ticks_per_slot, ns_per_slot: parent.ns_per_slot, genesis_creation_time: parent.genesis_creation_time, - slots_per_segment: parent.slots_per_segment, + unused: parent.unused, slots_per_year: parent.slots_per_year, epoch_schedule, collected_rent: AtomicU64::new(0), @@ -480,7 +485,7 @@ impl Bank { transaction_count: AtomicU64::new(parent.transaction_count()), stakes: RwLock::new(parent.stakes.read().unwrap().clone_with_epoch(epoch)), epoch_stakes: parent.epoch_stakes.clone(), - storage_accounts: RwLock::new(parent.storage_accounts.read().unwrap().clone()), + unused_accounts: RwLock::new(parent.unused_accounts.read().unwrap().clone()), parent_hash: parent.hash(), parent_slot: parent.slot(), collector_id: *collector_id, @@ -595,7 +600,7 @@ impl Bank { pub fn clock(&self) -> sysvar::clock::Clock { sysvar::clock::Clock { slot: self.slot, - segment: get_segment_from_slot(self.slot, self.slots_per_segment), + unused: 0, epoch: self.epoch_schedule.get_epoch(self.slot), leader_schedule_epoch: self.epoch_schedule.get_leader_schedule_epoch(self.slot), unix_timestamp: self.unix_timestamp(), @@ -716,35 +721,26 @@ impl Bank { // years_elapsed = slots_elapsed / slots/year let period = self.epoch_schedule.get_slots_in_epoch(epoch) as f64 / self.slots_per_year; - let (validator_rewards, storage_rewards) = { + let validator_rewards = { let inflation = self.inflation.read().unwrap(); - ( - (*inflation).validator(year) * self.capitalization() as f64 * period, - (*inflation).storage(year) * self.capitalization() as f64 * period, - ) + (*inflation).validator(year) * self.capitalization() as f64 * period }; let validator_points = self.stakes.write().unwrap().claim_points(); - let storage_points = self.storage_accounts.write().unwrap().claim_points(); - - let (validator_point_value, storage_point_value) = self.check_point_values( - validator_rewards / validator_points as f64, - storage_rewards / storage_points as f64, - ); + let validator_point_value = + self.check_point_value(validator_rewards / validator_points as f64); self.update_sysvar_account(&sysvar::rewards::id(), |account| { sysvar::rewards::create_account( self.inherit_sysvar_account_balance(account), validator_point_value, - storage_point_value, ) }); let validator_rewards = self.pay_validator_rewards(validator_point_value); - self.capitalization.fetch_add( - validator_rewards + storage_rewards as u64, - Ordering::Relaxed, - ); + + self.capitalization + .fetch_add(validator_rewards as u64, Ordering::Relaxed); } /// iterate over all stakes, redeem vote credits for each stake we can @@ -815,24 +811,17 @@ impl Bank { // If the point values are not `normal`, bring them back into range and // set them to the last value or 0. 
- fn check_point_values( - &self, - mut validator_point_value: f64, - mut storage_point_value: f64, - ) -> (f64, f64) { + fn check_point_value(&self, mut validator_point_value: f64) -> f64 { let rewards = sysvar::rewards::Rewards::from_account( &self .get_account(&sysvar::rewards::id()) - .unwrap_or_else(|| sysvar::rewards::create_account(1, 0.0, 0.0)), + .unwrap_or_else(|| sysvar::rewards::create_account(1, 0.0)), ) .unwrap_or_else(Default::default); if !validator_point_value.is_normal() { validator_point_value = rewards.validator_point_value; } - if !storage_point_value.is_normal() { - storage_point_value = rewards.storage_point_value - } - (validator_point_value, storage_point_value) + validator_point_value } fn collect_fees(&self) { @@ -945,7 +934,7 @@ impl Bank { self.ns_per_slot = genesis_config.poh_config.target_tick_duration.as_nanos() * genesis_config.ticks_per_slot as u128; self.genesis_creation_time = genesis_config.creation_time; - self.slots_per_segment = genesis_config.slots_per_segment; + self.unused = genesis_config.unused; self.max_tick_height = (self.slot + 1) * self.ticks_per_slot; self.slots_per_year = years_as_slots( 1.0, @@ -2063,11 +2052,6 @@ impl Bank { if Stakes::is_stake(account) { self.stakes.write().unwrap().store(pubkey, account); - } else if storage_utils::is_storage(account) { - self.storage_accounts - .write() - .unwrap() - .store(pubkey, account); } } @@ -2369,11 +2353,6 @@ impl Bank { self.slots_per_year } - /// Return the number of slots per segment - pub fn slots_per_segment(&self) -> u64 { - self.slots_per_segment - } - /// Return the number of ticks since genesis. pub fn tick_height(&self) -> u64 { self.tick_height.load(Ordering::Relaxed) @@ -2384,7 +2363,7 @@ impl Bank { *self.inflation.read().unwrap() } - /// Return the total capititalization of the Bank + /// Return the total capitalization of the Bank pub fn capitalization(&self) -> u64 { self.capitalization.load(Ordering::Relaxed) } @@ -2431,31 +2410,19 @@ impl Bank { let message = &tx.message(); let acc = raccs.as_ref().unwrap(); - for (pubkey, account) in - message - .account_keys - .iter() - .zip(acc.0.iter()) - .filter(|(_key, account)| { - (Stakes::is_stake(account)) || storage_utils::is_storage(account) - }) + for (pubkey, account) in message + .account_keys + .iter() + .zip(acc.0.iter()) + .filter(|(_key, account)| (Stakes::is_stake(account))) { if Stakes::is_stake(account) { self.stakes.write().unwrap().store(pubkey, account); - } else if storage_utils::is_storage(account) { - self.storage_accounts - .write() - .unwrap() - .store(pubkey, account); } } } } - pub fn storage_accounts(&self) -> StorageAccounts { - self.storage_accounts.read().unwrap().clone() - } - /// current stake delegations for this bank /// Note: this method is exposed publicly for external usage pub fn stake_delegations(&self) -> HashMap { @@ -2833,35 +2800,6 @@ mod tests { assert_eq!(bank1.capitalization(), 42 * 42); } - #[test] - fn test_bank_inflation() { - let key = Pubkey::default(); - let bank = Arc::new(Bank::new(&GenesisConfig { - accounts: (0..42) - .into_iter() - .map(|_| (Pubkey::new_rand(), Account::new(42, 0, &key))) - .collect(), - ..GenesisConfig::default() - })); - assert_eq!(bank.capitalization(), 42 * 42); - - // With inflation - bank.set_entered_epoch_callback(Box::new(move |bank: &mut Bank| { - let mut inflation = Inflation::default(); - inflation.initial = 1_000_000.0; - bank.set_inflation(inflation) - })); - let bank1 = Bank::new_from_parent(&bank, &key, MINIMUM_SLOTS_PER_EPOCH + 1); - 
assert_ne!(bank.capitalization(), bank1.capitalization()); - - // Without inflation - bank.set_entered_epoch_callback(Box::new(move |bank: &mut Bank| { - bank.set_inflation(Inflation::new_disabled()) - })); - let bank2 = Bank::new_from_parent(&bank, &key, MINIMUM_SLOTS_PER_EPOCH * 2 + 1); - assert_eq!(bank.capitalization(), bank2.capitalization()); - } - #[test] fn test_credit_debit_rent_no_side_effect_on_hash() { let (mut genesis_config, _mint_keypair) = create_genesis_config(10); @@ -4158,13 +4096,8 @@ mod tests { let ((vote_id, mut vote_account), (stake_id, stake_account)) = crate::stakes::tests::create_staked_node_accounts(1_0000); - let ((validator_id, validator_account), (archiver_id, archiver_account)) = - crate::storage_utils::tests::create_storage_accounts_with_credits(100); - - // set up stakes, vote, and storage accounts + // set up accounts bank.store_account(&stake_id, &stake_account); - bank.store_account(&validator_id, &validator_account); - bank.store_account(&archiver_id, &archiver_account); // generate some rewards let mut vote_state = Some(VoteState::from(&vote_account).unwrap()); @@ -4185,7 +4118,6 @@ mod tests { bank.store_account(&vote_id, &vote_account); let validator_points = bank.stakes.read().unwrap().points(); - let storage_points = bank.storage_accounts.read().unwrap().points(); // put a child bank in epoch 1, which calls update_rewards()... let bank1 = Bank::new_from_parent( @@ -4215,15 +4147,11 @@ mod tests { // verify the rewards are the right size assert!( - ((rewards.validator_point_value * validator_points as f64 - + rewards.storage_point_value * storage_points as f64) - - inflation as f64) - .abs() + ((rewards.validator_point_value * validator_points as f64) - inflation as f64).abs() < 1.0 // rounding, truncating ); // verify validator rewards show up in bank1.rewards vector - // (currently storage rewards will not show up) assert_eq!( bank1.rewards, Some(vec![( @@ -5900,25 +5828,19 @@ mod tests { } #[test] - fn test_check_point_values() { + fn test_check_point_value() { let (genesis_config, _) = create_genesis_config(500); let bank = Arc::new(Bank::new(&genesis_config)); // check that point values are 0 if no previous value was known and current values are not normal - assert_eq!( - bank.check_point_values(std::f64::INFINITY, std::f64::NAN), - (0.0, 0.0) - ); + assert_eq!(bank.check_point_value(std::f64::INFINITY), 0.0); bank.store_account( &sysvar::rewards::id(), - &sysvar::rewards::create_account(1, 1.0, 1.0), + &sysvar::rewards::create_account(1, 1.0), ); // check that point values are the previous value if current values are not normal - assert_eq!( - bank.check_point_values(std::f64::INFINITY, std::f64::NAN), - (1.0, 1.0) - ); + assert_eq!(bank.check_point_value(std::f64::INFINITY), 1.0); } #[test] diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 51a581e725..000a9ead13 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -150,7 +150,6 @@ pub fn create_genesis_config_with_leader_ex( }; solana_stake_program::add_genesis_accounts(&mut genesis_config); - solana_storage_program::rewards_pools::add_genesis_accounts(&mut genesis_config); GenesisConfigInfo { genesis_config, diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 2c1de96e93..4a0edda70e 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -16,7 +16,6 @@ pub mod rent_collector; mod serde_utils; pub mod stakes; pub mod status_cache; -pub mod storage_utils; mod system_instruction_processor; pub mod transaction_batch; pub mod 
transaction_utils; diff --git a/runtime/src/storage_utils.rs b/runtime/src/storage_utils.rs deleted file mode 100644 index 3fd5bf0d9d..0000000000 --- a/runtime/src/storage_utils.rs +++ /dev/null @@ -1,248 +0,0 @@ -use crate::bank::Bank; -use solana_sdk::{account::Account, account_utils::StateMut, pubkey::Pubkey}; -use solana_storage_program::storage_contract::StorageContract; -use std::collections::{HashMap, HashSet}; - -#[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize)] -pub struct StorageAccounts { - /// validator storage accounts and their credits - validator_accounts: HashSet, - - /// archiver storage accounts and their credits - archiver_accounts: HashSet, - - /// unclaimed points. - // 1 point == 1 storage account credit - points: HashMap, -} - -pub fn is_storage(account: &Account) -> bool { - solana_storage_program::check_id(&account.owner) -} - -impl StorageAccounts { - pub fn store(&mut self, pubkey: &Pubkey, account: &Account) { - if let Ok(storage_state) = account.state() { - if let StorageContract::ArchiverStorage { credits, .. } = storage_state { - if account.lamports == 0 { - self.archiver_accounts.remove(pubkey); - } else { - self.archiver_accounts.insert(*pubkey); - self.points.insert(*pubkey, credits.current_epoch); - } - } else if let StorageContract::ValidatorStorage { credits, .. } = storage_state { - if account.lamports == 0 { - self.validator_accounts.remove(pubkey); - } else { - self.validator_accounts.insert(*pubkey); - self.points.insert(*pubkey, credits.current_epoch); - } - } - }; - } - - /// currently unclaimed points - pub fn points(&self) -> u64 { - self.points.values().sum() - } - - /// "claims" points, resets points to 0 - pub fn claim_points(&mut self) -> u64 { - let points = self.points(); - self.points.clear(); - points - } -} - -pub fn validator_accounts(bank: &Bank) -> HashMap { - bank.storage_accounts() - .validator_accounts - .iter() - .filter_map(|account_id| { - bank.get_account(account_id) - .map(|account| (*account_id, account)) - }) - .collect() -} - -pub fn archiver_accounts(bank: &Bank) -> HashMap { - bank.storage_accounts() - .archiver_accounts - .iter() - .filter_map(|account_id| { - bank.get_account(account_id) - .map(|account| (*account_id, account)) - }) - .collect() -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::bank_client::BankClient; - use solana_sdk::{ - client::SyncClient, - genesis_config::create_genesis_config, - message::Message, - signature::{Keypair, Signer}, - }; - use solana_storage_program::{ - storage_contract::{StorageAccount, STORAGE_ACCOUNT_SPACE}, - storage_instruction::{self, StorageAccountType}, - storage_processor, - }; - use std::{rc::Rc, sync::Arc}; - - #[test] - fn test_store_and_recover() { - let (mut genesis_config, mint_keypair) = create_genesis_config(1000); - genesis_config.rent.lamports_per_byte_year = 0; - let mint_pubkey = mint_keypair.pubkey(); - let archiver_keypair = Keypair::new(); - let archiver_pubkey = archiver_keypair.pubkey(); - let validator_keypair = Keypair::new(); - let validator_pubkey = validator_keypair.pubkey(); - let mut bank = Bank::new(&genesis_config); - bank.add_static_program( - "storage_program", - solana_storage_program::id(), - storage_processor::process_instruction, - ); - - let bank = Arc::new(bank); - let bank_client = BankClient::new_shared(&bank); - - let message = Message::new(&storage_instruction::create_storage_account( - &mint_pubkey, - &Pubkey::default(), - &archiver_pubkey, - 11, - StorageAccountType::Archiver, - )); - 
bank_client - .send_message(&[&mint_keypair, &archiver_keypair], message) - .unwrap(); - - let message = Message::new(&storage_instruction::create_storage_account( - &mint_pubkey, - &Pubkey::default(), - &validator_pubkey, - 11, - StorageAccountType::Validator, - )); - bank_client - .send_message(&[&mint_keypair, &validator_keypair], message) - .unwrap(); - - assert_eq!(validator_accounts(bank.as_ref()).len(), 1); - assert_eq!(archiver_accounts(bank.as_ref()).len(), 1); - } - - #[test] - fn test_points() { - // note: storage_points == storage_credits - let credits = 42; - let mut storage_accounts = StorageAccounts::default(); - assert_eq!(storage_accounts.points(), 0); - assert_eq!(storage_accounts.claim_points(), 0); - - // create random validator and archiver accounts with `credits` - let ((validator_pubkey, validator_account), (archiver_pubkey, archiver_account)) = - create_storage_accounts_with_credits(credits); - - storage_accounts.store(&validator_pubkey, &validator_account); - storage_accounts.store(&archiver_pubkey, &archiver_account); - // check that 2x credits worth of points are available - assert_eq!(storage_accounts.points(), credits * 2); - let ((validator_pubkey, validator_account), (archiver_pubkey, mut archiver_account)) = - create_storage_accounts_with_credits(credits); - - storage_accounts.store(&validator_pubkey, &validator_account); - storage_accounts.store(&archiver_pubkey, &archiver_account); - // check that 4x credits worth of points are available - assert_eq!(storage_accounts.points(), credits * 2 * 2); - - storage_accounts.store(&validator_pubkey, &validator_account); - storage_accounts.store(&archiver_pubkey, &archiver_account); - // check that storing again has no effect - assert_eq!(storage_accounts.points(), credits * 2 * 2); - - let storage_contract = &mut archiver_account.state().unwrap(); - if let StorageContract::ArchiverStorage { - credits: account_credits, - .. - } = storage_contract - { - account_credits.current_epoch += 1; - } - archiver_account.set_state(storage_contract).unwrap(); - storage_accounts.store(&archiver_pubkey, &archiver_account); - - // check that incremental store increases credits - assert_eq!(storage_accounts.points(), credits * 2 * 2 + 1); - - assert_eq!(storage_accounts.claim_points(), credits * 2 * 2 + 1); - // check that once redeemed, the points are gone - assert_eq!(storage_accounts.claim_points(), 0); - } - - pub fn create_storage_accounts_with_credits( - credits: u64, - ) -> ((Pubkey, Account), (Pubkey, Account)) { - let validator_pubkey = Pubkey::new_rand(); - let archiver_pubkey = Pubkey::new_rand(); - let validator_account = Account::new_ref( - 1, - STORAGE_ACCOUNT_SPACE as usize, - &solana_storage_program::id(), - ); - let archiver_account = Account::new_ref( - 1, - STORAGE_ACCOUNT_SPACE as usize, - &solana_storage_program::id(), - ); - { - StorageAccount::new(validator_pubkey, &mut validator_account.borrow_mut()) - .initialize_storage(validator_pubkey, StorageAccountType::Validator) - .unwrap(); - let storage_contract = &mut validator_account.borrow().state().unwrap(); - if let StorageContract::ValidatorStorage { - credits: account_credits, - .. 
-            } = storage_contract
-            {
-                account_credits.current_epoch = credits;
-            }
-            validator_account
-                .borrow_mut()
-                .set_state(storage_contract)
-                .unwrap();
-
-            StorageAccount::new(archiver_pubkey, &mut archiver_account.borrow_mut())
-                .initialize_storage(archiver_pubkey, StorageAccountType::Archiver)
-                .unwrap();
-            let storage_contract = &mut archiver_account.borrow().state().unwrap();
-            if let StorageContract::ArchiverStorage {
-                credits: account_credits,
-                ..
-            } = storage_contract
-            {
-                account_credits.current_epoch = credits;
-            }
-            archiver_account
-                .borrow_mut()
-                .set_state(storage_contract)
-                .unwrap();
-        }
-        (
-            (
-                validator_pubkey,
-                Rc::try_unwrap(validator_account).unwrap().into_inner(),
-            ),
-            (
-                archiver_pubkey,
-                Rc::try_unwrap(archiver_account).unwrap().into_inner(),
-            ),
-        )
-    }
-}
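
For reference, the accounting pattern that the deleted `StorageAccounts` implemented reduces to a few lines of plain-std Rust: the latest credit count is upserted per key, the total is a sum over the map, and claiming drains it. The sketch below is illustrative only; the `Points` type and the `u64` keys are stand-ins, not anything from this tree.

use std::collections::HashMap;

#[derive(Default)]
struct Points {
    // key -> unclaimed points (1 point == 1 storage account credit)
    points: HashMap<u64, u64>,
}

impl Points {
    // Overwrites the key's entry, so re-storing the same credits is a no-op.
    fn store(&mut self, key: u64, credits: u64) {
        self.points.insert(key, credits);
    }

    fn points(&self) -> u64 {
        self.points.values().sum()
    }

    // "Claims" all points and resets them to zero, as claim_points() did.
    fn claim_points(&mut self) -> u64 {
        let total = self.points();
        self.points.clear();
        total
    }
}

fn main() {
    let mut p = Points::default();
    p.store(1, 42);
    p.store(2, 42);
    p.store(2, 42); // idempotent: the total is still 84
    assert_eq!(p.points(), 84);
    assert_eq!(p.claim_points(), 84);
    assert_eq!(p.claim_points(), 0); // once claimed, the points are gone
}
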
diff --git a/runtime/tests/storage.rs b/runtime/tests/storage.rs
deleted file mode 100644
index 3ed26b4409..0000000000
--- a/runtime/tests/storage.rs
+++ /dev/null
@@ -1,482 +0,0 @@
-use assert_matches::assert_matches;
-use bincode::deserialize;
-use log::*;
-use solana_runtime::{
-    bank::Bank,
-    bank_client::BankClient,
-    genesis_utils::{create_genesis_config, GenesisConfigInfo},
-};
-use solana_sdk::{
-    account_utils::StateMut,
-    client::SyncClient,
-    clock::{get_segment_from_slot, DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_TICKS_PER_SLOT},
-    hash::{hash, Hash},
-    message::Message,
-    pubkey::Pubkey,
-    signature::{Keypair, Signature, Signer},
-    system_instruction,
-    sysvar::{
-        rewards::{self, Rewards},
-        Sysvar,
-    },
-};
-use solana_storage_program::{
-    id,
-    storage_contract::{ProofStatus, StorageContract},
-    storage_instruction::{self, StorageAccountType},
-    storage_processor::process_instruction,
-};
-use std::{collections::HashMap, sync::Arc};
-
-const TICKS_IN_SEGMENT: u64 = DEFAULT_SLOTS_PER_SEGMENT * DEFAULT_TICKS_PER_SLOT;
-
-#[test]
-fn test_account_owner() {
-    let account_owner = Pubkey::new_rand();
-    let validator_storage_keypair = Keypair::new();
-    let validator_storage_pubkey = validator_storage_keypair.pubkey();
-    let archiver_storage_keypair = Keypair::new();
-    let archiver_storage_pubkey = archiver_storage_keypair.pubkey();
-
-    let GenesisConfigInfo {
-        genesis_config,
-        mint_keypair,
-        ..
-    } = create_genesis_config(1000);
-    let mut bank = Bank::new(&genesis_config);
-    let mint_pubkey = mint_keypair.pubkey();
-    bank.add_static_program("storage_program", id(), process_instruction);
-    let bank = Arc::new(bank);
-    let bank_client = BankClient::new_shared(&bank);
-
-    let message = Message::new(&storage_instruction::create_storage_account(
-        &mint_pubkey,
-        &account_owner,
-        &validator_storage_pubkey,
-        1,
-        StorageAccountType::Validator,
-    ));
-    bank_client
-        .send_message(&[&mint_keypair, &validator_storage_keypair], message)
-        .expect("failed to create account");
-    let account = bank
-        .get_account(&validator_storage_pubkey)
-        .expect("account not found");
-    let storage_contract = account.state().expect("couldn't unpack account data");
-    if let StorageContract::ValidatorStorage { owner, .. } = storage_contract {
-        assert_eq!(owner, account_owner);
-    } else {
-        assert!(false, "wrong account type found")
-    }
-
-    let message = Message::new(&storage_instruction::create_storage_account(
-        &mint_pubkey,
-        &account_owner,
-        &archiver_storage_pubkey,
-        1,
-        StorageAccountType::Archiver,
-    ));
-    bank_client
-        .send_message(&[&mint_keypair, &archiver_storage_keypair], message)
-        .expect("failed to create account");
-    let account = bank
-        .get_account(&archiver_storage_pubkey)
-        .expect("account not found");
-    let storage_contract = account.state().expect("couldn't unpack account data");
-    if let StorageContract::ArchiverStorage { owner, .. } = storage_contract {
-        assert_eq!(owner, account_owner);
-    } else {
-        assert!(false, "wrong account type found")
-    }
-}
-
-#[test]
-fn test_validate_mining() {
-    solana_logger::setup();
-    let GenesisConfigInfo {
-        mut genesis_config,
-        mint_keypair,
-        ..
-    } = create_genesis_config(100_000_000_000);
-    genesis_config
-        .native_instruction_processors
-        .push(solana_storage_program::solana_storage_program!());
-    let mint_pubkey = mint_keypair.pubkey();
-    // 1 owner for all archiver and validator accounts for the test
-    let owner_pubkey = Pubkey::new_rand();
-
-    let archiver_1_storage_keypair = Keypair::new();
-    let archiver_1_storage_id = archiver_1_storage_keypair.pubkey();
-
-    let archiver_2_storage_keypair = Keypair::new();
-    let archiver_2_storage_id = archiver_2_storage_keypair.pubkey();
-
-    let validator_storage_keypair = Keypair::new();
-    let validator_storage_id = validator_storage_keypair.pubkey();
-
-    let bank = Bank::new(&genesis_config);
-    let bank = Arc::new(bank);
-    let bank_client = BankClient::new_shared(&bank);
-
-    init_storage_accounts(
-        &owner_pubkey,
-        &bank_client,
-        &mint_keypair,
-        &[&validator_storage_keypair],
-        &[&archiver_1_storage_keypair, &archiver_2_storage_keypair],
-        10,
-    );
-
-    // create a new bank in segment 2
-    let bank = Arc::new(Bank::new_from_parent(
-        &bank,
-        &Pubkey::default(),
-        DEFAULT_SLOTS_PER_SEGMENT * 2,
-    ));
-    let bank_client = BankClient::new_shared(&bank);
-
-    // advertise for storage segment 1
-    let message = Message::new_with_payer(
-        &[storage_instruction::advertise_recent_blockhash(
-            &validator_storage_id,
-            Hash::default(),
-            1,
-        )],
-        Some(&mint_pubkey),
-    );
-    assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &validator_storage_keypair], message),
-        Ok(_)
-    );
-
-    // submit 5 proofs for each archiver for segment 0
-    let mut checked_proofs: HashMap<_, Vec<_>> = HashMap::new();
-    for _ in 0..5 {
-        checked_proofs
-            .entry(archiver_1_storage_id)
-            .or_default()
-            .push(submit_proof(
-                &mint_keypair,
-                &archiver_1_storage_keypair,
-                &bank_client,
-                0,
-            ));
-        checked_proofs
-            .entry(archiver_2_storage_id)
-            .or_default()
-            .push(submit_proof(
-                &mint_keypair,
-                &archiver_2_storage_keypair,
-                &bank_client,
-                0,
-            ));
-    }
-    let message = Message::new_with_payer(
-        &[storage_instruction::advertise_recent_blockhash(
-            &validator_storage_id,
-            Hash::default(),
-            2,
-        )],
-        Some(&mint_pubkey),
-    );
-
-    // move banks into the next segment
-    let proof_segment = get_segment_from_slot(bank.slot(), bank.slots_per_segment());
-    let bank = Arc::new(Bank::new_from_parent(
-        &bank,
-        &Pubkey::default(),
-        DEFAULT_SLOTS_PER_SEGMENT + bank.slot(),
-    ));
-    let bank_client = BankClient::new_shared(&bank);
-
-    assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &validator_storage_keypair], message),
-        Ok(_)
-    );
-
-    let message = Message::new_with_payer(
-        &[storage_instruction::proof_validation(
-            &validator_storage_id,
-            proof_segment as u64,
-            checked_proofs.into_iter().collect(),
-        )],
-        Some(&mint_pubkey),
-    );
-
-    assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &validator_storage_keypair], message),
-        Ok(_)
-    );
-
-    let message = Message::new_with_payer(
-        &[storage_instruction::advertise_recent_blockhash(
-            &validator_storage_id,
-            Hash::default(),
-            3,
-        )],
-        Some(&mint_pubkey),
-    );
-
-    // move banks into the next segment
-    let bank = Arc::new(Bank::new_from_parent(
-        &bank,
-        &Pubkey::default(),
-        DEFAULT_SLOTS_PER_SEGMENT + bank.slot(),
-    ));
-    let bank_client = BankClient::new_shared(&bank);
-
-    assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &validator_storage_keypair], message),
-        Ok(_)
-    );
-
-    assert_eq!(bank_client.get_balance(&validator_storage_id).unwrap(), 10);
-
-    let bank = Arc::new(Bank::new_from_parent(
-        &bank,
-        &Pubkey::default(),
-        bank.slot() + bank.epoch_schedule().slots_per_epoch,
-    ));
-    let bank_client = BankClient::new_shared(&bank);
-
-    let rewards = bank
-        .get_account(&rewards::id())
-        .map(|account| Rewards::from_account(&account).unwrap())
-        .unwrap();
-    let message = Message::new_with_payer(
-        &[storage_instruction::claim_reward(
-            &owner_pubkey,
-            &validator_storage_id,
-        )],
-        Some(&mint_pubkey),
-    );
-    assert_matches!(bank_client.send_message(&[&mint_keypair], message), Ok(_));
-    assert_eq!(
-        bank_client.get_balance(&owner_pubkey).unwrap(),
-        1 + ((rewards.storage_point_value * 10_f64) as u64)
-    );
-
-    // tick the bank into the next storage epoch so that rewards can be claimed
-    for _ in 0..=TICKS_IN_SEGMENT {
-        bank.register_tick(&bank.last_blockhash());
-    }
-
-    assert_eq!(bank_client.get_balance(&archiver_1_storage_id).unwrap(), 10);
-
-    let message = Message::new_with_payer(
-        &[storage_instruction::claim_reward(
-            &owner_pubkey,
-            &archiver_1_storage_id,
-        )],
-        Some(&mint_pubkey),
-    );
-    assert_matches!(bank_client.send_message(&[&mint_keypair], message), Ok(_));
-    assert_eq!(
-        bank_client.get_balance(&owner_pubkey).unwrap(),
-        1 + ((rewards.storage_point_value * 10_f64) as u64)
-            + (rewards.storage_point_value * 5_f64) as u64
-    );
-
-    let message = Message::new_with_payer(
-        &[storage_instruction::claim_reward(
-            &owner_pubkey,
-            &archiver_2_storage_id,
-        )],
-        Some(&mint_pubkey),
-    );
-    assert_matches!(bank_client.send_message(&[&mint_keypair], message), Ok(_));
-    assert_eq!(
-        bank_client.get_balance(&owner_pubkey).unwrap(),
-        1 + (rewards.storage_point_value * 10_f64) as u64
-            + (rewards.storage_point_value * 5_f64) as u64
-            + (rewards.storage_point_value * 5_f64) as u64
-    );
-}
-
-fn init_storage_accounts(
-    owner: &Pubkey,
-    client: &BankClient,
-    mint: &Keypair,
-    validator_accounts_to_create: &[&Keypair],
-    archiver_accounts_to_create: &[&Keypair],
-    lamports: u64,
-) {
-    let mut signers = vec![mint];
-    let mut ixs: Vec<_> = vec![system_instruction::transfer(&mint.pubkey(), owner, 1)];
-    ixs.append(
-        &mut validator_accounts_to_create
-            .into_iter()
-            .flat_map(|account| {
-                signers.push(&account);
-                storage_instruction::create_storage_account(
-                    &mint.pubkey(),
-                    owner,
-                    &account.pubkey(),
-                    lamports,
-                    StorageAccountType::Validator,
-                )
-            })
-            .collect(),
-    );
-    archiver_accounts_to_create.into_iter().for_each(|account| {
-        signers.push(&account);
-        ixs.append(&mut storage_instruction::create_storage_account(
-            &mint.pubkey(),
-            owner,
-            &account.pubkey(),
-            lamports,
-            StorageAccountType::Archiver,
-        ))
-    });
-    let message = Message::new(&ixs);
-    client.send_message(&signers, message).unwrap();
-}
-
-fn get_storage_segment<C: SyncClient>(client: &C, account: &Pubkey) -> u64 {
-    match client.get_account_data(&account).unwrap() {
-        Some(storage_system_account_data) => {
-            let contract = deserialize(&storage_system_account_data);
-            if let Ok(contract) = contract {
-                match contract {
-                    StorageContract::ValidatorStorage { segment, .. } => {
-                        return segment;
-                    }
-                    _ => info!("error in reading segment"),
-                }
-            }
-        }
-        None => {
-            info!("error in reading segment");
-        }
-    }
-    0
-}
-
-fn submit_proof(
-    mint_keypair: &Keypair,
-    storage_keypair: &Keypair,
-    bank_client: &BankClient,
-    segment_index: u64,
-) -> ProofStatus {
-    let sha_state = Hash::new(Pubkey::new_rand().as_ref());
-    let message = Message::new_with_payer(
-        &[storage_instruction::mining_proof(
-            &storage_keypair.pubkey(),
-            sha_state,
-            segment_index,
-            Signature::default(),
-            bank_client.get_recent_blockhash().unwrap().0,
-        )],
-        Some(&mint_keypair.pubkey()),
-    );
-
-    assert_matches!(
-        bank_client.send_message(&[mint_keypair, storage_keypair], message),
-        Ok(_)
-    );
-    ProofStatus::Valid
-}
-
-fn get_storage_blockhash<C: SyncClient>(client: &C, account: &Pubkey) -> Hash {
-    if let Some(storage_system_account_data) = client.get_account_data(&account).unwrap() {
-        let contract = deserialize(&storage_system_account_data);
-        if let Ok(contract) = contract {
-            match contract {
-                StorageContract::ValidatorStorage { hash, .. } => {
-                    return hash;
-                }
-                _ => (),
-            }
-        }
-    }
-    Hash::default()
-}
-
-#[test]
-fn test_bank_storage() {
-    let GenesisConfigInfo {
-        mut genesis_config,
-        mint_keypair,
-        ..
-    } = create_genesis_config(1000);
-    genesis_config
-        .native_instruction_processors
-        .push(solana_storage_program::solana_storage_program!());
-    let mint_pubkey = mint_keypair.pubkey();
-    let archiver_keypair = Keypair::new();
-    let archiver_pubkey = archiver_keypair.pubkey();
-    let validator_keypair = Keypair::new();
-    let validator_pubkey = validator_keypair.pubkey();
-
-    let bank = Bank::new(&genesis_config);
-    // tick the bank up until it's moved into storage segment 2
-    // create a new bank in storage segment 2
-    let bank = Bank::new_from_parent(
-        &Arc::new(bank),
-        &Pubkey::new_rand(),
-        DEFAULT_SLOTS_PER_SEGMENT * 2,
-    );
-    let bank_client = BankClient::new(bank);
-
-    let x = 42;
-    let x2 = x * 2;
-    let storage_blockhash = hash(&[x2]);
-
-    let message = Message::new(&storage_instruction::create_storage_account(
-        &mint_pubkey,
-        &Pubkey::default(),
-        &archiver_pubkey,
-        11,
-        StorageAccountType::Archiver,
-    ));
-    bank_client
-        .send_message(&[&mint_keypair, &archiver_keypair], message)
-        .unwrap();
-
-    let message = Message::new(&storage_instruction::create_storage_account(
-        &mint_pubkey,
-        &Pubkey::default(),
-        &validator_pubkey,
-        1,
-        StorageAccountType::Validator,
-    ));
-    bank_client
-        .send_message(&[&mint_keypair, &validator_keypair], message)
-        .unwrap();
-
-    let message = Message::new_with_payer(
-        &[storage_instruction::advertise_recent_blockhash(
-            &validator_pubkey,
-            storage_blockhash,
-            1,
-        )],
-        Some(&mint_pubkey),
-    );
-
-    assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &validator_keypair], message),
-        Ok(_)
-    );
-
-    let slot = 0;
-    let message = Message::new_with_payer(
-        &[storage_instruction::mining_proof(
-            &archiver_pubkey,
-            Hash::default(),
-            slot,
-            Signature::default(),
-            bank_client.get_recent_blockhash().unwrap().0,
-        )],
-        Some(&mint_pubkey),
-    );
-    assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &archiver_keypair], message),
-        Ok(_)
-    );
-
-    assert_eq!(get_storage_segment(&bank_client, &validator_pubkey), 1);
-    assert_eq!(
-        get_storage_blockhash(&bank_client, &validator_pubkey),
-        storage_blockhash
-    );
-}
diff --git a/sdk/src/clock.rs b/sdk/src/clock.rs
index 4a64b7120f..4e9ef8c1ee 100644
--- a/sdk/src/clock.rs
+++ b/sdk/src/clock.rs
@@ -1,4 +1,4 @@
-//! Provides information about the network's clock which is made up of ticks, slots, segments, etc...
+//! Provides information about the network's clock which is made up of ticks, slots, etc...
 
 // The default tick rate that the cluster attempts to achieve. Note that the actual tick
 // rate at any given time should be expected to drift
@@ -22,9 +22,6 @@ pub const TICKS_PER_DAY: u64 = DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY;
 // 1 Epoch ~= 2 days
 pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 2 * TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;
 
-// Storage segment configuration
-pub const DEFAULT_SLOTS_PER_SEGMENT: u64 = 1024;
-
 // 4 times longer than the max_lockout to allow enough time for PoRep (128 slots)
 pub const DEFAULT_SLOTS_PER_TURN: u64 = 32 * 4;
 
@@ -55,32 +52,10 @@ pub const MAX_TRANSACTION_FORWARDING_DELAY_GPU: usize = 2;
 /// More delay is expected if CUDA is not enabled (as signature verification takes longer)
 pub const MAX_TRANSACTION_FORWARDING_DELAY: usize = 6;
 
-/// Converts a slot to a storage segment. Does not indicate that a segment is complete.
-pub fn get_segment_from_slot(rooted_slot: Slot, slots_per_segment: u64) -> Segment {
-    (rooted_slot + (slots_per_segment - 1)) / slots_per_segment
-}
-
-/// Given a slot, returns the latest complete segment; if no segment could possibly be complete
-/// for a given slot it returns `None` (i.e. if `slot < slots_per_segment`)
-pub fn get_complete_segment_from_slot(
-    rooted_slot: Slot,
-    slots_per_segment: u64,
-) -> Option<Segment> {
-    let completed_segment = rooted_slot / slots_per_segment;
-    if rooted_slot < slots_per_segment {
-        None
-    } else {
-        Some(completed_segment)
-    }
-}
-
 /// Slot is a unit of time given to a leader for encoding,
 /// is some number of Ticks long.
 pub type Slot = u64;
 
-/// A segment is some number of slots stored by archivers
-pub type Segment = u64;
-
 /// Epoch is a unit of time a given leader schedule is honored,
 /// some number of Slots.
 pub type Epoch = u64;
@@ -105,8 +80,8 @@ pub type UnixTimestamp = i64;
 pub struct Clock {
     /// the current network/bank Slot
     pub slot: Slot,
-    /// the current Segment, used for archiver rounds
-    pub segment: Segment,
+    /// unused
+    pub unused: u64,
     /// the bank Epoch
     pub epoch: Epoch,
     /// the future Epoch for which the leader schedule has
@@ -116,29 +91,3 @@ pub struct Clock {
     /// in slots, drifts!
     pub unix_timestamp: UnixTimestamp,
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn get_segments(slot: Slot, slots_per_segment: u64) -> (Segment, Segment) {
-        (
-            get_segment_from_slot(slot, slots_per_segment),
-            get_complete_segment_from_slot(slot, slots_per_segment).unwrap(),
-        )
-    }
-
-    #[test]
-    fn test_complete_segment_impossible() {
-        // slot < slots_per_segment, so there can be no complete segments
-        assert_eq!(get_complete_segment_from_slot(5, 10), None);
-    }
-
-    #[test]
-    fn test_segment_conversion() {
-        let (current, complete) = get_segments(2048, 1024);
-        assert_eq!(current, complete);
-        let (current, complete) = get_segments(2049, 1024);
-        assert!(complete < current);
-    }
-}
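
The segment math deleted above is worth restating standalone: `get_segment_from_slot` is a ceiling division, while a *complete* segment exists only once at least one full segment of slots has elapsed. A minimal sketch using the same formulas and the values from the deleted tests:

fn get_segment_from_slot(rooted_slot: u64, slots_per_segment: u64) -> u64 {
    // ceiling division: slot 1 already lies "in" segment 1
    (rooted_slot + (slots_per_segment - 1)) / slots_per_segment
}

fn get_complete_segment_from_slot(rooted_slot: u64, slots_per_segment: u64) -> Option<u64> {
    if rooted_slot < slots_per_segment {
        None // not even one full segment of slots has elapsed
    } else {
        Some(rooted_slot / slots_per_segment)
    }
}

fn main() {
    // at slot 2048 with 1024-slot segments, current and complete agree...
    assert_eq!(get_segment_from_slot(2048, 1024), 2);
    assert_eq!(get_complete_segment_from_slot(2048, 1024), Some(2));
    // ...one slot later the current segment leads the last complete one
    assert_eq!(get_segment_from_slot(2049, 1024), 3);
    assert_eq!(get_complete_segment_from_slot(2049, 1024), Some(2));
    // below one full segment, no segment can be complete
    assert_eq!(get_complete_segment_from_slot(5, 10), None);
}
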
diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs
index 8627493f68..db5abe69bd 100644
--- a/sdk/src/genesis_config.rs
+++ b/sdk/src/genesis_config.rs
@@ -2,7 +2,7 @@
 
 use crate::{
     account::Account,
-    clock::{UnixTimestamp, DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_TICKS_PER_SLOT},
+    clock::{UnixTimestamp, DEFAULT_TICKS_PER_SLOT},
     epoch_schedule::EpochSchedule,
     fee_calculator::FeeRateGovernor,
     hash::{hash, Hash},
@@ -27,6 +27,9 @@ use std::{
     time::{SystemTime, UNIX_EPOCH},
 };
 
+// deprecated default that is no longer used
+pub const UNUSED_DEFAULT: u64 = 1024;
+
 #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
 pub enum OperatingMode {
     Preview, // Next set of cluster features to be promoted to Stable
@@ -45,7 +48,7 @@ pub struct GenesisConfig {
     /// accounts for network rewards, these do not count towards capitalization
     pub rewards_pools: BTreeMap<Pubkey, Account>,
     pub ticks_per_slot: u64,
-    pub slots_per_segment: u64,
+    pub unused: u64,
     /// network speed configuration
     pub poh_config: PohConfig,
     /// this field exists only to ensure that the binary layout of GenesisConfig remains compatible
@@ -89,7 +92,7 @@ impl Default for GenesisConfig {
             native_instruction_processors: Vec::default(),
             rewards_pools: BTreeMap::default(),
             ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
-            slots_per_segment: DEFAULT_SLOTS_PER_SEGMENT,
+            unused: UNUSED_DEFAULT,
             poh_config: PohConfig::default(),
             inflation: Inflation::default(),
             __backwards_compat_with_v0_23: 0,
diff --git a/sdk/src/sysvar/rewards.rs b/sdk/src/sysvar/rewards.rs
index ed2e308d1a..afbcb00345 100644
--- a/sdk/src/sysvar/rewards.rs
+++ b/sdk/src/sysvar/rewards.rs
@@ -8,19 +8,15 @@ crate::declare_sysvar_id!("SysvarRewards111111111111111111111111111111", Rewards
 #[derive(Serialize, Deserialize, Debug, Default, PartialEq)]
 pub struct Rewards {
     pub validator_point_value: f64,
-    pub storage_point_value: f64,
+    pub unused: f64,
 }
 
 impl Sysvar for Rewards {}
 
-pub fn create_account(
-    lamports: u64,
-    validator_point_value: f64,
-    storage_point_value: f64,
-) -> Account {
+pub fn create_account(lamports: u64, validator_point_value: f64) -> Account {
     Rewards {
         validator_point_value,
-        storage_point_value,
+        unused: 0.0,
     }
     .create_account(lamports)
 }
@@ -31,7 +27,7 @@ mod tests {
 
     #[test]
     fn test_create_account() {
-        let account = create_account(1, 0.0, 0.0);
+        let account = create_account(1, 0.0);
         let rewards = Rewards::from_account(&account).unwrap();
         assert_eq!(rewards, Rewards::default());
     }
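
Note the pattern in both hunks above: removed fields are renamed to `unused` rather than deleted outright. Since bincode serializes struct fields positionally with no names, keeping a placeholder of the same type preserves the account/wire layout, so old serialized data still deserializes. A sketch of why this works, assuming the `serde` (with `derive`) and `bincode` 1.x crates; `RewardsV1`/`RewardsV2` are illustrative stand-ins, not types from this patch:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct RewardsV1 {
    validator_point_value: f64,
    storage_point_value: f64,
}

#[derive(Serialize, Deserialize)]
struct RewardsV2 {
    validator_point_value: f64,
    unused: f64, // placeholder keeps the byte layout of RewardsV1
}

fn main() {
    // serialize under the old definition...
    let old = bincode::serialize(&RewardsV1 {
        validator_point_value: 1.5,
        storage_point_value: 0.25,
    })
    .unwrap();
    // ...and the bytes still deserialize under the new one
    let new: RewardsV2 = bincode::deserialize(&old).unwrap();
    assert_eq!(new.validator_point_value, 1.5);
    assert_eq!(new.unused, 0.25);
}
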
diff --git a/system-test/genesis-test/cluster_token_count.sh b/system-test/genesis-test/cluster_token_count.sh
index b33d7e4119..0d0fd2507e 100755
--- a/system-test/genesis-test/cluster_token_count.sh
+++ b/system-test/genesis-test/cluster_token_count.sh
@@ -17,7 +17,6 @@ usage: $0 [cluster_rpc_url]
   STAKE
   SYSTEM
   VOTE
-  STORAGE
   CONFIG
 
 Required arguments:
@@ -76,10 +75,6 @@ function get_program_account_balance_totals {
       voteAccountBalanceTotalSol=$totalAccountBalancesSol
      voteAccountBalanceTotalLamports=$totalAccountBalancesLamports
      ;;
-    STORAGE)
-      storageAccountBalanceTotalSol=$totalAccountBalancesSol
-      storageAccountBalanceTotalLamports=$totalAccountBalancesLamports
-      ;;
    CONFIG)
      configAccountBalanceTotalSol=$totalAccountBalancesSol
      configAccountBalanceTotalLamports=$totalAccountBalancesLamports
@@ -92,8 +87,8 @@ function get_program_account_balance_totals {
 }
 
 function sum_account_balances_totals {
-  grandTotalAccountBalancesSol=$((systemAccountBalanceTotalSol + stakeAccountBalanceTotalSol + voteAccountBalanceTotalSol + storageAccountBalanceTotalSol + configAccountBalanceTotalSol))
-  grandTotalAccountBalancesLamports=$((systemAccountBalanceTotalLamports + stakeAccountBalanceTotalLamports + voteAccountBalanceTotalLamports + storageAccountBalanceTotalLamports + configAccountBalanceTotalLamports))
+  grandTotalAccountBalancesSol=$((systemAccountBalanceTotalSol + stakeAccountBalanceTotalSol + voteAccountBalanceTotalSol + configAccountBalanceTotalSol))
+  grandTotalAccountBalancesLamports=$((systemAccountBalanceTotalLamports + stakeAccountBalanceTotalLamports + voteAccountBalanceTotalLamports + configAccountBalanceTotalLamports))
 
   printf "\n--- Total Token Distribution in all Account Balances ---\n"
   printf "Total SOL in all Account Balances: %'d\n" "$grandTotalAccountBalancesSol"
@@ -109,13 +104,11 @@ LAMPORTS_PER_SOL=1000000000 # 1 billion
 stakeAccountBalanceTotalSol=
 systemAccountBalanceTotalSol=
 voteAccountBalanceTotalSol=
-storageAccountBalanceTotalSol=
 configAccountBalanceTotalSol=
 
 stakeAccountBalanceTotalLamports=
 systemAccountBalanceTotalLamports=
 voteAccountBalanceTotalLamports=
-storageAccountBalanceTotalLamports=
 configAccountBalanceTotalLamports=
 
 echo "--- Querying RPC URL: $url ---"
@@ -124,13 +117,11 @@ get_cluster_version
 get_program_accounts STAKE "$STAKE_PROGRAM_PUBKEY" "$url"
 get_program_accounts SYSTEM "$SYSTEM_PROGRAM_PUBKEY" "$url"
 get_program_accounts VOTE "$VOTE_PROGRAM_PUBKEY" "$url"
-get_program_accounts STORAGE "$STORAGE_PROGRAM_PUBKEY" "$url"
 get_program_accounts CONFIG "$CONFIG_PROGRAM_PUBKEY" "$url"
 
 write_program_account_data_csv STAKE
 write_program_account_data_csv SYSTEM
 write_program_account_data_csv VOTE
-write_program_account_data_csv STORAGE
 write_program_account_data_csv CONFIG
 
 get_token_capitalization
@@ -138,7 +129,6 @@ get_token_capitalization
 get_program_account_balance_totals STAKE
 get_program_account_balance_totals SYSTEM
 get_program_account_balance_totals VOTE
-get_program_account_balance_totals STORAGE
 get_program_account_balance_totals CONFIG
 
 sum_account_balances_totals
diff --git a/system-test/genesis-test/get_program_accounts.sh b/system-test/genesis-test/get_program_accounts.sh
index d32e1c7bca..60262a55ea 100755
--- a/system-test/genesis-test/get_program_accounts.sh
+++ b/system-test/genesis-test/get_program_accounts.sh
@@ -6,7 +6,6 @@
 STAKE_PROGRAM_PUBKEY=Stake11111111111111111111111111111111111111
 SYSTEM_PROGRAM_PUBKEY=11111111111111111111111111111111
 VOTE_PROGRAM_PUBKEY=Vote111111111111111111111111111111111111111
-STORAGE_PROGRAM_PUBKEY=Storage111111111111111111111111111111111111
 CONFIG_PROGRAM_PUBKEY=Config1111111111111111111111111111111111111
 
 function get_program_accounts {
diff --git a/validator/src/main.rs b/validator/src/main.rs
index ddf9b6160e..4df88b7ca7 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -520,15 +520,6 @@ pub fn main() {
                         The authorized voter for the account must either be the --identity keypair \
                         or with the --authorized-voter argument")
                 )
-                .arg(
-                    Arg::with_name("storage_keypair")
-                        .long("storage-keypair")
-                        .value_name("PATH")
-                        .hidden(true) // Don't document this argument to discourage its use
-                        .takes_value(true)
-                        .validator(is_keypair_or_ask_keyword)
-                        .help("File containing the storage account keypair. Default is an ephemeral keypair"),
-                )
                 .arg(
                     Arg::with_name("init_complete_file")
                         .long("init-complete-file")
@@ -833,8 +824,6 @@ pub fn main() {
         .map(|keypairs| keypairs.into_iter().map(Arc::new).collect())
         .unwrap_or_else(|| vec![identity_keypair.clone()]);
 
-    let storage_keypair = keypair_of(&matches, "storage_keypair").unwrap_or_else(Keypair::new);
-
     let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap());
     let init_complete_file = matches.value_of("init_complete_file");
    let skip_poh_verify = matches.is_present("skip_poh_verify");
@@ -1260,7 +1249,6 @@ pub fn main() {
        &ledger_path,
        &vote_account,
        authorized_voter_keypairs,
-        &Arc::new(storage_keypair),
        cluster_entrypoint.as_ref(),
        !skip_poh_verify,
        &validator_config,