From 9232057e956544f4c36a7667a5a23d68c5df9448 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 21 Oct 2019 11:29:37 -0600 Subject: [PATCH] Rename replicator to archiver (#6464) * Rename replicator to archiver * cargo fmt * Fix grammar --- Cargo.lock | 26 ++-- Cargo.toml | 2 +- RELEASE.md | 2 +- multinode-demo/archiver-x.sh | 7 + multinode-demo/{replicator.sh => archiver.sh} | 22 +-- {replicator => archiver}/.gitignore | 0 {replicator => archiver}/Cargo.toml | 2 +- {replicator => archiver}/src/main.rs | 13 +- bench-exchange/src/main.rs | 2 +- bench-tps/src/main.rs | 2 +- book/src/SUMMARY.md | 2 +- book/src/api-reference/cli.md | 10 +- book/src/cluster/README.md | 8 +- book/src/cluster/ledger-replication.md | 94 ++++++------- book/src/ed_storage_rent_economics.md | 2 +- .../getting-started/testnet-participation.md | 2 +- book/src/implemented-proposals/blocktree.md | 2 +- .../ed_overview/README.md | 2 +- .../ed_overview/ed_attack_vectors.md | 8 +- .../ed_overview/ed_economic_sustainability.md | 4 +- .../ed_overview/ed_mvp.md | 6 +- .../ed_replication_client_economics/README.md | 2 +- ...plication_client_reward_auto_delegation.md | 2 +- .../ed_rce_storage_replication_rewards.md | 4 +- .../ed_validation_client_economics/README.md | 2 +- ...replication_validation_transaction_fees.md | 4 +- ...state_validation_protocol_based_rewards.md | 4 +- .../ledger-replication-to-implement.md | 40 +++--- ...ning-replicator.md => running-archiver.md} | 44 +++--- book/src/terminology.md | 6 +- book/src/validator/tvu/blocktree.md | 2 +- cli/src/cli.rs | 4 +- cli/src/storage.rs | 16 +-- core/src/{replicator.rs => archiver.rs} | 127 +++++++++--------- core/src/chacha_cuda.rs | 2 +- core/src/cluster_info.rs | 24 ++-- core/src/gossip_service.rs | 33 ++--- core/src/lib.rs | 2 +- core/src/repair_service.rs | 2 +- core/src/storage_stage.rs | 49 ++++--- core/src/window_service.rs | 2 +- core/tests/storage_stage.rs | 10 +- gossip/src/main.rs | 6 +- local_cluster/src/local_cluster.rs | 88 ++++++------ local_cluster/src/tests.rs | 2 +- .../src/tests/{replicator.rs => archiver.rs} | 110 +++++++-------- multinode-demo/common.sh | 2 +- multinode-demo/replicator-x.sh | 7 - net/common.sh | 6 +- net/gce.sh | 46 +++---- net/net.sh | 16 +-- net/remote/remote-node.sh | 6 +- net/ssh.sh | 6 +- programs/storage_api/src/storage_contract.rs | 34 ++--- .../storage_api/src/storage_instruction.rs | 8 +- .../tests/storage_processor.rs | 79 +++++------ runtime/src/bank.rs | 6 +- runtime/src/storage_utils.rs | 62 ++++----- scripts/cargo-install-all.sh | 2 +- sdk/src/clock.rs | 2 +- validator/src/lib.rs | 2 +- 61 files changed, 529 insertions(+), 560 deletions(-) create mode 100755 multinode-demo/archiver-x.sh rename multinode-demo/{replicator.sh => archiver.sh} (76%) rename {replicator => archiver}/.gitignore (100%) rename {replicator => archiver}/Cargo.toml (95%) rename {replicator => archiver}/src/main.rs (94%) rename book/src/{running-replicator.md => running-archiver.md} (72%) rename core/src/{replicator.rs => archiver.rs} (90%) rename local_cluster/src/tests/{replicator.rs => archiver.rs} (59%) delete mode 100755 multinode-demo/replicator-x.sh diff --git a/Cargo.lock b/Cargo.lock index 40fc2ea08..e6ed05b45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2992,6 +2992,19 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "solana-archiver" +version = "0.20.0" +dependencies = [ + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "console 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "solana-core 0.20.0", + "solana-logger 0.20.0", + "solana-metrics 0.20.0", + "solana-netutil 0.20.0", + "solana-sdk 0.20.0", +] + [[package]] name = "solana-banking-bench" version = "0.20.0" @@ -3745,19 +3758,6 @@ dependencies = [ "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "solana-replicator" -version = "0.20.0" -dependencies = [ - "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "console 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "solana-core 0.20.0", - "solana-logger 0.20.0", - "solana-metrics 0.20.0", - "solana-netutil 0.20.0", - "solana-sdk 0.20.0", -] - [[package]] name = "solana-runtime" version = "0.20.0" diff --git a/Cargo.toml b/Cargo.toml index 95e54efd9..f3f4fadcf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ members = [ "programs/vest_program", "programs/vote_api", "programs/vote_program", - "replicator", + "archiver", "runtime", "sdk", "sdk-c", diff --git a/RELEASE.md b/RELEASE.md index 8a0df6645..e2c630c3b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -138,7 +138,7 @@ There are three release channels that map to branches as follows: ### Update documentation TODO: Documentation update procedure is WIP as we move to gitbook -Document the new recommended version by updating `book/src/running-replicator.md` and `book/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version. +Document the new recommended version by updating `book/src/running-archiver.md` and `book/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version. #### Publish updated Book We maintain three copies of the "book" as official documentation: diff --git a/multinode-demo/archiver-x.sh b/multinode-demo/archiver-x.sh new file mode 100755 index 000000000..442c37d06 --- /dev/null +++ b/multinode-demo/archiver-x.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# +# Start a dynamically-configured archiver +# + +here=$(dirname "$0") +exec "$here"/archiver.sh --label x$$ "$@" diff --git a/multinode-demo/replicator.sh b/multinode-demo/archiver.sh similarity index 76% rename from multinode-demo/replicator.sh rename to multinode-demo/archiver.sh index 3d3db0780..a888ce3fc 100755 --- a/multinode-demo/replicator.sh +++ b/multinode-demo/archiver.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # -# A thin wrapper around `solana-replicator` that automatically provisions the -# replicator's identity and/or storage keypair if not provided by the caller. +# A thin wrapper around `solana-archiver` that automatically provisions the +# archiver's identity and/or storage keypair if not provided by the caller. # set -e @@ -42,26 +42,26 @@ while [[ -n $1 ]]; do shift 2 else echo "Unknown argument: $1" - $solana_replicator --help + $solana_archiver --help exit 1 fi else echo "Unknown argument: $1" - $solana_replicator --help + $solana_archiver --help exit 1 fi done -: "${identity_keypair:="$SOLANA_ROOT"/farf/replicator-identity-keypair"$label".json}" -: "${storage_keypair:="$SOLANA_ROOT"/farf/replicator-storage-keypair"$label".json}" -ledger="$SOLANA_ROOT"/farf/replicator-ledger"$label" +: "${identity_keypair:="$SOLANA_ROOT"/farf/archiver-identity-keypair"$label".json}" +: "${storage_keypair:="$SOLANA_ROOT"/farf/archiver-storage-keypair"$label".json}" +ledger="$SOLANA_ROOT"/farf/archiver-ledger"$label" rpc_url=$($solana_gossip get-rpc-url --entrypoint "$entrypoint") if [[ ! 
-r $identity_keypair ]]; then $solana_keygen new -o "$identity_keypair" - # TODO: https://github.com/solana-labs/solminer/blob/9cd2289/src/replicator.js#L17-L18 + # TODO: https://github.com/solana-labs/solminer/blob/9cd2289/src/archiver.js#L17-L18 $solana_cli --keypair "$identity_keypair" --url "$rpc_url" \ airdrop 100000 lamports fi @@ -72,7 +72,7 @@ if [[ ! -r $storage_keypair ]]; then storage_pubkey=$($solana_keygen pubkey "$storage_keypair") $solana_cli --keypair "$identity_keypair" --url "$rpc_url" \ - create-replicator-storage-account "$identity_pubkey" "$storage_pubkey" + create-archiver-storage-account "$identity_pubkey" "$storage_pubkey" fi default_arg --entrypoint "$entrypoint" @@ -81,5 +81,5 @@ default_arg --storage-keypair "$storage_keypair" default_arg --ledger "$ledger" set -x -# shellcheck disable=SC2086 # Don't want to double quote $solana_replicator -exec $solana_replicator "${args[@]}" +# shellcheck disable=SC2086 # Don't want to double quote $solana_archiver +exec $solana_archiver "${args[@]}" diff --git a/replicator/.gitignore b/archiver/.gitignore similarity index 100% rename from replicator/.gitignore rename to archiver/.gitignore diff --git a/replicator/Cargo.toml b/archiver/Cargo.toml similarity index 95% rename from replicator/Cargo.toml rename to archiver/Cargo.toml index d815af5b0..d7dcc63ed 100644 --- a/replicator/Cargo.toml +++ b/archiver/Cargo.toml @@ -1,7 +1,7 @@ [package] authors = ["Solana Maintainers "] edition = "2018" -name = "solana-replicator" +name = "solana-archiver" version = "0.20.0" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" diff --git a/replicator/src/main.rs b/archiver/src/main.rs similarity index 94% rename from replicator/src/main.rs rename to archiver/src/main.rs index 83ba7f955..23cbec8c7 100644 --- a/replicator/src/main.rs +++ b/archiver/src/main.rs @@ -1,8 +1,8 @@ use clap::{crate_description, crate_name, crate_version, App, Arg}; use console::style; +use solana_core::archiver::Archiver; use solana_core::cluster_info::{Node, VALIDATOR_PORT_RANGE}; use solana_core::contact_info::ContactInfo; -use solana_core::replicator::Replicator; use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil}; use std::net::SocketAddr; use std::path::PathBuf; @@ -94,11 +94,8 @@ fn main() { addr.set_ip(solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap()); addr }; - let node = Node::new_replicator_with_external_ip( - &keypair.pubkey(), - &gossip_addr, - VALIDATOR_PORT_RANGE, - ); + let node = + Node::new_archiver_with_external_ip(&keypair.pubkey(), &gossip_addr, VALIDATOR_PORT_RANGE); println!( "{} version {} (branch={}, commit={})", @@ -115,7 +112,7 @@ fn main() { ); let entrypoint_info = ContactInfo::new_gossip_entry_point(&entrypoint_addr); - let replicator = Replicator::new( + let archiver = Archiver::new( &ledger_path, node, entrypoint_info, @@ -124,5 +121,5 @@ fn main() { ) .unwrap(); - replicator.join(); + archiver.join(); } diff --git a/bench-exchange/src/main.rs b/bench-exchange/src/main.rs index 34ae65dfe..b77167d53 100644 --- a/bench-exchange/src/main.rs +++ b/bench-exchange/src/main.rs @@ -54,7 +54,7 @@ fn main() { ); } else { info!("Connecting to the cluster"); - let (nodes, _replicators) = + let (nodes, _archivers) = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| { panic!("Failed to discover nodes"); }); diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index e52812ef5..90cb87192 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -64,7 +64,7 @@ fn main() 
{ } info!("Connecting to the cluster"); - let (nodes, _replicators) = + let (nodes, _archivers) = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); exit(1); diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 0ab125624..619fb571c 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -35,7 +35,7 @@ * [Publishing Validator Info](running-validator/validator-info.md) * [Troubleshooting](running-validator/validator-troubleshoot.md) * [FAQ](running-validator/validator-faq.md) -* [Running a Replicator](running-replicator.md) +* [Running an Archiver](running-archiver.md) * [API Reference](api-reference/README.md) * [Transaction](api-reference/transaction-api.md) * [Instruction](api-reference/instruction-api.md) diff --git a/book/src/api-reference/cli.md b/book/src/api-reference/cli.md index 6a5abbe9c..f9d141d78 100644 --- a/book/src/api-reference/cli.md +++ b/book/src/api-reference/cli.md @@ -200,7 +200,7 @@ SUBCOMMANDS: claim-storage-reward Redeem storage reward credits cluster-version Get the version of the cluster entrypoint confirm Confirm transaction by signature - create-replicator-storage-account Create a replicator storage account + create-archiver-storage-account Create an archiver storage account create-stake-account Create a stake account create-validator-storage-account Create a validator storage account create-vote-account Create a vote account @@ -380,13 +380,13 @@ ARGS: The transaction signature to confirm ``` -#### solana-create-replicator-storage-account +#### solana-create-archiver-storage-account ```text -solana-create-replicator-storage-account -Create a replicator storage account +solana-create-archiver-storage-account +Create an archiver storage account USAGE: - solana create-replicator-storage-account [OPTIONS] + solana create-archiver-storage-account [OPTIONS] FLAGS: -h, --help Prints help information diff --git a/book/src/cluster/README.md b/book/src/cluster/README.md index 0d8ac9b1c..ee5455adb 100644 --- a/book/src/cluster/README.md +++ b/book/src/cluster/README.md @@ -4,17 +4,17 @@ A Solana cluster is a set of validators working together to serve client transac ## Creating a Cluster -Before starting any validators, one first needs to create a _genesis block_. The block contains entries referencing two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis block. The second validator then contacts the bootstrap leader to register as a _validator_ or _replicator_. Additional validators then register with any registered member of the cluster. +Before starting any validators, one first needs to create a _genesis block_. The block contains entries referencing two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis block. The second validator then contacts the bootstrap leader to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster. 
-A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until replicator nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy. +A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until archiver nodes submit proofs that they have stored copies of them. Once the validator observes a sufficient number of copies exist, it deletes its copy. ## Joining a Cluster -Validators and replicators enter the cluster via registration messages sent to its _control plane_. The control plane is implemented using a _gossip_ protocol, meaning that a node may register with any existing node, and expect its registration to propagate to all nodes in the cluster. The time it takes for all nodes to synchronize is proportional to the square of the number of nodes participating in the cluster. Algorithmically, that's considered very slow, but in exchange for that time, a node is assured that it eventually has all the same information as every other node, and that that information cannot be censored by any one node. +Validators and archivers enter the cluster via registration messages sent to its _control plane_. The control plane is implemented using a _gossip_ protocol, meaning that a node may register with any existing node, and expect its registration to propagate to all nodes in the cluster. The time it takes for all nodes to synchronize is proportional to the square of the number of nodes participating in the cluster. Algorithmically, that's considered very slow, but in exchange for that time, a node is assured that it eventually has all the same information as every other node, and that that information cannot be censored by any one node. ## Sending Transactions to a Cluster -Clients send transactions to any validator's Transaction Processing Unit \(TPU\) port. If the node is in the validator role, it forwards the transaction to the designated leader. If in the leader role, the node bundles incoming transactions, timestamps them creating an _entry_, and pushes them onto the cluster's _data plane_. Once on the data plane, the transactions are validated by validator nodes and replicated by replicator nodes, effectively appending them to the ledger. +Clients send transactions to any validator's Transaction Processing Unit \(TPU\) port. If the node is in the validator role, it forwards the transaction to the designated leader. If in the leader role, the node bundles incoming transactions, timestamps them creating an _entry_, and pushes them onto the cluster's _data plane_. Once on the data plane, the transactions are validated by validator nodes and replicated by archiver nodes, effectively appending them to the ledger. ## Confirming Transactions diff --git a/book/src/cluster/ledger-replication.md b/book/src/cluster/ledger-replication.md index d289d127b..34792ffd9 100644 --- a/book/src/cluster/ledger-replication.md +++ b/book/src/cluster/ledger-replication.md @@ -10,9 +10,9 @@ Our improvement on this approach is to randomly sample the encrypted segments fa ## Network -Validators for PoRep are the same validators that are verifying transactions. If a replicator can prove that a validator verified a fake PoRep, then the validator will not receive a reward for that storage epoch. 
+Validators for PoRep are the same validators that are verifying transactions. If an archiver can prove that a validator verified a fake PoRep, then the validator will not receive a reward for that storage epoch. -Replicators are specialized _light clients_. They download a part of the ledger \(a.k.a Segment\) and store it, and provide PoReps of storing the ledger. For each verified PoRep replicators earn a reward of sol from the mining pool. +Archivers are specialized _light clients_. They download a part of the ledger \(a.k.a Segment\) and store it, and provide PoReps of storing the ledger. For each verified PoRep archivers earn a reward of sol from the mining pool. ## Constraints We have the following constraints: @@ -40,9 +40,9 @@ We have the following constraints: 1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The - unit of storage for a replicator. + unit of storage for an archiver. -2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which replicators +2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which archivers regenerate their encryption keys and select a new dataset to store. @@ -68,7 +68,7 @@ We have the following constraints: ### Validator behavior 1. Validators join the network and begin looking for archiver accounts at each storage epoch/turn boundary. 2. Every turn, Validators sign the PoH value at the boundary and use that signed value to randomly pick proofs to verify from each storage account found in the turn boundary. This signed value is also submitted to the validator's storage account and will be used by - replicators at a later stage to cross-verify. + archivers at a later stage to cross-verify. 3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value - is also served to Replicators via RPC interfaces. + is also served to Archivers via RPC interfaces. 4. For a given turn N, all validations get locked out until turn N+3 \(a gap of 2 turn/epoch\). 5. Any incorrect validations will be marked during the turn in between. -### Replicator behavior +### Archiver behavior -1. Since a replicator is somewhat of a light client and not downloading all the +1. Since an archiver is somewhat of a light client and not downloading all the - ledger data, they have to rely on other validators and replicators for information. + ledger data, they have to rely on other validators and archivers for information. Any given validator may or may not be malicious and give incorrect information, although there are not any obvious attack vectors that this could accomplish besides having the - replicator do extra wasted work. + archiver do extra wasted work. 
For many of the operations there are a number of options - depending on how paranoid a replicator is: + depending on how paranoid an archiver is: - * \(a\) replicator can ask a validator - * \(b\) replicator can ask multiple validators - * \(c\) replicator can ask other replicators - * \(d\) replicator can subscribe to the full transaction stream and generate + * \(a\) archiver can ask a validator + * \(b\) archiver can ask multiple validators + * \(c\) archiver can ask other archivers + * \(d\) archiver can subscribe to the full transaction stream and generate the information itself \(assuming the slot is recent enough\) - * \(e\) replicator can subscribe to an abbreviated transaction stream to + * \(e\) archiver can subscribe to an abbreviated transaction stream to generate the information itself \(assuming the slot is recent enough\) -2. A replicator obtains the PoH hash corresponding to the last turn with its slot. -3. The replicator signs the PoH hash with its keypair. That signature is the +2. An archiver obtains the PoH hash corresponding to the last turn with its slot. +3. The archiver signs the PoH hash with its keypair. That signature is the seed used to pick the segment to replicate and also the encryption key. The - replicator mods the signature with the slot to get which segment to + archiver mods the signature with the slot to get which segment to replicate. -4. The replicator retrives the ledger by asking peer validators and +4. The archiver retrieves the ledger by asking peer validators and - replicators. See 6.5. + archivers. See 6.5. -5. The replicator then encrypts that segment with the key with chacha algorithm +5. The archiver then encrypts that segment with the key using the chacha algorithm in CBC mode with `NUM_CHACHA_ROUNDS` of encryption. -6. The replicator initializes a chacha rng with the a signed recent PoH value as +6. The archiver initializes a chacha rng with a signed recent PoH value as the seed. -7. The replicator generates `NUM_STORAGE_SAMPLES` samples in the range of the +7. The archiver generates `NUM_STORAGE_SAMPLES` samples in the range of the entry size and samples the encrypted segment with sha256 for 32-bytes at each offset value. Sampling the state should be faster than generating the encrypted segment. -8. The replicator sends a PoRep proof transaction which contains its sha state +8. The archiver sends a PoRep proof transaction which contains its sha state at the end of the sampling operation, its seed and the samples it used to the current leader and it is put onto the ledger. -9. During a given turn the replicator should submit many proofs for the same segment +9. During a given turn the archiver should submit many proofs for the same segment and based on the `RATIO_OF_FAKE_PROOFS` some of those proofs must be fake. -10. As the PoRep game enters the next turn, the replicator must submit a +10. As the PoRep game enters the next turn, the archiver must submit a transaction with the mask of which proofs were fake during the last turn. This - transaction will define the rewards for both replicators and validators. + transaction will define the rewards for both archivers and validators. -11. Finally for a turn N, as the PoRep game enters turn N + 3, replicator's proofs for +11. Finally for a turn N, as the PoRep game enters turn N + 3, archiver's proofs for turn N will be counted towards their rewards. ### The PoRep Game The Proof of Replication game has 4 primary stages. For each "turn" multiple PoR The 4 stages of the PoRep Game are as follows: 1. 
Proof submission stage - * Replicators: submit as many proofs as possible during this stage + * Archivers: submit as many proofs as possible during this stage * Validators: No-op 2. Proof verification stage - * Replicators: No-op - * Validators: Select replicators and verify their proofs from the previous turn + * Archivers: No-op + * Validators: Select archivers and verify their proofs from the previous turn 3. Proof challenge stage - * Replicators: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\) + * Archivers: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\) * Validators: No-op 4. Reward collection stage - * Replicators: Collect rewards for 3 turns ago + * Archivers: Collect rewards for 3 turns ago * Validators: Collect rewards for 3 turns ago -For each turn of the PoRep game, both Validators and Replicators evaluate each stage. The stages are run as separate transactions on the storage program. +For each turn of the PoRep game, both Validators and Archivers evaluate each stage. The stages are run as separate transactions on the storage program. ### Finding who has a given block of ledger 1. Validators monitor the turns in the PoRep game and look at the rolled up at turn boundaries for any proofs. -2. Validators maintain a map of ledger segments and corresponding replicator public keys. +2. Validators maintain a map of ledger segments and corresponding archiver public keys. - The map is updated when a Validator processes a replicator's proofs for a segment. + The map is updated when a Validator processes an archiver's proofs for a segment. The validator provides an RPC interface to access this map. Using this API, clients - can map a segment to a replicator's network address \(correlating it via cluster\_info table\). + can map a segment to an archiver's network address \(correlating it via cluster\_info table\). - The clients can then send repair requests to the replicator to retrieve segments. + The clients can then send repair requests to the archiver to retrieve segments. 3. Validators would need to invalidate this list every N turns. For any random seed, we force everyone to use a signature that is derived from a PoH hash at the turn boundary. Since there are many more client identities than encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to prevent a single human entity from storing all the replications of a single chunk of the ledger. -Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore PoRep identities and blocks. Thus to get a reward for replicators need to store the first block for free and the network can reward long lived client identities more than new ones. +Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore PoRep identities and blocks. Thus to get a reward, archivers need to store the first block for free and the network can reward long lived client identities more than new ones. 
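The archiver steps above (sign the PoH hash, reduce the signature modulo the segment count, encrypt the segment, then sha256-hash `NUM_STORAGE_SAMPLES` 32-byte windows) are compact enough to sketch. The Rust below is a hypothetical illustration of steps 2-8, not code from this patch: the xorshift generator and `DefaultHasher` are std-library stand-ins for the chacha rng and sha256 the protocol specifies, and every name, constant, and value is invented for the example.

```rust
use std::collections::hash_map::DefaultHasher;
use std::convert::TryInto;
use std::hash::Hasher;

// Assumed small value for illustration; the real constant is protocol-defined.
const NUM_STORAGE_SAMPLES: usize = 4;

/// Pick which segment to replicate: the signed PoH hash, reduced modulo the
/// number of segments sealed so far (step 3 above).
fn select_segment(signature: &[u8; 64], num_segments: u64) -> u64 {
    // Interpret the low 8 bytes of the signature as an integer.
    let low = u64::from_le_bytes(signature[..8].try_into().unwrap());
    low % num_segments
}

/// Derive NUM_STORAGE_SAMPLES offsets from the seed and fold a 32-byte window
/// of the (assumed >= 32 byte) encrypted segment into a running hash at each
/// offset (steps 6-7 above). The final state is what a PoRep proof
/// transaction would carry (step 8).
fn sample_segment(encrypted: &[u8], seed: u64) -> u64 {
    let mut state = seed.max(1); // xorshift64 stand-in for the chacha rng
    let mut hasher = DefaultHasher::new(); // stand-in for the sha256 state
    for _ in 0..NUM_STORAGE_SAMPLES {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        let max_start = encrypted.len().saturating_sub(32).max(1);
        let offset = (state as usize) % max_start;
        hasher.write(&encrypted[offset..offset + 32]);
    }
    hasher.finish()
}

fn main() {
    let signature = [7u8; 64]; // placeholder for the archiver's signed PoH hash
    let seed = u64::from_le_bytes(signature[..8].try_into().unwrap());
    let segment_index = select_segment(&signature, 1024);
    let encrypted_segment = vec![1u8; 64 * 1024]; // placeholder ciphertext
    let proof_state = sample_segment(&encrypted_segment, seed);
    println!("replicate segment {}, proof state {:016x}", segment_index, proof_state);
}
```

Note how the modular reduction binds the segment choice to the archiver's keypair: because the seed is a signature over a shared PoH value, neither the archiver nor a validator can freely grind for a more convenient segment, which is the property the Sybil discussion above relies on.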
## Validator attacks -* If a validator approves fake proofs, replicator can easily out them by +* If a validator approves fake proofs, an archiver can easily out them by showing the initial state for the hash. * If a validator marks real proofs as fake, no on-chain computation can occur to distinguish who is correct. Rewards would have to rely on the results from - multiple validators to catch bad actors and replicators from being denied rewards. + multiple validators to catch bad actors and keep archivers from being denied rewards. * Validator stealing mining proof results for itself. The proofs are derived - from a signature from a replicator, since the validator does not know the + from a signature from an archiver; since the validator does not know the private key used to generate the encryption key, it cannot be the generator of the proof. ## Reward incentives -Fake proofs are easy to generate but difficult to verify. For this reason, PoRep proof transactions generated by replicators may require a higher fee than a normal transaction to represent the computational cost required by validators. +Fake proofs are easy to generate but difficult to verify. For this reason, PoRep proof transactions generated by archivers may require a higher fee than a normal transaction to represent the computational cost required by validators. Some percentage of fake proofs are also necessary to receive a reward from storage mining. * We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset. * We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed -* The game between validators and replicators is over random blocks and random +* The game between validators and archivers is over random blocks and random encryption identities and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation. -* Replicator clients fish for lazy validators by submitting fake proofs that +* Archiver clients fish for lazy validators by submitting fake proofs that they can prove are fake. diff --git a/book/src/ed_storage_rent_economics.md b/book/src/ed_storage_rent_economics.md index 474cd8cef..c49dc92b1 100644 --- a/book/src/ed_storage_rent_economics.md +++ b/book/src/ed_storage_rent_economics.md @@ -1,6 +1,6 @@ ## Storage Rent Economics -Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory, account for the acute, transactional, costs of validating and adding that data to the ledger. 
At the same time, our compensation design for archivers (see [Replication-client Economics](ed_replication_client_economics.md)), in theory, accounts for the long term storage of the historical ledger. Unaccounted in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only to validators but also to the broader network; as active state grows, so does data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent. Storage rent can be paid via one of two methods: diff --git a/book/src/getting-started/testnet-participation.md b/book/src/getting-started/testnet-participation.md index 64c54a9ef..6090984a7 100644 --- a/book/src/getting-started/testnet-participation.md +++ b/book/src/getting-started/testnet-participation.md @@ -3,5 +3,5 @@ Participate in our testnet: * [Running a Validator](../running-validator/) -* [Running a Replicator](../running-replicator.md) +* [Running an Archiver](../running-archiver.md) diff --git a/book/src/implemented-proposals/blocktree.md b/book/src/implemented-proposals/blocktree.md index 1399b0d71..6e3e17189 100644 --- a/book/src/implemented-proposals/blocktree.md +++ b/book/src/implemented-proposals/blocktree.md @@ -86,5 +86,5 @@ Replay stage uses Blocktree APIs to find the longest chain of entries it can han Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged. -Replicator nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. +Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. diff --git a/book/src/implemented-proposals/ed_overview/README.md b/book/src/implemented-proposals/ed_overview/README.md index 9ca7348c7..090c16bca 100644 --- a/book/src/implemented-proposals/ed_overview/README.md +++ b/book/src/implemented-proposals/ed_overview/README.md @@ -10,7 +10,7 @@ These protocol-based rewards, to be distributed to participating validation and Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction \(be it a state execution or proof-of-replication verification\). A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below. -A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). 
Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunties and marketplace. Additionally, in [Storage Rent Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and replicator-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with a replicator-to-validator delegation mechanism designed to aide participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized. +A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized. **Figure 1**: Schematic overview of Solana economic incentive design. 
diff --git a/book/src/implemented-proposals/ed_overview/ed_attack_vectors.md b/book/src/implemented-proposals/ed_overview/ed_attack_vectors.md index 1a9d9017b..419720f3d 100644 --- a/book/src/implemented-proposals/ed_overview/ed_attack_vectors.md +++ b/book/src/implemented-proposals/ed_overview/ed_attack_vectors.md @@ -4,11 +4,11 @@ ## Colluding validation and replication clients -A colluding validation-client may take the strategy to mark PoReps from non-colluding archiver nodes as invalid as an attempt to maximize the rewards for the colluding archiver nodes. In this case, it isn’t feasible for the offended-against archiver nodes to petition the network for resolution as this would result in a network-wide vote on each offending PoRep and create too much overhead for the network to progress adequately. Also, this mitigation attempt would still be vulnerable to a >= 51% staked colluder. -Alternatively, transaction fees from submitted PoReps are pooled and distributed across validation-clients in proportion to the number of valid PoReps discounted by the number of invalid PoReps as voted by each validator-client. Thus invalid votes are directly dis-incentivized through this reward channel. Invalid votes that are revealed by archiver nodes as fishing PoReps, will not be discounted from the payout PoRep count. -Another collusion attack involves a validator-client who may take the strategy to ignore invalid PoReps from colluding archivers and vote them as valid. In this case, colluding archiver-clients would not have to store the data while still receiving rewards for validated PoReps. Additionally, colluding validator nodes would also receive rewards for validating these PoReps. 
To mitigate this attack, validators must randomly sample PoReps corresponding to the ledger block they are validating and because of this, there will be multiple validators that will receive the colluding archiver’s invalid submissions. These non-colluding validators will be incentivized to mark these PoReps as invalid as they have no way to determine whether the proposed invalid PoRep is actually a fishing PoRep, for which a confirmation vote would result in the validator’s stake being slashed. -In this case, the proportion of time a colluding pair will be successful has an upper limit determined by the % of stake of the network claimed by the colluding validator. This also sets bounds to the value of such an attack. For example, if a colluding validator controls 10% of the total validator stake, transaction fees will be lost \(likely sent to mining pool\) by the colluding archiver 90% of the time and so the attack vector is only profitable if the per-PoRep reward is at least 90% higher than the average PoRep transaction fee. While, probabilistically, some colluding archiver-client PoReps will find their way to colluding validation-clients, the network can also monitor rates of paired \(validator + archiver\) discrepancies in voting patterns and censor identified colluders in these cases. diff --git a/book/src/implemented-proposals/ed_overview/ed_economic_sustainability.md b/book/src/implemented-proposals/ed_overview/ed_economic_sustainability.md index 3fe2f02ef..42097074e 100644 --- a/book/src/implemented-proposals/ed_overview/ed_economic_sustainability.md +++ b/book/src/implemented-proposals/ed_overview/ed_economic_sustainability.md @@ -4,9 +4,9 @@ Long term economic sustainability is one of the guiding principles of Solana’s economic design. While it is impossible to predict how decentralized economies will develop over time, especially economies with flexible decentralized governances, we can arrange economic components such that, under certain conditions, a sustainable economy may take shape in the long term. In the case of Solana’s network, these components take the form of token issuance \(via inflation\) and token burning. -The dominant remittances from the Solana mining pool are validator and replicator rewards. The disinflationary mechanism is a flat, protocol-specified and adjusted, % of each transaction fee. +The dominant remittances from the Solana mining pool are validator and archiver rewards. The disinflationary mechanism is a flat, protocol-specified and adjusted, % of each transaction fee. -The Replicator rewards are to be delivered to replicators as a portion of the network inflation after successful PoRep validation. 
The per-PoRep reward amount is determined as a function of the total network storage redundancy at the time of the PoRep validation and the network goal redundancy. This function is likely to take the form of a discount from a base reward to be delivered when the network has achieved and maintained its goal redundancy. An example of such a reward function is shown in **Figure 3** +The Archiver rewards are to be delivered to archivers as a portion of the network inflation after successful PoRep validation. The per-PoRep reward amount is determined as a function of the total network storage redundancy at the time of the PoRep validation and the network goal redundancy. This function is likely to take the form of a discount from a base reward to be delivered when the network has achieved and maintained its goal redundancy. An example of such a reward function is shown in **Figure 3**. **Figure 3**: Example PoRep reward design as a function of global network storage redundancy. diff --git a/book/src/implemented-proposals/ed_overview/ed_mvp.md b/book/src/implemented-proposals/ed_overview/ed_mvp.md index ed2041217..301cbc03e 100644 --- a/book/src/implemented-proposals/ed_overview/ed_mvp.md +++ b/book/src/implemented-proposals/ed_overview/ed_mvp.md @@ -10,7 +10,7 @@ The preceding sections, outlined in the [Economic Design Overview](./), describ * Mechanism by which validators are rewarded via network inflation. * Ability to delegate tokens to validator nodes * Validator set commission fees on interest from delegated tokens. -* Replicators to receive fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism \(i.e. PoRep reward as a function of total ledger redundancy\) to come later. -* Pooling of replicator PoRep transaction fees and weighted distribution to validators based on PoRep verification \(see [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). It will be useful to test this protection against attacks on testnet. -* Nice-to-have: auto-delegation of replicator rewards to validator. +* Archivers to receive fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism \(i.e. PoRep reward as a function of total ledger redundancy\) to come later. +* Pooling of archiver PoRep transaction fees and weighted distribution to validators based on PoRep verification \(see [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md)\). It will be useful to test this protection against attacks on testnet. +* Nice-to-have: auto-delegation of archiver rewards to validator. diff --git a/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md b/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md index 04b0dfdc0..c92417f09 100644 --- a/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md +++ b/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/README.md @@ -2,5 +2,5 @@ **Subject to change.** -Replication-clients should be rewarded for providing the network with storage space. Incentivization of the set of replicators provides data security through redundancy of the historical ledger. Replication nodes are rewarded in proportion to the amount of ledger data storage provided, as proved by successfully submitting Proofs-of-Replication to the cluster.. 
These rewards are captured by generating and entering Proofs of Replication \(PoReps\) into the PoH stream which can be validated by Validation nodes as described above in the [Replication-validation Transaction Fees](../ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md) chapter. +Replication-clients should be rewarded for providing the network with storage space. Incentivization of the set of archivers provides data security through redundancy of the historical ledger. Replication nodes are rewarded in proportion to the amount of ledger data storage provided, as proved by successfully submitting Proofs-of-Replication to the cluster. These rewards are captured by generating and entering Proofs of Replication \(PoReps\) into the PoH stream which can be validated by Validation nodes as described above in the [Replication-validation Transaction Fees](../ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md) chapter. diff --git a/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md b/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md index 660d982ba..8d4f5e1c5 100644 --- a/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md +++ b/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md @@ -4,5 +4,5 @@ The ability for Solana network participants to earn rewards by providing storage service is a unique on-boarding path that requires little hardware overhead and minimal upfront capital. It offers an avenue for individuals with extra storage space on their home laptops or PCs to contribute to the security of the network and become integrated into the Solana economy. -To enhance this on-boarding ramp and facilitate further participation and investment in the Solana economy, replication-clients have the opportunity to auto-delegate their rewards to validation-clients of their choice. Much like the automatic reinvestment of stock dividends, in this scenario, a replicator-client can earn Solana tokens by providing some storage capacity to the network \(i.e. via submitting valid PoReps\), have the protocol-based rewards automatically assigned as delegation to a staked validator node of the replicator's choice and earn interest, less a fee, from the validation-client's network participation. +To enhance this on-boarding ramp and facilitate further participation and investment in the Solana economy, replication-clients have the opportunity to auto-delegate their rewards to validation-clients of their choice. Much like the automatic reinvestment of stock dividends, in this scenario, an archiver-client can earn Solana tokens by providing some storage capacity to the network \(i.e. via submitting valid PoReps\), have the protocol-based rewards automatically assigned as delegation to a staked validator node of the archiver's choice and earn interest, less a fee, from the validation-client's network participation. 
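As a rough illustration of the auto-delegation flow just described — PoRep rewards credited to the archiver, immediately assigned as stake to a chosen validator, and accruing interest less that validator's fee — here is a hedged Rust sketch. The struct, the 10% commission, and the per-epoch rate are assumptions invented for the example, not accounts or parameters defined by the storage program.

```rust
/// Hypothetical state an archiver accrues across epochs under reward
/// auto-delegation; not the storage program's actual account layout.
struct ArchiverAccount {
    earned_rewards: f64,      // PoRep rewards, auto-assigned as stake
    delegate_commission: f64, // fee fraction kept by the chosen validator
}

impl ArchiverAccount {
    /// Credit a validated-PoRep reward; under auto-delegation it is
    /// immediately counted as stake on the chosen validator.
    fn credit_porep_reward(&mut self, reward: f64) {
        self.earned_rewards += reward;
    }

    /// Accrue one epoch of interest on the delegated stake, less the
    /// validation-client's fee.
    fn accrue_epoch_interest(&mut self, epoch_rate: f64) {
        let interest = self.earned_rewards * epoch_rate;
        self.earned_rewards += interest * (1.0 - self.delegate_commission);
    }
}

fn main() {
    let mut archiver = ArchiverAccount {
        earned_rewards: 0.0,
        delegate_commission: 0.10, // assumed 10% validator fee
    };
    archiver.credit_porep_reward(100.0); // assumed per-PoRep reward
    for _ in 0..10 {
        archiver.accrue_epoch_interest(0.001); // assumed 0.1% per epoch
    }
    println!("balance after 10 epochs: {:.3}", archiver.earned_rewards);
}
```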
diff --git a/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md b/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md index 93939faf8..9deda4cbd 100644 --- a/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md +++ b/book/src/implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md @@ -2,7 +2,7 @@ **Subject to change.** -Replicator-clients download, encrypt and submit PoReps for ledger block sections.3 PoReps submitted to the PoH stream, and subsequently validated, function as evidence that the submitting replicator client is indeed storing the assigned ledger block sections on local hard drive space as a service to the network. Therefore, replicator clients should earn protocol rewards proportional to the amount of storage, and the number of successfully validated PoReps, that they are verifiably providing to the network. +Archiver-clients download, encrypt and submit PoReps for ledger block sections. PoReps submitted to the PoH stream, and subsequently validated, function as evidence that the submitting archiver client is indeed storing the assigned ledger block sections on local hard drive space as a service to the network. Therefore, archiver clients should earn protocol rewards proportional to the amount of storage, and the number of successfully validated PoReps, that they are verifiably providing to the network. -Additionally, replicator clients have the opportunity to capture a portion of slashed bounties \[TBD\] of dishonest validator clients. This can be accomplished by a replicator client submitting a verifiably false PoRep for which a dishonest validator client receives and signs as a valid PoRep. +Additionally, archiver clients have the opportunity to capture a portion of slashed bounties \[TBD\] of dishonest validator clients. This can be accomplished by an archiver client submitting a verifiably false PoRep that a dishonest validator client receives and signs as a valid PoRep. This reward incentive is to prevent lazy validators and minimize validator-archiver collusion attacks, more on this below. diff --git a/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md b/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md index f9843548f..ec5413ed0 100644 --- a/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md +++ b/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md @@ -2,7 +2,7 @@ **Subject to change.** -Validator-clients are eligible to receive protocol-based \(i.e. inflation-based\) rewards issued via stake-based annual interest rates \(calculated per epoch\) by providing compute \(CPU+GPU\) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of total amount of circulating tokens. The network is expected to launch with an annual inflation rate around 15%, set to decrease by 15% per year until a long-term stable rate of 1-2% is reached. 
These issuances are to be split and distributed to participating validators and archivers, with around 90% of the issued tokens allocated for validator rewards. Because the network will be distributing a fixed amount of inflation rewards across the stake-weighted validator set, any individual validator's interest rate will be a function of the amount of staked SOL in relation to the circulating SOL. Additionally, validator clients may earn revenue through fees via state-validation transactions and Proof-of-Replication \(PoRep\) transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and rent, and PoRep-validation transaction fees. diff --git a/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md b/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md index ed6c0dc38..a4a2d0ac6 100644 --- a/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md +++ b/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md @@ -2,11 +2,11 @@ **Subject to change.** -As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by replicator-clients. In this case, validators are providing compute \(CPU/GPU\) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH leger block. +As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by archiver-clients. In this case, validators are providing compute \(CPU/GPU\) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH ledger block. While replication-clients are incentivized and rewarded through a protocol-based rewards schedule \(see [Replication-client Economics](../ed_replication_client_economics/)\), validator-clients will be incentivized to include and validate PoReps in PoH through collection of transaction fees associated with the submitted PoReps and distribution of protocol rewards proportional to the validated PoReps. 
As will be described in detail in Section 3.1, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy \(e.g. 10x data redundancy\). The validation of PoReps by validation-clients is computationally more expensive than state-validation \(detailed in the [Economic Sustainability](../ed_economic_sustainability.md) chapter\), thus the transaction fees are expected to be proportionally higher.

-There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the replicators challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).
+There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archiver's challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).
diff --git a/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md b/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md
index 53c165471..32de78f34 100644
--- a/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md
+++ b/book/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md
@@ -7,7 +7,7 @@ Validator-clients have two functional roles in the Solana network:

* Validate \(vote\) the current global state of that PoH along with any Proofs-of-Replication \(see [Replication Client Economics](../ed_replication_client_economics/)\) that they are eligible to validate.
* Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and Proofs-of-Replication and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity. -Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). PoRep transaction fees are also collected by the leader client and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch a replicator's challenge. \(see [Replication-client Transaction Fees](ed_vce_replication_validation_transaction_fees.md)\) +Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). PoRep transaction fees are also collected by the leader client and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch an archiver's challenge. \(see [Replication-client Transaction Fees](ed_vce_replication_validation_transaction_fees.md)\) The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of: @@ -21,7 +21,7 @@ At any given point in time, a specific validator's interest rate can be determin ![drawing](../../../.gitbook/assets/p_ex_schedule-3.png) \*\*Figure 2:\*\* In this example schedule, the annual inflation rate \[%\] reduces at around 20% per year, until it reaches the long-term, fixed, 1.5% rate. -![drawing](../../../.gitbook/assets/p_ex_supply-1%20%281%29.png) \*\*Figure 3:\*\* The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in \*\*Figure 2\*\* Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabalize near 1-2% which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for state-validation and ledger storage replication \(PoReps\) are not accounted for here. 
Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and replicator nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assummed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of % circulating token supply that is staked is shown in\*\* Figure 4\*\*.
+![drawing](../../../.gitbook/assets/p_ex_supply-1%20%281%29.png) \*\*Figure 3:\*\* The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in \*\*Figure 2\*\*. Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2%, which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for state-validation and ledger storage replication \(PoReps\) are not accounted for here. Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and archiver nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of % circulating token supply that is staked is shown in \*\*Figure 4\*\*.

![drawing](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/.gitbook/assets/p_ex_interest.png)
diff --git a/book/src/proposals/ledger-replication-to-implement.md b/book/src/proposals/ledger-replication-to-implement.md
index 5e4043cd3..b228d9dc9 100644
--- a/book/src/proposals/ledger-replication-to-implement.md
+++ b/book/src/proposals/ledger-replication-to-implement.md
@@ -4,13 +4,13 @@ Replication behavior yet to be implemented.

## Storage epoch

-The storage epoch should be the number of slots which results in around 100GB-1TB of ledger to be generated for replicators to store. Replicators will start storing ledger when a given fork has a high probability of not being rolled back.
+The storage epoch should be the number of slots which results in around 100GB-1TB of ledger being generated for archivers to store. Archivers will start storing ledger when a given fork has a high probability of not being rolled back.

## Validator behavior

1. Every NUM\_KEY\_ROTATION\_TICKS it also validates samples received from
- replicators. It signs the PoH hash at that point and uses the following
+ archivers.
It signs the PoH hash at that point and uses the following
algorithm with the signature as the input:
@@ -42,35 +42,35 @@ The storage epoch should be the number of slots which results in around 100GB-1T
distribution of the storage reward if no challenges were seen for the proof to
- the validators and replicators party to the proofs.
+ the validators and archivers party to the proofs.

-## Replicator behavior
+## Archiver behavior

-1. The replicator then generates another set of offsets which it submits a fake
+1. The archiver then generates another set of offsets for which it submits a fake
proof with an incorrect sha state. It can be proven to be fake by providing
the seed for the hash result.
- * A fake proof should consist of a replicator hash of a signature of a PoH
+ * A fake proof should consist of an archiver hash of a signature of a PoH
- value. That way when the replicator reveals the fake proof, it can be
+ value. That way when the archiver reveals the fake proof, it can be
verified on chain.
-2. The replicator monitors the ledger, if it sees a fake proof integrated, it
+2. The archiver monitors the ledger; if it sees a fake proof integrated, it
creates a challenge transaction and submits it to the current leader. The
transaction proves the validator incorrectly validated a fake storage proof.
- The replicator is rewarded and the validator's staking balance is slashed or
+ The archiver is rewarded and the validator's staking balance is slashed or
frozen.

## Storage proof contract logic

-Each replicator and validator will have their own storage account. The validator's account would be separate from their gossip id similiar to their vote account. These should be implemented as two programs one which handles the validator as the keysigner and one for the replicator. In that way when the programs reference other accounts, they can check the program id to ensure it is a validator or replicator account they are referencing.
+Each archiver and validator will have their own storage account. The validator's account would be separate from their gossip id, similar to their vote account. These should be implemented as two programs: one which handles the validator as the keysigner and one for the archiver. In that way when the programs reference other accounts, they can check the program id to ensure it is a validator or archiver account they are referencing.

### SubmitMiningProof

@@ -80,10 +80,10 @@ SubmitMiningProof {
sha_state: Hash,
signature: Signature,
};
-keys = [replicator_keypair]
+keys = [archiver_keypair]
```

-Replicators create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, the sha\_state the result of the replicator using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transfered to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given replicator should only submit proofs for one segment.
+Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, the sha\_state the result of the archiver using the hash function to sample their encrypted ledger segment.
The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transferred to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch, a given archiver should only submit proofs for one segment.

The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots in which a significant portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state.

@@ -95,22 +95,22 @@ The program should do a signature verify check on the signature, public key from
ProofValidation {
proof_mask: Vec,
}
-keys = [validator_keypair, replicator_keypair(s) (unsigned)]
+keys = [validator_keypair, archiver_keypair(s) (unsigned)]
```

-A validator will submit this transaction to indicate that a set of proofs for a given segment are valid/not-valid or skipped where the validator did not look at it. The keypairs for the replicators that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs are generated in the previous epoch. The sampling of the storage proofs should be verified ensuring that the correct proofs are skipped by the validator according to the logic outlined in the validator behavior of sampling.
+A validator will submit this transaction to indicate that a set of proofs for a given segment are valid/not-valid or skipped where the validator did not look at it. The keypairs for the archivers that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs were generated in the previous epoch. The sampling of the storage proofs should be verified ensuring that the correct proofs are skipped by the validator according to the logic outlined in the validator behavior of sampling.

-The included replicator keys will indicate the the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced replicator account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said replicator account.
+The included archiver keys will indicate the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account.

### ClaimStorageReward

```text
ClaimStorageReward {
}
-keys = [validator_keypair or replicator_keypair, validator/replicator_keypairs (unsigned)]
+keys = [validator_keypair or archiver_keypair, validator/archiver_keypairs (unsigned)]
```

-Replicators and validators will use this transaction to get paid tokens from a program state where SubmitStorageProof, ProofValidation and ChallengeProofValidations are in a state where proofs have been submitted and validated and there are no ChallengeProofValidations referencing those proofs.
For a validator, it should reference the replicator keypairs to which it has validated proofs in the relevant epoch. And for a replicator it should reference validator keypairs for which it has validated and wants to be rewarded.
+Archivers and validators will use this transaction to get paid tokens from a program state where SubmitStorageProof, ProofValidation and ChallengeProofValidations are in a state where proofs have been submitted and validated and there are no ChallengeProofValidations referencing those proofs. For a validator, it should reference the archiver keypairs for which it has validated proofs in the relevant epoch. And for an archiver, it should reference validator keypairs for which it has validated and wants to be rewarded.

### ChallengeProofValidation

@@ -119,10 +119,10 @@ ChallengeProofValidation {
proof_index: u64,
hash_seed_value: Vec,
}
-keys = [replicator_keypair, validator_keypair]
+keys = [archiver_keypair, validator_keypair]
```

-This transaction is for catching lazy validators who are not doing the work to validate proofs. A replicator will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the replicator is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information maybe via RPC call to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state see that a ProofValidation is submitted in the previous storage epoch and hash the hash\_seed\_value and see that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state.
+This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information, perhaps via an RPC call, to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state, see that a ProofValidation was submitted in the previous storage epoch, hash the hash\_seed\_value, and see that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state.

### AdvertiseStorageRecentBlockhash

@@ -133,5 +133,5 @@ AdvertiseStorageRecentBlockhash {
}
```

-Validators and replicators will submit this to indicate that a new storage epoch has passed and that the storage proofs which are current proofs should now be for the previous epoch. Other transactions should check to see that the epoch that they are referencing is accurate according to current chain state.
+Validators and archivers will submit this to indicate that a new storage epoch has passed and that the storage proofs which are current proofs should now be for the previous epoch. Other transactions should check to see that the epoch that they are referencing is accurate according to current chain state.
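Taken together, the five transactions above form a small instruction set. The Rust sketch below condenses them for illustration only; it mirrors the pseudocode in this proposal, and the concrete field types (the fixed-size hash/signature arrays and the `Vec<u8>` payloads) are assumptions rather than the program's actual definitions:

```rust
// Illustrative condensation of the storage-program transactions described
// above; field types are assumed for the example.
#[allow(dead_code)]
type Hash = [u8; 32];
#[allow(dead_code)]
type Signature = [u8; 64];

#[allow(dead_code)]
enum StorageTransaction {
    /// An archiver's claim that it stored and sampled a ledger segment.
    SubmitMiningProof { slot: u64, sha_state: Hash, signature: Signature },
    /// A validator's verdict on a batch of archiver proofs
    /// (valid / not-valid / skipped).
    ProofValidation { proof_mask: Vec<u8> },
    /// Redeem rewards once proofs are validated and unchallenged.
    ClaimStorageReward,
    /// An archiver exposing a fake proof that a validator signed as valid.
    ChallengeProofValidation { proof_index: u64, hash_seed_value: Vec<u8> },
    /// Advance the storage epoch (fields elided in the proposal text).
    AdvertiseStorageRecentBlockhash,
}

fn main() {
    // Example: the reward-claim variant carries no payload of its own; the
    // relevant accounts are passed in the transaction's key list instead.
    let _tx = StorageTransaction::ClaimStorageReward;
}
```

The design choice worth noting is that every variant relies on the transaction's key list (validator and archiver storage accounts) rather than embedding pubkeys in the payload, which is what lets the program verify who is a validator and who is an archiver by account owner.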
diff --git a/book/src/running-replicator.md b/book/src/running-archiver.md
similarity index 72%
rename from book/src/running-replicator.md
rename to book/src/running-archiver.md
index 4d40b6986..933348f42 100644
--- a/book/src/running-replicator.md
+++ b/book/src/running-archiver.md
@@ -1,14 +1,14 @@
-# Running a Replicator
+# Running an Archiver

-This document describes how to setup a replicator in the testnet
+This document describes how to set up an archiver on the testnet.

Please note some of the information and instructions described here may change in future releases.

## Overview

-Replicators are specialized light clients. They download a part of the ledger \(a.k.a Segment\) and store it. They earn rewards for storing segments.
+Archivers are specialized light clients. They download a part of the ledger \(a.k.a. Segment\) and store it. They earn rewards for storing segments.

-The testnet features a validator running at testnet.solana.com, which serves as the entrypoint to the cluster for your replicator node.
+The testnet features a validator running at testnet.solana.com, which serves as the entrypoint to the cluster for your archiver node.

Additionally, there is a block explorer available at [http://testnet.solana.com/](http://testnet.solana.com/).

@@ -16,7 +16,7 @@ The testnet is configured to reset the ledger daily, or sooner should the hourly

## Machine Requirements

-Replicators don't need specialized hardware. Anything with more than 128GB of disk space will be able to participate in the cluster as a replicator node.
+Archivers don't need specialized hardware. Anything with more than 128GB of disk space will be able to participate in the cluster as an archiver node.

Currently, the disk space requirements are very low, but we expect them to change in the future.

@@ -24,7 +24,7 @@ Prebuilt binaries are available for Linux x86\_64 \(Ubuntu 18.04 recommended\),

### Confirm The Testnet Is Reachable

-Before starting a replicator node, sanity check that the cluster is accessible to your machine by running some simple commands. If any of the commands fail, please retry 5-10 minutes later to confirm the testnet is not just restarting itself before debugging further.
+Before starting an archiver node, sanity check that the cluster is accessible to your machine by running some simple commands. If any of the commands fail, please retry 5-10 minutes later to confirm the testnet is not just restarting itself before debugging further.

Fetch the current transaction count over JSON RPC:

@@ -36,7 +36,7 @@ Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.

View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet) for more detail on cluster activity.

-## Replicator Setup
+## Archiver Setup

#### Obtaining The Software

@@ -90,7 +90,7 @@ $ export PATH=$PWD/bin:$PATH

Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-pc-windows-msvc.tar.bz2**, then extract it into a folder. It is a good idea to add this extracted folder to your Windows PATH.
-## Starting The Replicator
+## Starting The Archiver

Try running the following command to join the gossip network and view all the other nodes in the cluster:

```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
# Press ^C to exit
```

-Now configure the keypairs for your replicator by running:
+Now configure the keypairs for your archiver by running:

Navigate to the solana install location and open a cmd prompt.

```bash
-$ solana-keygen new -o replicator-keypair.json
+$ solana-keygen new -o archiver-keypair.json
$ solana-keygen new -o storage-keypair.json
```

@@ -113,8 +113,8 @@ Use solana-keygen to show the public keys for each of the keypairs, they will be

* Windows

```bash
- # The replicator's identity
- $ solana-keygen pubkey replicator-keypair.json
+ # The archiver's identity
+ $ solana-keygen pubkey archiver-keypair.json
$ solana-keygen pubkey storage-keypair.json
```

@@ -122,34 +122,34 @@ Use solana-keygen to show the public keys for each of the keypairs, they will be

\`\`\`bash
- $ export REPLICATOR\_IDENTITY=$\(solana-keygen pubkey replicator-keypair.json\)
+ $ export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)
$ export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)

```text
-Then set up the storage accounts for your replicator by running:
+Then set up the storage accounts for your archiver by running:

```bash
-$ solana --keypair replicator-keypair.json airdrop 100000 lamports
-$ solana --keypair replicator-keypair.json create-replicator-storage-account $REPLICATOR_IDENTITY $STORAGE_IDENTITY
+$ solana --keypair archiver-keypair.json airdrop 100000 lamports
+$ solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
```

-Note: Every time the testnet restarts, run the steps to setup the replicator accounts again.
+Note: Every time the testnet restarts, run the steps to set up the archiver accounts again.

-To start the replicator:
+To start the archiver:

```bash
-$ solana-replicator --entrypoint testnet.solana.com:8001 --identity replicator-keypair.json --storage-keypair storage-keypair.json --ledger replicator-ledger
+$ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
```

-## Verify Replicator Setup
+## Verify Archiver Setup

-From another console, confirm the IP address and **identity pubkey** of your replicator is visible in the gossip network by running:
+From another console, confirm the IP address and **identity pubkey** of your archiver are visible in the gossip network by running:

```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
```

-Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your replicator:
+Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:

```bash
$ solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
diff --git a/book/src/terminology.md b/book/src/terminology.md
index d6cc7312c..4765641af 100644
--- a/book/src/terminology.md
+++ b/book/src/terminology.md
@@ -132,7 +132,7 @@ A list of [entries](terminology.md#entry) containing [transactions](terminology.

## ledger segment

-Portion of the ledger which is downloaded by the replicator where storage proof data is derived.
+Portion of the ledger which is downloaded by the archiver and from which storage proof data is derived.
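The ledger segment is the unit the renamed storage APIs key off. As a rough illustration, assuming segments are fixed-length windows of slots (in the spirit of the `get_segment_from_slot` helper this patch imports from the SDK; the constant below is an example value, not the SDK's):

```rust
// Illustrative: map a slot to the segment an archiver would store.
// SLOTS_PER_SEGMENT here is an assumed example value, not the SDK constant.
const SLOTS_PER_SEGMENT: u64 = 1_024;

fn segment_for_slot(slot: u64) -> u64 {
    slot / SLOTS_PER_SEGMENT
}

fn main() {
    assert_eq!(segment_for_slot(1_023), 0); // still in the first segment
    assert_eq!(segment_for_slot(2_048), 2); // third segment begins
}
```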
## ledger vote @@ -186,7 +186,7 @@ A stack of proofs, each which proves that some data existed before the proof was The public key of a [keypair](terminology.md#keypair). -## replicator +## archiver Storage mining client, stores some part of the ledger enumerated in blocks and submits storage proofs to the chain. Not a full-node. @@ -224,7 +224,7 @@ A set of sha hash state which is constructed by sampling the encrypted version o ## storage proof challenge -A transaction from a replicator that verifiably proves that a validator confirmed a fake proof. +A transaction from an archiver that verifiably proves that a validator confirmed a fake proof. ## storage proof claim diff --git a/book/src/validator/tvu/blocktree.md b/book/src/validator/tvu/blocktree.md index 69497d84c..68c44427f 100644 --- a/book/src/validator/tvu/blocktree.md +++ b/book/src/validator/tvu/blocktree.md @@ -86,5 +86,5 @@ Replay stage uses Blocktree APIs to find the longest chain of entries it can han Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote for can be pruned, expunged. -Replicator nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. +Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically. diff --git a/cli/src/cli.rs b/cli/src/cli.rs index db9ef020a..ac2c79064 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -218,8 +218,8 @@ pub fn parse_command( ("redeem-vote-credits", Some(matches)) => parse_redeem_vote_credits(matches), ("show-stake-account", Some(matches)) => parse_show_stake_account(matches), // Storage Commands - ("create-replicator-storage-account", Some(matches)) => { - parse_storage_create_replicator_account(matches) + ("create-archiver-storage-account", Some(matches)) => { + parse_storage_create_archiver_account(matches) } ("create-validator-storage-account", Some(matches)) => { parse_storage_create_validator_account(matches) diff --git a/cli/src/storage.rs b/cli/src/storage.rs index 9c5393f0e..b68a62e8e 100644 --- a/cli/src/storage.rs +++ b/cli/src/storage.rs @@ -21,8 +21,8 @@ pub trait StorageSubCommands { impl StorageSubCommands for App<'_, '_> { fn storage_subcommands(self) -> Self { self.subcommand( - SubCommand::with_name("create-replicator-storage-account") - .about("Create a replicator storage account") + SubCommand::with_name("create-archiver-storage-account") + .about("Create an archiver storage account") .arg( Arg::with_name("storage_account_owner") .index(1) @@ -98,7 +98,7 @@ impl StorageSubCommands for App<'_, '_> { } } -pub fn parse_storage_create_replicator_account( +pub fn parse_storage_create_archiver_account( matches: &ArgMatches<'_>, ) -> Result { let account_owner = pubkey_of(matches, "storage_account_owner").unwrap(); @@ -106,7 +106,7 @@ pub fn parse_storage_create_replicator_account( Ok(CliCommand::CreateStorageAccount { account_owner, storage_account_pubkey, - account_type: StorageAccountType::Replicator, + account_type: StorageAccountType::Archiver, }) } @@ -221,18 +221,18 @@ mod tests { let storage_account_pubkey = Pubkey::new_rand(); let storage_account_string = storage_account_pubkey.to_string(); - let test_create_replicator_storage_account = test_commands.clone().get_matches_from(vec![ + let 
test_create_archiver_storage_account = test_commands.clone().get_matches_from(vec![ "test", - "create-replicator-storage-account", + "create-archiver-storage-account", &pubkey_string, &storage_account_string, ]); assert_eq!( - parse_command(&pubkey, &test_create_replicator_storage_account).unwrap(), + parse_command(&pubkey, &test_create_archiver_storage_account).unwrap(), CliCommand::CreateStorageAccount { account_owner: pubkey, storage_account_pubkey, - account_type: StorageAccountType::Replicator, + account_type: StorageAccountType::Archiver, } ); diff --git a/core/src/replicator.rs b/core/src/archiver.rs similarity index 90% rename from core/src/replicator.rs rename to core/src/archiver.rs index b379c7341..bdd235805 100644 --- a/core/src/replicator.rs +++ b/core/src/archiver.rs @@ -51,18 +51,18 @@ use std::time::Duration; static ENCRYPTED_FILENAME: &str = "ledger.enc"; #[derive(Serialize, Deserialize)] -pub enum ReplicatorRequest { +pub enum ArchiverRequest { GetSlotHeight(SocketAddr), } -pub struct Replicator { +pub struct Archiver { thread_handles: Vec>, exit: Arc, } -// Shared Replicator Meta struct used internally +// Shared Archiver Meta struct used internally #[derive(Default)] -struct ReplicatorMeta { +struct ArchiverMeta { slot: u64, slots_per_segment: u64, ledger_path: PathBuf, @@ -135,16 +135,10 @@ fn create_request_processor( let (s_responder, r_responder) = channel(); let storage_socket = Arc::new(socket); let recycler = Recycler::default(); - let t_receiver = receiver( - storage_socket.clone(), - exit, - s_reader, - recycler, - "replicator", - ); + let t_receiver = receiver(storage_socket.clone(), exit, s_reader, recycler, "archiver"); thread_handles.push(t_receiver); - let t_responder = responder("replicator-responder", storage_socket.clone(), r_responder); + let t_responder = responder("archiver-responder", storage_socket.clone(), r_responder); thread_handles.push(t_responder); let exit = exit.clone(); @@ -160,10 +154,10 @@ fn create_request_processor( if let Ok(packets) = packets { for packet in &packets.packets { - let req: result::Result> = + let req: result::Result> = deserialize(&packet.data[..packet.meta.size]); match req { - Ok(ReplicatorRequest::GetSlotHeight(from)) => { + Ok(ArchiverRequest::GetSlotHeight(from)) => { if let Ok(blob) = to_shared_blob(slot, from) { let _ = s_responder.send(vec![blob]); } @@ -192,15 +186,15 @@ fn poll_for_slot(receiver: Receiver, exit: &Arc) -> u64 { } } -impl Replicator { - /// Returns a Result that contains a replicator on success +impl Archiver { + /// Returns a Result that contains an archiver on success /// /// # Arguments /// * `ledger_path` - path to where the ledger will be stored. 
/// Causes panic if none - /// * `node` - The replicator node + /// * `node` - The archiver node /// * `cluster_entrypoint` - ContactInfo representing an entry into the network - /// * `keypair` - Keypair for this replicator + /// * `keypair` - Keypair for this archiver #[allow(clippy::new_ret_no_self)] pub fn new( ledger_path: &Path, @@ -211,7 +205,7 @@ impl Replicator { ) -> Result { let exit = Arc::new(AtomicBool::new(false)); - info!("Replicator: id: {}", keypair.pubkey()); + info!("Archiver: id: {}", keypair.pubkey()); info!("Creating cluster info...."); let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone()); cluster_info.set_entrypoint(cluster_entrypoint.clone()); @@ -235,7 +229,7 @@ impl Replicator { info!("Connecting to the cluster via {:?}", cluster_entrypoint); let (nodes, _) = match crate::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) { - Ok(nodes_and_replicators) => nodes_and_replicators, + Ok(nodes_and_archivers) => nodes_and_archivers, Err(e) => { //shutdown services before exiting exit.store(true, Ordering::Relaxed); @@ -273,15 +267,15 @@ impl Replicator { let request_processor = create_request_processor(node.sockets.storage.unwrap(), &exit, slot_receiver); - let t_replicator = { + let t_archiver = { let exit = exit.clone(); let node_info = node.info.clone(); - let mut meta = ReplicatorMeta { + let mut meta = ArchiverMeta { ledger_path: ledger_path.to_path_buf(), - ..ReplicatorMeta::default() + ..ArchiverMeta::default() }; spawn(move || { - // setup replicator + // setup archiver let window_service = match Self::setup( &mut meta, cluster_info.clone(), @@ -296,7 +290,7 @@ impl Replicator { Ok(window_service) => window_service, Err(e) => { //shutdown services before exiting - error!("setup failed {:?}; replicator thread exiting...", e); + error!("setup failed {:?}; archiver thread exiting...", e); exit.store(true, Ordering::Relaxed); request_processor .into_iter() @@ -308,7 +302,7 @@ impl Replicator { }; info!("setup complete"); - // run replicator + // run archiver Self::run( &mut meta, &blocktree, @@ -328,16 +322,16 @@ impl Replicator { }; Ok(Self { - thread_handles: vec![t_replicator], + thread_handles: vec![t_archiver], exit, }) } fn run( - meta: &mut ReplicatorMeta, + meta: &mut ArchiverMeta, blocktree: &Arc, cluster_info: Arc>, - replicator_keypair: &Arc, + archiver_keypair: &Arc, storage_keypair: &Arc, exit: &Arc, ) { @@ -362,7 +356,7 @@ impl Replicator { } }; - Self::submit_mining_proof(meta, &cluster_info, replicator_keypair, storage_keypair); + Self::submit_mining_proof(meta, &cluster_info, archiver_keypair, storage_keypair); // TODO make this a lot more frequent by picking a "new" blockhash instead of picking a storage blockhash // prep the next proof @@ -382,34 +376,34 @@ impl Replicator { } }; meta.blockhash = storage_blockhash; - Self::redeem_rewards(&cluster_info, replicator_keypair, storage_keypair); + Self::redeem_rewards(&cluster_info, archiver_keypair, storage_keypair); } exit.store(true, Ordering::Relaxed); } fn redeem_rewards( cluster_info: &Arc>, - replicator_keypair: &Arc, + archiver_keypair: &Arc, storage_keypair: &Arc, ) { let nodes = cluster_info.read().unwrap().tvu_peers(); let client = crate::gossip_service::get_client(&nodes); if let Ok(Some(account)) = client.get_account(&storage_keypair.pubkey()) { - if let Ok(StorageContract::ReplicatorStorage { validations, .. }) = account.state() { + if let Ok(StorageContract::ArchiverStorage { validations, .. 
}) = account.state() { if !validations.is_empty() { let ix = storage_instruction::claim_reward( - &replicator_keypair.pubkey(), + &archiver_keypair.pubkey(), &storage_keypair.pubkey(), ); let message = - Message::new_with_payer(vec![ix], Some(&replicator_keypair.pubkey())); - if let Err(e) = client.send_message(&[&replicator_keypair], message) { + Message::new_with_payer(vec![ix], Some(&archiver_keypair.pubkey())); + if let Err(e) = client.send_message(&[&archiver_keypair], message) { error!("unable to redeem reward, tx failed: {:?}", e); } else { info!( "collected mining rewards: Account balance {:?}", - client.get_balance(&replicator_keypair.pubkey()) + client.get_balance(&archiver_keypair.pubkey()) ); } } @@ -421,7 +415,7 @@ impl Replicator { // Find a segment to replicate and download it. fn setup( - meta: &mut ReplicatorMeta, + meta: &mut ArchiverMeta, cluster_info: Arc>, blocktree: &Arc, exit: &Arc, @@ -520,7 +514,7 @@ impl Replicator { info!("Done receiving entries from window_service"); - // Remove replicator from the data plane + // Remove archiver from the data plane let mut contact_info = node_info.clone(); contact_info.tvu = "0.0.0.0:0".parse().unwrap(); contact_info.wallclock = timestamp(); @@ -530,7 +524,7 @@ impl Replicator { } } - fn encrypt_ledger(meta: &mut ReplicatorMeta, blocktree: &Arc) -> Result<()> { + fn encrypt_ledger(meta: &mut ArchiverMeta, blocktree: &Arc) -> Result<()> { meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME); { @@ -555,7 +549,7 @@ impl Replicator { Ok(()) } - fn create_sampling_offsets(meta: &mut ReplicatorMeta) { + fn create_sampling_offsets(meta: &mut ArchiverMeta) { meta.sampling_offsets.clear(); let mut rng_seed = [0u8; 32]; rng_seed.copy_from_slice(&meta.blockhash.as_ref()); @@ -580,7 +574,7 @@ impl Replicator { keypair: &Keypair, storage_keypair: &Keypair, ) -> Result<()> { - // make sure replicator has some balance + // make sure archiver has some balance if client.poll_get_balance(&keypair.pubkey())? == 0 { return Err( io::Error::new(io::ErrorKind::Other, "keypair account has no balance").into(), @@ -605,7 +599,7 @@ impl Replicator { &keypair.pubkey(), &storage_keypair.pubkey(), 1, - StorageAccountType::Replicator, + StorageAccountType::Archiver, ); let tx = Transaction::new_signed_instructions(&[keypair], ix, blockhash); let signature = client.async_send_transaction(tx)?; @@ -623,9 +617,9 @@ impl Replicator { } fn submit_mining_proof( - meta: &ReplicatorMeta, + meta: &ArchiverMeta, cluster_info: &Arc>, - replicator_keypair: &Arc, + archiver_keypair: &Arc, storage_keypair: &Arc, ) { // No point if we've got no storage account... 
@@ -637,9 +631,9 @@ impl Replicator { return; } // ...or no lamports for fees - let balance = client.poll_get_balance(&replicator_keypair.pubkey()); + let balance = client.poll_get_balance(&archiver_keypair.pubkey()); if balance.is_err() || balance.unwrap() == 0 { - error!("Unable to submit mining proof, insufficient Replicator Account balance"); + error!("Unable to submit mining proof, insufficient Archiver Account balance"); return; } @@ -657,15 +651,14 @@ impl Replicator { Signature::new(&meta.signature.as_ref()), meta.blockhash, ); - let message = - Message::new_with_payer(vec![instruction], Some(&replicator_keypair.pubkey())); + let message = Message::new_with_payer(vec![instruction], Some(&archiver_keypair.pubkey())); let mut transaction = Transaction::new( - &[replicator_keypair.as_ref(), storage_keypair.as_ref()], + &[archiver_keypair.as_ref(), storage_keypair.as_ref()], message, blockhash, ); if let Err(err) = client.send_and_confirm_transaction( - &[&replicator_keypair, &storage_keypair], + &[&archiver_keypair, &storage_keypair], &mut transaction, 10, 0, @@ -787,21 +780,21 @@ impl Replicator { } } - /// Ask a replicator to populate a given blocktree with its segment. - /// Return the slot at the start of the replicator's segment + /// Ask an archiver to populate a given blocktree with its segment. + /// Return the slot at the start of the archiver's segment /// /// It is recommended to use a temporary blocktree for this since the download will not verify /// blobs received and might impact the chaining of blobs across slots - pub fn download_from_replicator( + pub fn download_from_archiver( cluster_info: &Arc>, - replicator_info: &ContactInfo, + archiver_info: &ContactInfo, blocktree: &Arc, slots_per_segment: u64, ) -> Result<(u64)> { - // Create a client which downloads from the replicator and see that it + // Create a client which downloads from the archiver and see that it // can respond with blobs. 
- let start_slot = Self::get_replicator_segment_slot(replicator_info.storage_addr); - info!("Replicator download: start at {}", start_slot); + let start_slot = Self::get_archiver_segment_slot(archiver_info.storage_addr); + info!("Archiver download: start at {}", start_slot); let exit = Arc::new(AtomicBool::new(false)); let (s_reader, r_reader) = channel(); @@ -811,13 +804,13 @@ impl Replicator { &exit, s_reader.clone(), Recycler::default(), - "replicator_reeciver", + "archiver_reeciver", ); let id = cluster_info.read().unwrap().id(); info!( "Sending repair requests from: {} to: {}", cluster_info.read().unwrap().my_data().id, - replicator_info.gossip + archiver_info.gossip ); let repair_slot_range = RepairSlotRange { start: start_slot, @@ -825,7 +818,7 @@ impl Replicator { }; // try for upto 180 seconds //TODO needs tuning if segments are huge for _ in 0..120 { - // Strategy used by replicators + // Strategy used by archivers let repairs = RepairService::generate_repairs_in_range( blocktree, repair_service::MAX_REPAIR_LENGTH, @@ -840,7 +833,7 @@ impl Replicator { .read() .unwrap() .map_repair_request(&repair_request) - .map(|result| ((replicator_info.gossip, result), repair_request)) + .map(|result| ((archiver_info.gossip, result), repair_request)) .ok() }) .collect(); @@ -848,7 +841,7 @@ impl Replicator { for ((to, req), repair_request) in reqs { if let Ok(local_addr) = repair_socket.local_addr() { datapoint_info!( - "replicator_download", + "archiver_download", ("repair_request", format!("{:?}", repair_request), String), ("to", to.to_string(), String), ("from", local_addr.to_string(), String), @@ -856,7 +849,7 @@ impl Replicator { ); } repair_socket - .send_to(&req, replicator_info.gossip) + .send_to(&req, archiver_info.gossip) .unwrap_or_else(|e| { error!("{} repair req send_to({}) error {:?}", id, to, e); 0 @@ -906,13 +899,13 @@ impl Replicator { true } - fn get_replicator_segment_slot(to: SocketAddr) -> u64 { + fn get_archiver_segment_slot(to: SocketAddr) -> u64 { let (_port, socket) = bind_in_range(VALIDATOR_PORT_RANGE).unwrap(); socket .set_read_timeout(Some(Duration::from_secs(5))) .unwrap(); - let req = ReplicatorRequest::GetSlotHeight(socket.local_addr().unwrap()); + let req = ArchiverRequest::GetSlotHeight(socket.local_addr().unwrap()); let serialized_req = bincode::serialize(&req).unwrap(); for _ in 0..10 { socket.send_to(&serialized_req, to).unwrap(); @@ -922,7 +915,7 @@ impl Replicator { } sleep(Duration::from_millis(500)); } - panic!("Couldn't get segment slot from replicator!"); + panic!("Couldn't get segment slot from archiver!"); } } diff --git a/core/src/chacha_cuda.rs b/core/src/chacha_cuda.rs index d5c169708..39ec67994 100644 --- a/core/src/chacha_cuda.rs +++ b/core/src/chacha_cuda.rs @@ -113,8 +113,8 @@ pub fn chacha_cbc_encrypt_file_many_keys( #[cfg(test)] mod tests { use super::*; + use crate::archiver::sample_file; use crate::chacha::chacha_cbc_encrypt_ledger; - use crate::replicator::sample_file; use solana_ledger::blocktree::get_tmp_ledger_path; use solana_ledger::entry::create_ticks; use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT; diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 56be0e80f..4831641c0 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -242,7 +242,7 @@ impl ClusterInfo { pub fn contact_info_trace(&self) -> String { let now = timestamp(); let mut spy_nodes = 0; - let mut replicators = 0; + let mut archivers = 0; let my_pubkey = self.my_data().id; let nodes: Vec<_> = self .all_peers() @@ -250,8 +250,8 @@ 
impl ClusterInfo { .map(|(node, last_updated)| { if Self::is_spy_node(&node) { spy_nodes += 1; - } else if Self::is_replicator(&node) { - replicators += 1; + } else if Self::is_archiver(&node) { + archivers += 1; } fn addr_to_string(addr: &SocketAddr) -> String { if ContactInfo::is_valid_address(addr) { @@ -281,9 +281,9 @@ impl ClusterInfo { {}\ Nodes: {}{}{}", nodes.join(""), - nodes.len() - spy_nodes - replicators, - if replicators > 0 { - format!("\nReplicators: {}", replicators) + nodes.len() - spy_nodes - archivers, + if archivers > 0 { + format!("\nArchivers: {}", archivers) } else { "".to_string() }, @@ -426,7 +426,7 @@ impl ClusterInfo { .values() .filter_map(|x| x.value.contact_info()) .filter(|x| ContactInfo::is_valid_address(&x.tvu)) - .filter(|x| !ClusterInfo::is_replicator(x)) + .filter(|x| !ClusterInfo::is_archiver(x)) .filter(|x| x.id != me) .cloned() .collect() @@ -478,7 +478,7 @@ impl ClusterInfo { && !ContactInfo::is_valid_address(&contact_info.storage_addr) } - pub fn is_replicator(contact_info: &ContactInfo) -> bool { + pub fn is_archiver(contact_info: &ContactInfo) -> bool { ContactInfo::is_valid_address(&contact_info.storage_addr) && !ContactInfo::is_valid_address(&contact_info.tpu) } @@ -1593,7 +1593,7 @@ impl Node { let pubkey = Pubkey::new_rand(); Self::new_localhost_with_pubkey(&pubkey) } - pub fn new_localhost_replicator(pubkey: &Pubkey) -> Self { + pub fn new_localhost_archiver(pubkey: &Pubkey) -> Self { let gossip = UdpSocket::bind("127.0.0.1:0").unwrap(); let tvu = UdpSocket::bind("127.0.0.1:0").unwrap(); let tvu_forwards = UdpSocket::bind("127.0.0.1:0").unwrap(); @@ -1748,7 +1748,7 @@ impl Node { }, } } - pub fn new_replicator_with_external_ip( + pub fn new_archiver_with_external_ip( pubkey: &Pubkey, gossip_addr: &SocketAddr, port_range: PortRange, @@ -2130,9 +2130,9 @@ mod tests { } #[test] - fn new_replicator_external_ip_test() { + fn new_archiver_external_ip_test() { let ip = Ipv4Addr::from(0); - let node = Node::new_replicator_with_external_ip( + let node = Node::new_archiver_with_external_ip( &Pubkey::new_rand(), &socketaddr!(ip, 0), VALIDATOR_PORT_RANGE, diff --git a/core/src/gossip_service.rs b/core/src/gossip_service.rs index 21685385c..1519115b2 100644 --- a/core/src/gossip_service.rs +++ b/core/src/gossip_service.rs @@ -53,7 +53,7 @@ impl GossipService { } } -/// Discover Nodes and Replicators in a cluster +/// Discover Nodes and Archivers in a cluster pub fn discover_cluster( entry_point: &SocketAddr, num_nodes: usize, @@ -76,7 +76,7 @@ pub fn discover( info!("Gossip entry point: {:?}", entry_point); info!("Spy node id: {:?}", id); - let (met_criteria, secs, tvu_peers, replicators) = spy( + let (met_criteria, secs, tvu_peers, archivers) = spy( spy_ref.clone(), num_nodes, timeout, @@ -93,7 +93,7 @@ pub fn discover( secs, spy_ref.read().unwrap().contact_info_trace() ); - return Ok((tvu_peers, replicators)); + return Ok((tvu_peers, archivers)); } if !tvu_peers.is_empty() { @@ -101,7 +101,7 @@ pub fn discover( "discover failed to match criteria by timeout...\n{}", spy_ref.read().unwrap().contact_info_trace() ); - return Ok((tvu_peers, replicators)); + return Ok((tvu_peers, archivers)); } info!( @@ -159,7 +159,7 @@ fn spy( let now = Instant::now(); let mut met_criteria = false; let mut tvu_peers: Vec = Vec::new(); - let mut replicators: Vec = Vec::new(); + let mut archivers: Vec = Vec::new(); let mut i = 0; loop { if let Some(secs) = timeout { @@ -167,22 +167,22 @@ fn spy( break; } } - // collect tvu peers but filter out replicators since their tvu is 
transient and we do not want + // collect tvu peers but filter out archivers since their tvu is transient and we do not want // it to show up as a "node" tvu_peers = spy_ref .read() .unwrap() .tvu_peers() .into_iter() - .filter(|node| !ClusterInfo::is_replicator(&node)) + .filter(|node| !ClusterInfo::is_archiver(&node)) .collect::>(); - replicators = spy_ref.read().unwrap().storage_peers(); + archivers = spy_ref.read().unwrap().storage_peers(); if let Some(num) = num_nodes { - if tvu_peers.len() + replicators.len() >= num { + if tvu_peers.len() + archivers.len() >= num { if let Some(ipaddr) = find_node_by_ipaddr { if tvu_peers .iter() - .chain(replicators.iter()) + .chain(archivers.iter()) .any(|x| x.gossip.ip() == ipaddr) { met_criteria = true; @@ -192,7 +192,7 @@ fn spy( if let Some(pubkey) = find_node_by_pubkey { if tvu_peers .iter() - .chain(replicators.iter()) + .chain(archivers.iter()) .any(|x| x.id == pubkey) { met_criteria = true; @@ -209,7 +209,7 @@ fn spy( if let Some(pubkey) = find_node_by_pubkey { if tvu_peers .iter() - .chain(replicators.iter()) + .chain(archivers.iter()) .any(|x| x.id == pubkey) { met_criteria = true; @@ -219,7 +219,7 @@ fn spy( if let Some(ipaddr) = find_node_by_ipaddr { if tvu_peers .iter() - .chain(replicators.iter()) + .chain(archivers.iter()) .any(|x| x.gossip.ip() == ipaddr) { met_criteria = true; @@ -238,12 +238,7 @@ fn spy( )); i += 1; } - ( - met_criteria, - now.elapsed().as_secs(), - tvu_peers, - replicators, - ) + (met_criteria, now.elapsed().as_secs(), tvu_peers, archivers) } /// Makes a spy or gossip node based on whether or not a gossip_addr was passed in diff --git a/core/src/lib.rs b/core/src/lib.rs index 232fd43c7..122823f2d 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -15,6 +15,7 @@ pub mod recycler; pub mod shred_fetch_stage; #[macro_use] pub mod contact_info; +pub mod archiver; pub mod blockstream; pub mod blockstream_service; pub mod cluster_info; @@ -39,7 +40,6 @@ pub mod poh_service; pub mod recvmmsg; pub mod repair_service; pub mod replay_stage; -pub mod replicator; pub mod result; pub mod retransmit_stage; pub mod rpc; diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index b3603b867..db2b4f1c7 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -129,7 +129,7 @@ impl RepairService { let repairs = { match repair_strategy { RepairStrategy::RepairRange(ref repair_slot_range) => { - // Strategy used by replicators + // Strategy used by archivers Self::generate_repairs_in_range( blocktree, MAX_REPAIR_LENGTH, diff --git a/core/src/storage_stage.rs b/core/src/storage_stage.rs index 8a6e3faa9..ac4d06a6d 100644 --- a/core/src/storage_stage.rs +++ b/core/src/storage_stage.rs @@ -1,5 +1,5 @@ // A stage that handles generating the keys used to encrypt the ledger and sample it -// for storage mining. Replicators submit storage proofs, validator then bundles them +// for storage mining. Archivers submit storage proofs, validator then bundles them // to submit its proof for mining to be rewarded. 
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys; @@ -12,7 +12,7 @@ use rand_chacha::ChaChaRng; use solana_ledger::bank_forks::BankForks; use solana_ledger::blocktree::Blocktree; use solana_runtime::bank::Bank; -use solana_runtime::storage_utils::replicator_accounts; +use solana_runtime::storage_utils::archiver_accounts; use solana_sdk::account::Account; use solana_sdk::account_utils::State; use solana_sdk::clock::get_segment_from_slot; @@ -39,13 +39,13 @@ use std::{cmp, io}; // Vec of [ledger blocks] x [keys] type StorageResults = Vec; type StorageKeys = Vec; -type ReplicatorMap = Vec>>; +type ArchiverMap = Vec>>; #[derive(Default)] pub struct StorageStateInner { storage_results: StorageResults, pub storage_keys: StorageKeys, - replicator_map: ReplicatorMap, + archiver_map: ArchiverMap, storage_blockhash: Hash, slot: u64, slots_per_segment: u64, @@ -92,12 +92,12 @@ impl StorageState { pub fn new(hash: &Hash, slots_per_turn: u64, slots_per_segment: u64) -> Self { let storage_keys = vec![0u8; KEY_SIZE * NUM_IDENTITIES]; let storage_results = vec![Hash::default(); NUM_IDENTITIES]; - let replicator_map = vec![]; + let archiver_map = vec![]; let state = StorageStateInner { storage_keys, storage_results, - replicator_map, + archiver_map, slots_per_turn, slot: 0, slots_per_segment, @@ -140,17 +140,16 @@ impl StorageState { const MAX_PUBKEYS_TO_RETURN: usize = 5; let index = get_segment_from_slot(slot, self.state.read().unwrap().slots_per_segment) as usize; - let replicator_map = &self.state.read().unwrap().replicator_map; + let archiver_map = &self.state.read().unwrap().archiver_map; let working_bank = bank_forks.read().unwrap().working_bank(); - let accounts = replicator_accounts(&working_bank); - if index < replicator_map.len() { + let accounts = archiver_accounts(&working_bank); + if index < archiver_map.len() { //perform an account owner lookup - let mut slot_replicators = replicator_map[index] + let mut slot_archivers = archiver_map[index] .keys() .filter_map(|account_id| { accounts.get(account_id).and_then(|account| { - if let Ok(StorageContract::ReplicatorStorage { owner, .. }) = - account.state() + if let Ok(StorageContract::ArchiverStorage { owner, .. }) = account.state() { Some(owner) } else { @@ -159,8 +158,8 @@ impl StorageState { }) }) .collect::>(); - slot_replicators.truncate(MAX_PUBKEYS_TO_RETURN); - slot_replicators + slot_archivers.truncate(MAX_PUBKEYS_TO_RETURN); + slot_archivers } else { vec![] } @@ -448,7 +447,7 @@ impl StorageStage { storage_state: &Arc>, current_key_idx: &mut usize, ) { - if let Ok(StorageContract::ReplicatorStorage { proofs, .. }) = account.state() { + if let Ok(StorageContract::ArchiverStorage { proofs, .. 
}) = account.state() { //convert slot to segment let segment = get_segment_from_slot(slot, slots_per_segment); if let Some(proofs) = proofs.get(&segment) { @@ -467,16 +466,14 @@ impl StorageStage { } let mut statew = storage_state.write().unwrap(); - if statew.replicator_map.len() < segment as usize { - statew - .replicator_map - .resize(segment as usize, HashMap::new()); + if statew.archiver_map.len() < segment as usize { + statew.archiver_map.resize(segment as usize, HashMap::new()); } let proof_segment_index = proof.segment_index as usize; - if proof_segment_index < statew.replicator_map.len() { + if proof_segment_index < statew.archiver_map.len() { // TODO randomly select and verify the proof first // Copy the submitted proof - statew.replicator_map[proof_segment_index] + statew.archiver_map[proof_segment_index] .entry(account_id) .or_default() .push(proof.clone()); @@ -510,11 +507,11 @@ impl StorageStage { storage_slots.slot_count += 1; storage_slots.last_root = bank.slot(); if storage_slots.slot_count % slots_per_turn == 0 { - // load all the replicator accounts in the bank. collect all their proofs at the current slot - let replicator_accounts = replicator_accounts(bank.as_ref()); + // load all the archiver accounts in the bank. collect all their proofs at the current slot + let archiver_accounts = archiver_accounts(bank.as_ref()); // find proofs, and use them to update // the storage_keys with their signatures - for (account_id, account) in replicator_accounts.into_iter() { + for (account_id, account) in archiver_accounts.into_iter() { Self::collect_proofs( bank.slot(), bank.slots_per_segment(), @@ -553,13 +550,13 @@ impl StorageStage { storage_keypair: &Arc, ix_sender: &Sender, ) -> Result<()> { - // bundle up mining submissions from replicators + // bundle up mining submissions from archivers // and submit them in a tx to the leader to get rewarded. let mut w_state = storage_state.write().unwrap(); let mut max_proof_mask = 0; let proof_mask_limit = storage_instruction::proof_mask_limit(); let instructions: Vec<_> = w_state - .replicator_map + .archiver_map .iter_mut() .enumerate() .flat_map(|(_, proof_map)| { diff --git a/core/src/window_service.rs b/core/src/window_service.rs index deebe55ac..6246c478a 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -118,7 +118,7 @@ where ); if !packets.packets.is_empty() { - // Ignore the send error, as the retransmit is optional (e.g. replicators don't retransmit) + // Ignore the send error, as the retransmit is optional (e.g. 
                 let _ = retransmit.send(packets);
             }
diff --git a/core/tests/storage_stage.rs b/core/tests/storage_stage.rs
index e7d5c3fae..85472a704 100644
--- a/core/tests/storage_stage.rs
+++ b/core/tests/storage_stage.rs
@@ -35,7 +35,7 @@ mod tests {
         solana_logger::setup();
         let keypair = Arc::new(Keypair::new());
         let storage_keypair = Arc::new(Keypair::new());
-        let replicator_keypair = Arc::new(Keypair::new());
+        let archiver_keypair = Arc::new(Keypair::new());
         let exit = Arc::new(AtomicBool::new(false));
 
         let GenesisBlockInfo {
@@ -81,9 +81,9 @@ mod tests {
             let account_ix = storage_instruction::create_storage_account(
                 &mint_keypair.pubkey(),
                 &Pubkey::new_rand(),
-                &replicator_keypair.pubkey(),
+                &archiver_keypair.pubkey(),
                 1,
-                StorageAccountType::Replicator,
+                StorageAccountType::Archiver,
             );
             let account_tx = Transaction::new_signed_instructions(
                 &[&mint_keypair],
@@ -104,7 +104,7 @@ mod tests {
             let keypair = Keypair::new();
             let mining_proof_ix = storage_instruction::mining_proof(
-                &replicator_keypair.pubkey(),
+                &archiver_keypair.pubkey(),
                 Hash::default(),
                 0,
                 keypair.sign_message(b"test"),
@@ -124,7 +124,7 @@ mod tests {
                 .unwrap();
             let message = Message::new_with_payer(vec![mining_proof_ix], Some(&mint_keypair.pubkey()));
             let mining_proof_tx = Transaction::new(
-                &[&mint_keypair, replicator_keypair.as_ref()],
+                &[&mint_keypair, archiver_keypair.as_ref()],
                 message,
                 next_bank.last_blockhash(),
             );
diff --git a/gossip/src/main.rs b/gossip/src/main.rs
index 9ea3bd9f9..01cfe8670 100644
--- a/gossip/src/main.rs
+++ b/gossip/src/main.rs
@@ -142,7 +142,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 .value_of("node_pubkey")
                 .map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
 
-            let (nodes, _replicators) = discover(
+            let (nodes, _archivers) = discover(
                 &entrypoint_addr,
                 num_nodes,
                 timeout,
@@ -185,7 +185,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
         }
         ("get-rpc-url", Some(matches)) => {
             let timeout = value_t_or_exit!(matches, "timeout", u64);
-            let (nodes, _replicators) = discover(
+            let (nodes, _archivers) = discover(
                 &entrypoint_addr,
                 Some(1),
                 Some(timeout),
@@ -213,7 +213,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 .unwrap()
                 .parse::<usize>()
                 .unwrap();
-            let (nodes, _replicators) = discover(
+            let (nodes, _archivers) = discover(
                 &entrypoint_addr,
                 None,
                 None,
diff --git a/local_cluster/src/local_cluster.rs b/local_cluster/src/local_cluster.rs
index 6770ea4c3..d918e71cc 100644
--- a/local_cluster/src/local_cluster.rs
+++ b/local_cluster/src/local_cluster.rs
@@ -1,11 +1,11 @@
 use crate::cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo};
 use solana_client::thin_client::{create_client, ThinClient};
 use solana_core::{
+    archiver::Archiver,
     cluster_info::{Node, VALIDATOR_PORT_RANGE},
     contact_info::ContactInfo,
     genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo},
     gossip_service::discover_cluster,
-    replicator::Replicator,
     service::Service,
     validator::{Validator, ValidatorConfig},
 };
@@ -42,15 +42,15 @@ use std::{
     sync::Arc,
 };
 
-pub struct ReplicatorInfo {
-    pub replicator_storage_pubkey: Pubkey,
+pub struct ArchiverInfo {
+    pub archiver_storage_pubkey: Pubkey,
     pub ledger_path: PathBuf,
 }
 
-impl ReplicatorInfo {
+impl ArchiverInfo {
     fn new(storage_pubkey: Pubkey, ledger_path: PathBuf) -> Self {
         Self {
-            replicator_storage_pubkey: storage_pubkey,
+            archiver_storage_pubkey: storage_pubkey,
             ledger_path,
         }
     }
@@ -60,9 +60,9 @@ impl ReplicatorInfo {
 pub struct ClusterConfig {
     /// The validator config that should be applied to every node in the cluster
     pub validator_configs: Vec<ValidatorConfig>,
-    /// Number of replicators in the cluster
-    /// Note- replicators will timeout if ticks_per_slot is much larger than the default 8
-    pub num_replicators: usize,
+    /// Number of archivers in the cluster
+    /// Note- archivers will timeout if ticks_per_slot is much larger than the default 8
+    pub num_archivers: usize,
     /// Number of nodes that are unstaked and not voting (a.k.a listening)
     pub num_listeners: u64,
     /// The stakes of each node
@@ -81,7 +81,7 @@ impl Default for ClusterConfig {
     fn default() -> Self {
         ClusterConfig {
             validator_configs: vec![],
-            num_replicators: 0,
+            num_archivers: 0,
             num_listeners: 0,
             node_stakes: vec![],
             cluster_lamports: 0,
@@ -104,8 +104,8 @@ pub struct LocalCluster {
     pub listener_infos: HashMap<Pubkey, ClusterValidatorInfo>,
     validators: HashMap<Pubkey, Validator>,
     pub genesis_block: GenesisBlock,
-    replicators: Vec<Replicator>,
-    pub replicator_infos: HashMap<Pubkey, ReplicatorInfo>,
+    archivers: Vec<Archiver>,
+    pub archiver_infos: HashMap<Pubkey, ArchiverInfo>,
 }
 
 impl LocalCluster {
@@ -210,10 +210,10 @@ impl LocalCluster {
             funding_keypair: mint_keypair,
             entry_point_info: leader_contact_info,
             validators,
-            replicators: vec![],
+            archivers: vec![],
             genesis_block,
             validator_infos,
-            replicator_infos: HashMap::new(),
+            archiver_infos: HashMap::new(),
             listener_infos: HashMap::new(),
         };
 
@@ -236,13 +236,13 @@ impl LocalCluster {
         )
         .unwrap();
 
-        for _ in 0..config.num_replicators {
-            cluster.add_replicator();
+        for _ in 0..config.num_archivers {
+            cluster.add_archiver();
         }
 
         discover_cluster(
             &cluster.entry_point_info.gossip,
-            config.node_stakes.len() + config.num_replicators as usize,
+            config.node_stakes.len() + config.num_archivers as usize,
         )
         .unwrap();
 
@@ -261,8 +261,8 @@ impl LocalCluster {
             node.join().unwrap();
         }
 
-        while let Some(replicator) = self.replicators.pop() {
-            replicator.close();
+        while let Some(archiver) = self.archivers.pop() {
+            archiver.close();
         }
     }
 
@@ -344,9 +344,9 @@ impl LocalCluster {
         }
     }
 
-    fn add_replicator(&mut self) {
-        let replicator_keypair = Arc::new(Keypair::new());
-        let replicator_pubkey = replicator_keypair.pubkey();
+    fn add_archiver(&mut self) {
+        let archiver_keypair = Arc::new(Keypair::new());
+        let archiver_pubkey = archiver_keypair.pubkey();
         let storage_keypair = Arc::new(Keypair::new());
         let storage_pubkey = storage_keypair.pubkey();
         let client = create_client(
@@ -354,31 +354,31 @@ impl LocalCluster {
             VALIDATOR_PORT_RANGE,
         );
 
-        // Give the replicator some lamports to setup its storage accounts
+        // Give the archiver some lamports to setup its storage accounts
         Self::transfer_with_client(
             &client,
             &self.funding_keypair,
-            &replicator_keypair.pubkey(),
+            &archiver_keypair.pubkey(),
             42,
         );
-        let replicator_node = Node::new_localhost_replicator(&replicator_pubkey);
+        let archiver_node = Node::new_localhost_archiver(&archiver_pubkey);
 
-        Self::setup_storage_account(&client, &storage_keypair, &replicator_keypair, true).unwrap();
+        Self::setup_storage_account(&client, &storage_keypair, &archiver_keypair, true).unwrap();
 
-        let (replicator_ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_block);
-        let replicator = Replicator::new(
-            &replicator_ledger_path,
-            replicator_node,
+        let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_block);
+        let archiver = Archiver::new(
+            &archiver_ledger_path,
+            archiver_node,
             self.entry_point_info.clone(),
-            replicator_keypair,
+            archiver_keypair,
             storage_keypair,
         )
-        .unwrap_or_else(|err| panic!("Replicator::new() failed: {:?}", err));
+        .unwrap_or_else(|err| panic!("Archiver::new() failed: {:?}", err));
 
-        self.replicators.push(replicator);
-        self.replicator_infos.insert(
-            replicator_pubkey,
-            ReplicatorInfo::new(storage_pubkey, replicator_ledger_path),
+        self.archivers.push(archiver);
+        self.archiver_infos.insert(
+            archiver_pubkey,
+            ArchiverInfo::new(storage_pubkey, archiver_ledger_path),
         );
     }
 
@@ -388,7 +388,7 @@ impl LocalCluster {
             .validator_infos
             .values()
             .map(|f| &f.info.ledger_path)
-            .chain(self.replicator_infos.values().map(|info| &info.ledger_path))
+            .chain(self.archiver_infos.values().map(|info| &info.ledger_path))
         {
             remove_dir_all(&ledger_path)
                 .unwrap_or_else(|_| panic!("Unable to remove {:?}", ledger_path));
@@ -531,15 +531,15 @@ impl LocalCluster {
         }
     }
 
-    /// Sets up the storage account for validators/replicators and assumes the funder is the owner
+    /// Sets up the storage account for validators/archivers and assumes the funder is the owner
     fn setup_storage_account(
         client: &ThinClient,
         storage_keypair: &Keypair,
         from_keypair: &Arc<Keypair>,
-        replicator: bool,
+        archiver: bool,
     ) -> Result<()> {
-        let storage_account_type = if replicator {
-            StorageAccountType::Replicator
+        let storage_account_type = if archiver {
+            StorageAccountType::Archiver
         } else {
             StorageAccountType::Validator
         };
@@ -644,7 +644,7 @@ mod test {
         let num_nodes = 1;
         let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
         assert_eq!(cluster.validators.len(), num_nodes);
-        assert_eq!(cluster.replicators.len(), 0);
+        assert_eq!(cluster.archivers.len(), 0);
     }
 
     #[test]
@@ -654,10 +654,10 @@ mod test {
         validator_config.rpc_config.enable_validator_exit = true;
         validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
         const NUM_NODES: usize = 1;
-        let num_replicators = 1;
+        let num_archivers = 1;
         let config = ClusterConfig {
             validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
-            num_replicators,
+            num_archivers,
             node_stakes: vec![3; NUM_NODES],
             cluster_lamports: 100,
             ticks_per_slot: 8,
@@ -666,6 +666,6 @@
         };
         let cluster = LocalCluster::new(&config);
         assert_eq!(cluster.validators.len(), NUM_NODES);
-        assert_eq!(cluster.replicators.len(), num_replicators);
+        assert_eq!(cluster.archivers.len(), num_archivers);
     }
 }
diff --git a/local_cluster/src/tests.rs b/local_cluster/src/tests.rs
index 8c1fec092..63cc89c03 100644
--- a/local_cluster/src/tests.rs
+++ b/local_cluster/src/tests.rs
@@ -1,4 +1,4 @@
+mod archiver;
 mod bench_exchange;
 mod bench_tps;
 mod local_cluster;
-mod replicator;
diff --git a/local_cluster/src/tests/replicator.rs b/local_cluster/src/tests/archiver.rs
similarity index 59%
rename from local_cluster/src/tests/replicator.rs
rename to local_cluster/src/tests/archiver.rs
index 585b00e06..41518ab4c 100644
--- a/local_cluster/src/tests/replicator.rs
+++ b/local_cluster/src/tests/archiver.rs
@@ -1,10 +1,10 @@
 use crate::local_cluster::{ClusterConfig, LocalCluster};
 use serial_test_derive::serial;
 use solana_client::thin_client::create_client;
+use solana_core::archiver::Archiver;
 use solana_core::cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE};
 use solana_core::contact_info::ContactInfo;
 use solana_core::gossip_service::discover_cluster;
-use solana_core::replicator::Replicator;
 use solana_core::storage_stage::SLOTS_PER_TURN_TEST;
 use solana_core::validator::ValidatorConfig;
 use solana_ledger::blocktree::{create_new_tmp_ledger, get_tmp_ledger_path, Blocktree};
@@ -13,18 +13,18 @@ use solana_sdk::signature::{Keypair, KeypairUtil};
 use std::fs::remove_dir_all;
 use std::sync::{Arc, RwLock};
 
-/// Start the cluster with the given configuration and wait till the replicators are discovered
+/// Start the cluster with the given configuration and wait till the archivers are discovered
 /// Then download blobs from one of them.
-fn run_replicator_startup_basic(num_nodes: usize, num_replicators: usize) {
+fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) {
     solana_logger::setup();
-    info!("starting replicator test");
+    info!("starting archiver test");
 
     let mut validator_config = ValidatorConfig::default();
     let slots_per_segment = 8;
     validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
     let config = ClusterConfig {
         validator_configs: vec![validator_config; num_nodes],
-        num_replicators,
+        num_archivers,
         node_stakes: vec![100; num_nodes],
         cluster_lamports: 10_000,
         // keep a low slot/segment count to speed up the test
@@ -33,130 +33,122 @@ fn run_replicator_startup_basic(num_nodes: usize, num_replicators: usize) {
     };
     let cluster = LocalCluster::new(&config);
 
-    let (cluster_nodes, cluster_replicators) = discover_cluster(
-        &cluster.entry_point_info.gossip,
-        num_nodes + num_replicators,
-    )
-    .unwrap();
+    let (cluster_nodes, cluster_archivers) =
+        discover_cluster(&cluster.entry_point_info.gossip, num_nodes + num_archivers).unwrap();
     assert_eq!(
-        cluster_nodes.len() + cluster_replicators.len(),
-        num_nodes + num_replicators
+        cluster_nodes.len() + cluster_archivers.len(),
+        num_nodes + num_archivers
     );
-    let mut replicator_count = 0;
-    let mut replicator_info = ContactInfo::default();
-    for node in &cluster_replicators {
+    let mut archiver_count = 0;
+    let mut archiver_info = ContactInfo::default();
+    for node in &cluster_archivers {
         info!("storage: {:?} rpc: {:?}", node.storage_addr, node.rpc);
         if ContactInfo::is_valid_address(&node.storage_addr) {
-            replicator_count += 1;
-            replicator_info = node.clone();
+            archiver_count += 1;
+            archiver_info = node.clone();
         }
     }
-    assert_eq!(replicator_count, num_replicators);
+    assert_eq!(archiver_count, num_archivers);
 
     let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
         cluster_nodes[0].clone(),
     )));
     let path = get_tmp_ledger_path("test");
     let blocktree = Arc::new(Blocktree::open(&path).unwrap());
-    Replicator::download_from_replicator(
-        &cluster_info,
-        &replicator_info,
-        &blocktree,
-        slots_per_segment,
-    )
-    .unwrap();
+    Archiver::download_from_archiver(&cluster_info, &archiver_info, &blocktree, slots_per_segment)
+        .unwrap();
 }
 
 #[test]
 #[serial]
-fn test_replicator_startup_1_node() {
-    run_replicator_startup_basic(1, 1);
+fn test_archiver_startup_1_node() {
+    run_archiver_startup_basic(1, 1);
 }
 
 #[test]
 #[serial]
-fn test_replicator_startup_2_nodes() {
-    run_replicator_startup_basic(2, 1);
+fn test_archiver_startup_2_nodes() {
+    run_archiver_startup_basic(2, 1);
 }
 
 #[test]
 #[serial]
-fn test_replicator_startup_leader_hang() {
+fn test_archiver_startup_leader_hang() {
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 
     solana_logger::setup();
-    info!("starting replicator test");
+    info!("starting archiver test");
 
-    let leader_ledger_path = std::path::PathBuf::from("replicator_test_leader_ledger");
+    let leader_ledger_path = std::path::PathBuf::from("archiver_test_leader_ledger");
     let (genesis_block, _mint_keypair) = create_genesis_block(10_000);
-    let (replicator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
+    let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
 
     {
-        let replicator_keypair = Arc::new(Keypair::new());
+        let archiver_keypair = Arc::new(Keypair::new());
         let storage_keypair = Arc::new(Keypair::new());
 
-        info!("starting replicator node");
-        let replicator_node = Node::new_localhost_with_pubkey(&replicator_keypair.pubkey());
+        info!("starting archiver node");
+        let archiver_node = Node::new_localhost_with_pubkey(&archiver_keypair.pubkey());
 
         let fake_gossip = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
         let leader_info = ContactInfo::new_gossip_entry_point(&fake_gossip);
 
-        let replicator_res = Replicator::new(
-            &replicator_ledger_path,
-            replicator_node,
+        let archiver_res = Archiver::new(
+            &archiver_ledger_path,
+            archiver_node,
             leader_info,
-            replicator_keypair,
+            archiver_keypair,
             storage_keypair,
         );
 
-        assert!(replicator_res.is_err());
+        assert!(archiver_res.is_err());
     }
 
     let _ignored = Blocktree::destroy(&leader_ledger_path);
-    let _ignored = Blocktree::destroy(&replicator_ledger_path);
+    let _ignored = Blocktree::destroy(&archiver_ledger_path);
     let _ignored = remove_dir_all(&leader_ledger_path);
-    let _ignored = remove_dir_all(&replicator_ledger_path);
+    let _ignored = remove_dir_all(&archiver_ledger_path);
 }
 
 #[test]
 #[serial]
-fn test_replicator_startup_ledger_hang() {
+fn test_archiver_startup_ledger_hang() {
     solana_logger::setup();
-    info!("starting replicator test");
+    info!("starting archiver test");
     let mut validator_config = ValidatorConfig::default();
     validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
     let cluster = LocalCluster::new_with_equal_stakes(2, 10_000, 100);
 
-    info!("starting replicator node");
+    info!("starting archiver node");
     let bad_keys = Arc::new(Keypair::new());
     let storage_keypair = Arc::new(Keypair::new());
-    let mut replicator_node = Node::new_localhost_with_pubkey(&bad_keys.pubkey());
+    let mut archiver_node = Node::new_localhost_with_pubkey(&bad_keys.pubkey());
 
     // Pass bad TVU sockets to prevent successful ledger download
-    replicator_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()];
-    let (replicator_ledger_path, _blockhash) = create_new_tmp_ledger!(&cluster.genesis_block);
+    archiver_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()];
+    let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&cluster.genesis_block);
 
-    let replicator_res = Replicator::new(
-        &replicator_ledger_path,
-        replicator_node,
+    let archiver_res = Archiver::new(
+        &archiver_ledger_path,
+        archiver_node,
         cluster.entry_point_info.clone(),
         bad_keys,
         storage_keypair,
     );
 
-    assert!(replicator_res.is_err());
+    assert!(archiver_res.is_err());
 }
 
 #[test]
 #[serial]
 fn test_account_setup() {
     let num_nodes = 1;
-    let num_replicators = 1;
+    let num_archivers = 1;
     let mut validator_config = ValidatorConfig::default();
     validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
     let config = ClusterConfig {
         validator_configs: vec![ValidatorConfig::default(); num_nodes],
-        num_replicators,
+        num_archivers,
         node_stakes: vec![100; num_nodes],
         cluster_lamports: 10_000,
         ..ClusterConfig::default()
@@ -165,18 +157,18 @@ fn test_account_setup() {
 
     let _ = discover_cluster(
         &cluster.entry_point_info.gossip,
-        num_nodes + num_replicators as usize,
+        num_nodes + num_archivers as usize,
     )
     .unwrap();
-    // now check that the cluster actually has accounts for the replicator.
+    // now check that the cluster actually has accounts for the archiver.
     let client = create_client(
         cluster.entry_point_info.client_facing_addr(),
         VALIDATOR_PORT_RANGE,
     );
-    cluster.replicator_infos.iter().for_each(|(_, value)| {
+    cluster.archiver_infos.iter().for_each(|(_, value)| {
         assert_eq!(
             client
-                .poll_get_balance(&value.replicator_storage_pubkey)
+                .poll_get_balance(&value.archiver_storage_pubkey)
                 .unwrap(),
             1
         );
diff --git a/multinode-demo/common.sh b/multinode-demo/common.sh
index 48dcb6699..b6052ab07 100644
--- a/multinode-demo/common.sh
+++ b/multinode-demo/common.sh
@@ -59,7 +59,7 @@ solana_gossip=$(solana_program gossip)
 solana_keygen=$(solana_program keygen)
 solana_ledger_tool=$(solana_program ledger-tool)
 solana_cli=$(solana_program)
-solana_replicator=$(solana_program replicator)
+solana_archiver=$(solana_program archiver)
 
 export RUST_BACKTRACE=1
 
diff --git a/multinode-demo/replicator-x.sh b/multinode-demo/replicator-x.sh
deleted file mode 100755
index 59f018430..000000000
--- a/multinode-demo/replicator-x.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env bash
-#
-# Start a dynamically-configured replicator
-#
-
-here=$(dirname "$0")
-exec "$here"/replicator.sh --label x$$ "$@"
diff --git a/net/common.sh b/net/common.sh
index 12501aa17..642c96d0f 100644
--- a/net/common.sh
+++ b/net/common.sh
@@ -38,9 +38,9 @@ clientIpListZone=()
 blockstreamerIpList=()
 blockstreamerIpListPrivate=()
 blockstreamerIpListZone=()
-replicatorIpList=()
-replicatorIpListPrivate=()
-replicatorIpListZone=()
+archiverIpList=()
+archiverIpListPrivate=()
+archiverIpListZone=()
 
 buildSshOptions() {
   sshOptions=(
diff --git a/net/gce.sh b/net/gce.sh
index b5cb1e6a0..fe91c768a 100755
--- a/net/gce.sh
+++ b/net/gce.sh
@@ -18,7 +18,7 @@ gce)
   fullNodeMachineType=$cpuBootstrapLeaderMachineType
   clientMachineType="--custom-cpu 16 --custom-memory 20GB"
   blockstreamerMachineType="--machine-type n1-standard-8"
-  replicatorMachineType="--custom-cpu 4 --custom-memory 16GB"
+  archiverMachineType="--custom-cpu 4 --custom-memory 16GB"
   ;;
 ec2)
   # shellcheck source=net/scripts/ec2-provider.sh
@@ -34,7 +34,7 @@ ec2)
   fullNodeMachineType=$cpuBootstrapLeaderMachineType
   clientMachineType=c5.2xlarge
   blockstreamerMachineType=c5.2xlarge
-  replicatorMachineType=c5.xlarge
+  archiverMachineType=c5.xlarge
   ;;
 azure)
   # shellcheck source=net/scripts/azure-provider.sh
@@ -47,7 +47,7 @@ azure)
   fullNodeMachineType=$cpuBootstrapLeaderMachineType
   clientMachineType=Standard_D16s_v3
   blockstreamerMachineType=Standard_D16s_v3
-  replicatorMachineType=Standard_D4s_v3
+  archiverMachineType=Standard_D4s_v3
   ;;
 colo)
   # shellcheck source=net/scripts/colo-provider.sh
@@ -59,7 +59,7 @@ colo)
   fullNodeMachineType=$cpuBootstrapLeaderMachineType
   clientMachineType=0
   blockstreamerMachineType=0
-  replicatorMachineType=0
+  archiverMachineType=0
   ;;
 *)
   echo "Error: Unknown cloud provider: $cloudProvider"
@@ -70,11 +70,11 @@ esac
 prefix=testnet-dev-${USER//[^A-Za-z0-9]/}
 additionalFullNodeCount=2
 clientNodeCount=0
-replicatorNodeCount=0
+archiverNodeCount=0
 blockstreamer=false
 fullNodeBootDiskSizeInGb=500
 clientBootDiskSizeInGb=75
-replicatorBootDiskSizeInGb=500
+archiverBootDiskSizeInGb=500
 fullNodeAdditionalDiskSizeInGb=
 externalNodes=false
 failOnValidatorBootupFailure=true
@@ -127,7 +127,7 @@ Manage testnet instances
  create-specific options:
    -n [number]      - Number of additional fullnodes (default: $additionalFullNodeCount)
    -c [number]      - Number of client nodes (default: $clientNodeCount)
-   -r [number]      - Number of replicator nodes (default: $replicatorNodeCount)
+   -r [number]      - Number of archiver nodes (default: $archiverNodeCount)
    -u               - Include a Blockstreamer (default: $blockstreamer)
    -P               - Use public network IP addresses (default: $publicNetwork)
    -g               - Enable GPU (default: $enableGpu)
@@ -149,7 +149,7 @@ Manage testnet instances
                       Only supported on GCE.
    --dedicated      - Use dedicated instances for additional full nodes
                       (by default preemptible instances are used to reduce
-                      cost). Note that the bootstrap leader, replicator,
+                      cost). Note that the bootstrap leader, archiver,
                       blockstreamer and client nodes are always dedicated.
 
  config-specific options:
@@ -217,7 +217,7 @@ while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt "${shortArgs[@]}"; do
     clientNodeCount=$OPTARG
     ;;
   r)
-    replicatorNodeCount=$OPTARG
+    archiverNodeCount=$OPTARG
     ;;
   z)
     containsZone "$OPTARG" "${zones[@]}" || zones+=("$OPTARG")
@@ -547,13 +547,13 @@ EOF
   }
 
   if ! $externalNodes; then
-    echo "replicatorIpList=()" >> "$configFile"
-    echo "replicatorIpListPrivate=()" >> "$configFile"
+    echo "archiverIpList=()" >> "$configFile"
+    echo "archiverIpListPrivate=()" >> "$configFile"
   fi
-  echo "Looking for replicator instances..."
-  cloud_FindInstances "$prefix-replicator"
+  echo "Looking for archiver instances..."
+  cloud_FindInstances "$prefix-archiver"
   [[ ${#instances[@]} -eq 0 ]] || {
-    cloud_ForEachInstance recordInstanceIp true replicatorIpList
+    cloud_ForEachInstance recordInstanceIp true archiverIpList
   }
 
   echo "Wrote $configFile"
@@ -621,7 +621,7 @@ create)
 Bootstrap leader = $bootstrapLeaderMachineType (GPU=$enableGpu)
 Additional fullnodes = $additionalFullNodeCount x $fullNodeMachineType
 Client(s) = $clientNodeCount x $clientMachineType
-Replicators(s) = $replicatorNodeCount x $replicatorMachineType
+Archivers(s) = $archiverNodeCount x $archiverMachineType
 Blockstreamer = $blockstreamer
========================================================================================
 
@@ -775,9 +775,9 @@ EOF
       "$startupScript" "$blockstreamerAddress" "$bootDiskType" "" "$sshPrivateKey"
   fi
 
-  if [[ $replicatorNodeCount -gt 0 ]]; then
-    cloud_CreateInstances "$prefix" "$prefix-replicator" "$replicatorNodeCount" \
-      false "$replicatorMachineType" "${zones[0]}" "$replicatorBootDiskSizeInGb" \
+  if [[ $archiverNodeCount -gt 0 ]]; then
+    cloud_CreateInstances "$prefix" "$prefix-archiver" "$archiverNodeCount" \
+      false "$archiverMachineType" "${zones[0]}" "$archiverBootDiskSizeInGb" \
       "$startupScript" "" "" "" "never preemptible" "$sshPrivateKey"
   fi
 
@@ -824,11 +824,11 @@ info)
       printNode blockstreamer "$ipAddress" "$ipAddressPrivate" "$zone"
     done
 
-    for i in $(seq 0 $(( ${#replicatorIpList[@]} - 1)) ); do
-      ipAddress=${replicatorIpList[$i]}
-      ipAddressPrivate=${replicatorIpListPrivate[$i]}
-      zone=${replicatorIpListZone[$i]}
-      printNode replicator "$ipAddress" "$ipAddressPrivate" "$zone"
+    for i in $(seq 0 $(( ${#archiverIpList[@]} - 1)) ); do
+      ipAddress=${archiverIpList[$i]}
+      ipAddressPrivate=${archiverIpListPrivate[$i]}
+      zone=${archiverIpListZone[$i]}
+      printNode archiver "$ipAddress" "$ipAddressPrivate" "$zone"
    done
   ;;
 status)
diff --git a/net/net.sh b/net/net.sh
index 021b4ccce..c89d1f100 100755
--- a/net/net.sh
+++ b/net/net.sh
@@ -428,7 +428,7 @@ startBootstrapLeader() {
          $deployMethod \
          bootstrap-leader \
          $entrypointIp \
-         $((${#fullnodeIpList[@]} + ${#blockstreamerIpList[@]} + ${#replicatorIpList[@]})) \
+         $((${#fullnodeIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \
          \"$RUST_LOG\" \
          $skipSetup \
          $failOnValidatorBootupFailure \
@@ -493,7 +493,7 @@ startNode() {
          $deployMethod \
          $nodeType \
          $entrypointIp \
-         $((${#fullnodeIpList[@]} + ${#blockstreamerIpList[@]} + ${#replicatorIpList[@]})) \
+         $((${#fullnodeIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \
          \"$RUST_LOG\" \
          $skipSetup \
          $failOnValidatorBootupFailure \
@@ -609,12 +609,12 @@ getNodeType() {
   nodeIndex=0 # <-- global
   nodeType=validator # <-- global
 
-  for ipAddress in "${fullnodeIpList[@]}" b "${blockstreamerIpList[@]}" r "${replicatorIpList[@]}"; do
+  for ipAddress in "${fullnodeIpList[@]}" b "${blockstreamerIpList[@]}" r "${archiverIpList[@]}"; do
     if [[ $ipAddress = b ]]; then
       nodeType=blockstreamer
       continue
     elif [[ $ipAddress = r ]]; then
-      nodeType=replicator
+      nodeType=archiver
       continue
     fi
 
@@ -689,7 +689,7 @@ deploy() {
   $metricsWriteDatapoint "testnet-deploy net-start-begin=1"
 
   declare bootstrapLeader=true
-  for nodeAddress in "${fullnodeIpList[@]}" "${blockstreamerIpList[@]}" "${replicatorIpList[@]}"; do
+  for nodeAddress in "${fullnodeIpList[@]}" "${blockstreamerIpList[@]}" "${archiverIpList[@]}"; do
     nodeType=
     nodeIndex=
     getNodeType
@@ -774,7 +774,7 @@ deploy() {
   echo
   echo "+++ Deployment Successful"
   echo "Bootstrap leader deployment took $bootstrapNodeDeployTime seconds"
-  echo "Additional fullnode deployment (${#fullnodeIpList[@]} full nodes, ${#blockstreamerIpList[@]} blockstreamer nodes, ${#replicatorIpList[@]} replicators) took $additionalNodeDeployTime seconds"
+  echo "Additional fullnode deployment (${#fullnodeIpList[@]} full nodes, ${#blockstreamerIpList[@]} blockstreamer nodes, ${#archiverIpList[@]} archivers) took $additionalNodeDeployTime seconds"
   echo "Client deployment (${#clientIpList[@]} instances) took $clientDeployTime seconds"
   echo "Network start logs in $netLogDir"
 }
@@ -820,7 +820,7 @@ stop() {
   declare loopCount=0
   pids=()
-  for ipAddress in "${fullnodeIpList[@]}" "${blockstreamerIpList[@]}" "${replicatorIpList[@]}" "${clientIpList[@]}"; do
+  for ipAddress in "${fullnodeIpList[@]}" "${blockstreamerIpList[@]}" "${archiverIpList[@]}" "${clientIpList[@]}"; do
     stopNode "$ipAddress" false
 
     # Stagger additional node stop time to avoid too many concurrent ssh
@@ -922,7 +922,7 @@ logs)
   for ipAddress in "${blockstreamerIpList[@]}"; do
     fetchRemoteLog "$ipAddress" fullnode
   done
-  for ipAddress in "${replicatorIpList[@]}"; do
+  for ipAddress in "${archiverIpList[@]}"; do
     fetchRemoteLog "$ipAddress" fullnode
   done
   ;;
diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh
index 6c02a6fdb..5a2a6e75f 100755
--- a/net/remote/remote-node.sh
+++ b/net/remote/remote-node.sh
@@ -364,7 +364,7 @@ EOF
       validator-info publish "$(hostname)" -n team/solana --force || true
     fi
     ;;
-  replicator)
+  archiver)
     if [[ $deployMethod != skip ]]; then
       net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/
     fi
@@ -374,14 +374,14 @@ EOF
     )
 
     if [[ $airdropsEnabled != true ]]; then
-      echo "TODO: replicators not supported without airdrops"
+      echo "TODO: archivers not supported without airdrops"
      # TODO: need to provide the `--identity` argument to an existing system
      #       account with lamports in it
      exit 1
    fi
 
    cat >> ~/solana/on-reboot <<EOF
-      nohup multinode-demo/replicator.sh ${args[@]} > fullnode.log.\$now 2>&1 &
+      nohup multinode-demo/archiver.sh ${args[@]} > fullnode.log.\$now 2>&1 &
       pid=\$!
       oom_score_adj "\$pid" 1000
       disown
diff --git a/net/ssh.sh b/net/ssh.sh
index 768d76339..2c3bc2e1d 100755
--- a/net/ssh.sh
+++ b/net/ssh.sh
@@ -72,11 +72,11 @@ else
   done
 fi
 echo
-echo Replicators:
-if [[ ${#replicatorIpList[@]} -eq 0 ]]; then
+echo Archivers:
+if [[ ${#archiverIpList[@]} -eq 0 ]]; then
   echo "  None"
 else
-  for ipAddress in "${replicatorIpList[@]}"; do
+  for ipAddress in "${archiverIpList[@]}"; do
     printNode fullnode "$ipAddress"
   done
 fi
diff --git a/programs/storage_api/src/storage_contract.rs b/programs/storage_api/src/storage_contract.rs
index c83c812a0..d8e6c270c 100644
--- a/programs/storage_api/src/storage_contract.rs
+++ b/programs/storage_api/src/storage_contract.rs
@@ -62,7 +62,7 @@ impl Default for ProofStatus {
 
 #[derive(Default, Debug, Serialize, Deserialize, Clone, PartialEq)]
 pub struct Proof {
-    /// The encryption key the replicator used (also used to generate offsets)
+    /// The encryption key the archiver used (also used to generate offsets)
     pub signature: Signature,
     /// A "recent" blockhash used to generate the seed
     pub blockhash: Hash,
@@ -82,14 +82,14 @@ pub enum StorageContract {
         segment: u64,
         // Most recently advertised blockhash
         hash: Hash,
-        // Lockouts and Rewards are per segment per replicator. It needs to remain this way until
+        // Lockouts and Rewards are per segment per archiver. It needs to remain this way until
         // the challenge stage is added.
         lockout_validations: BTreeMap<u64, BTreeMap<Pubkey, Vec<ProofStatus>>>,
         // Used to keep track of ongoing credits
         credits: Credits,
     },
 
-    ReplicatorStorage {
+    ArchiverStorage {
         owner: Pubkey,
         // TODO what to do about duplicate proofs across segments? - Check the blockhashes
         // Map of Proofs per segment, in a Vec
@@ -139,7 +139,7 @@ impl<'a> StorageAccount<'a> {
         let storage_contract = &mut self.account.state()?;
         if let StorageContract::Uninitialized = storage_contract {
             *storage_contract = match account_type {
-                StorageAccountType::Replicator => StorageContract::ReplicatorStorage {
+                StorageAccountType::Archiver => StorageContract::ArchiverStorage {
                     owner,
                     proofs: BTreeMap::new(),
                     validations: BTreeMap::new(),
@@ -168,7 +168,7 @@ impl<'a> StorageAccount<'a> {
         clock: sysvar::clock::Clock,
     ) -> Result<(), InstructionError> {
         let mut storage_contract = &mut self.account.state()?;
-        if let StorageContract::ReplicatorStorage {
+        if let StorageContract::ArchiverStorage {
             proofs,
             validations,
             credits,
@@ -278,7 +278,7 @@ impl<'a> StorageAccount<'a> {
         clock: sysvar::clock::Clock,
         segment_index: u64,
         proofs_per_account: Vec<Vec<ProofStatus>>,
-        replicator_accounts: &mut [StorageAccount],
+        archiver_accounts: &mut [StorageAccount],
     ) -> Result<(), InstructionError> {
         let mut storage_contract = &mut self.account.state()?;
         if let StorageContract::ValidatorStorage {
@@ -293,12 +293,12 @@ impl<'a> StorageAccount<'a> {
                 ));
             }
 
-            let accounts = replicator_accounts
+            let accounts = archiver_accounts
                 .iter_mut()
                 .enumerate()
                 .filter_map(|(i, account)| {
                     account.account.state().ok().map(|contract| match contract {
-                        StorageContract::ReplicatorStorage {
+                        StorageContract::ArchiverStorage {
                             proofs: account_proofs,
                             ..
                         } => {
@@ -349,11 +349,11 @@ impl<'a> StorageAccount<'a> {
             // allow validators to store successful validations
             stored_proofs
                 .into_iter()
-                .for_each(|(replicator_account_id, proof_mask)| {
+                .for_each(|(archiver_account_id, proof_mask)| {
                     lockout_validations
                         .entry(segment_index)
                         .or_default()
-                        .insert(replicator_account_id, proof_mask);
+                        .insert(archiver_account_id, proof_mask);
                 });
 
             self.account.set_state(storage_contract)
@@ -387,7 +387,7 @@ impl<'a> StorageAccount<'a> {
             check_redeemable(credits, rewards.storage_point_value, rewards_pool, owner)?;
 
             self.account.set_state(storage_contract)
-        } else if let StorageContract::ReplicatorStorage {
+        } else if let StorageContract::ArchiverStorage {
             owner: account_owner,
             validations,
             credits,
@@ -438,7 +438,7 @@ pub fn create_rewards_pool() -> Account {
     Account::new_data(std::u64::MAX, &StorageContract::RewardsPool, &crate::id()).unwrap()
 }
 
-/// Store the result of a proof validation into the replicator account
+/// Store the result of a proof validation into the archiver account
 fn store_validation_result(
     me: &Pubkey,
     clock: &sysvar::clock::Clock,
@@ -448,7 +448,7 @@ fn store_validation_result(
 ) -> Result<(), InstructionError> {
     let mut storage_contract = storage_account.account.state()?;
     match &mut storage_contract {
-        StorageContract::ReplicatorStorage {
+        StorageContract::ArchiverStorage {
             proofs,
             validations,
             credits,
@@ -514,7 +514,7 @@ mod tests {
         if let StorageContract::ValidatorStorage { .. } = contract {
             assert!(true)
         }
-        if let StorageContract::ReplicatorStorage { .. } = &mut contract {
+        if let StorageContract::ArchiverStorage { .. } = &mut contract {
             panic!("Contract should not decode into two types");
         }
 
@@ -526,10 +526,10 @@ mod tests {
             credits: Credits::default(),
         };
         storage_account.account.set_state(&contract).unwrap();
-        if let StorageContract::ReplicatorStorage { .. } = contract {
+        if let StorageContract::ArchiverStorage { .. } = contract {
             panic!("Wrong contract type");
         }
-        contract = StorageContract::ReplicatorStorage {
+        contract = StorageContract::ArchiverStorage {
             owner: Pubkey::default(),
             proofs: BTreeMap::new(),
             validations: BTreeMap::new(),
@@ -574,7 +574,7 @@ mod tests {
         if let StorageContract::Uninitialized = storage_contract {
             let mut proofs = BTreeMap::new();
             proofs.insert(0, vec![proof.clone()]);
-            *storage_contract = StorageContract::ReplicatorStorage {
+            *storage_contract = StorageContract::ArchiverStorage {
                 owner: Pubkey::default(),
                 proofs,
                 validations: BTreeMap::new(),
diff --git a/programs/storage_api/src/storage_instruction.rs b/programs/storage_api/src/storage_instruction.rs
index ce72ee4c9..d5213686f 100644
--- a/programs/storage_api/src/storage_instruction.rs
+++ b/programs/storage_api/src/storage_instruction.rs
@@ -10,13 +10,13 @@ use solana_sdk::sysvar::{clock, rewards};
 
 #[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Copy)]
 pub enum StorageAccountType {
-    Replicator,
+    Archiver,
     Validator,
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub enum StorageInstruction {
-    /// Initialize the account as a validator or replicator
+    /// Initialize the account as a validator or archiver
     ///
     /// Expects 1 Account:
     ///    0 - Account to be initialized
@@ -40,14 +40,14 @@ pub enum StorageInstruction {
     /// Expects 1 Account:
     ///    0 - Storage account with credits to redeem
     ///    1 - Clock Syscall to figure out the clock epoch
-    ///    2 - Replicator account to credit - this account *must* be the owner
+    ///    2 - Archiver account to credit - this account *must* be the owner
     ///    3 - MiningPool account to redeem credits from
     ///    4 - Rewards Syscall to figure out point values
     ClaimStorageReward,
     ProofValidation {
         /// The segment during which this proof was generated
         segment: u64,
-        /// A Vec of proof masks per keyed replicator account loaded by the instruction
+        /// A Vec of proof masks per keyed archiver account loaded by the instruction
         proofs: Vec<Vec<ProofStatus>>,
     },
 }
diff --git a/programs/storage_program/tests/storage_processor.rs b/programs/storage_program/tests/storage_processor.rs
index 3eba11454..738b0bfb8 100644
--- a/programs/storage_program/tests/storage_processor.rs
+++ b/programs/storage_program/tests/storage_processor.rs
@@ -54,7 +54,7 @@ fn test_instruction(
 fn test_account_owner() {
     let account_owner = Pubkey::new_rand();
     let validator_storage_pubkey = Pubkey::new_rand();
-    let replicator_storage_pubkey = Pubkey::new_rand();
+    let archiver_storage_pubkey = Pubkey::new_rand();
 
     let GenesisBlockInfo {
         genesis_block,
@@ -90,18 +90,18 @@ fn test_account_owner() {
     let message = Message::new(storage_instruction::create_storage_account(
         &mint_pubkey,
         &account_owner,
-        &replicator_storage_pubkey,
+        &archiver_storage_pubkey,
         1,
-        StorageAccountType::Replicator,
+        StorageAccountType::Archiver,
     ));
     bank_client
         .send_message(&[&mint_keypair], message)
         .expect("failed to create account");
     let account = bank
-        .get_account(&replicator_storage_pubkey)
+        .get_account(&archiver_storage_pubkey)
         .expect("account not found");
     let storage_contract = account.state().expect("couldn't unpack account data");
-    if let StorageContract::ReplicatorStorage { owner, .. } = storage_contract {
+    if let StorageContract::ArchiverStorage { owner, .. } = storage_contract {
         assert_eq!(owner, account_owner);
     } else {
         assert!(false, "wrong account type found")
@@ -119,7 +119,7 @@ fn test_proof_bounds() {
     {
         let mut storage_account = StorageAccount::new(pubkey, &mut account);
         storage_account
-            .initialize_storage(account_owner, StorageAccountType::Replicator)
+            .initialize_storage(account_owner, StorageAccountType::Archiver)
             .unwrap();
     }
 
@@ -232,7 +232,7 @@ fn test_submit_mining_ok() {
     {
         let mut storage_account = StorageAccount::new(pubkey, &mut account);
         storage_account
-            .initialize_storage(account_owner, StorageAccountType::Replicator)
+            .initialize_storage(account_owner, StorageAccountType::Archiver)
             .unwrap();
     }
 
@@ -270,14 +270,14 @@ fn test_validate_mining() {
         .native_instruction_processors
         .push(solana_storage_program::solana_storage_program!());
     let mint_pubkey = mint_keypair.pubkey();
-    // 1 owner for all replicator and validator accounts for the test
+    // 1 owner for all archiver and validator accounts for the test
     let owner_pubkey = Pubkey::new_rand();
 
-    let replicator_1_storage_keypair = Keypair::new();
-    let replicator_1_storage_id = replicator_1_storage_keypair.pubkey();
+    let archiver_1_storage_keypair = Keypair::new();
+    let archiver_1_storage_id = archiver_1_storage_keypair.pubkey();
 
-    let replicator_2_storage_keypair = Keypair::new();
-    let replicator_2_storage_id = replicator_2_storage_keypair.pubkey();
+    let archiver_2_storage_keypair = Keypair::new();
+    let archiver_2_storage_id = archiver_2_storage_keypair.pubkey();
 
     let validator_storage_keypair = Keypair::new();
     let validator_storage_id = validator_storage_keypair.pubkey();
@@ -291,7 +291,7 @@ fn test_validate_mining() {
         &bank_client,
         &mint_keypair,
         &[&validator_storage_id],
-        &[&replicator_1_storage_id, &replicator_2_storage_id],
+        &[&archiver_1_storage_id, &archiver_2_storage_id],
         10,
     );
 
@@ -317,24 +317,24 @@ fn test_validate_mining() {
         Ok(_)
     );
 
-    // submit proofs 5 proofs for each replicator for segment 0
+    // submit proofs 5 proofs for each archiver for segment 0
     let mut checked_proofs: HashMap<_, Vec<_>> = HashMap::new();
     for _ in 0..5 {
         checked_proofs
-            .entry(replicator_1_storage_id)
+            .entry(archiver_1_storage_id)
             .or_default()
             .push(submit_proof(
                 &mint_keypair,
-                &replicator_1_storage_keypair,
+                &archiver_1_storage_keypair,
                 &bank_client,
                 0,
             ));
         checked_proofs
-            .entry(replicator_2_storage_id)
+            .entry(archiver_2_storage_id)
             .or_default()
             .push(submit_proof(
                 &mint_keypair,
-                &replicator_2_storage_keypair,
+                &archiver_2_storage_keypair,
                 &bank_client,
                 0,
             ));
@@ -429,15 +429,12 @@ fn test_validate_mining() {
         bank.register_tick(&bank.last_blockhash());
     }
 
-    assert_eq!(
-        bank_client.get_balance(&replicator_1_storage_id).unwrap(),
-        10
-    );
+    assert_eq!(bank_client.get_balance(&archiver_1_storage_id).unwrap(), 10);
 
     let message = Message::new_with_payer(
         vec![storage_instruction::claim_reward(
             &owner_pubkey,
-            &replicator_1_storage_id,
+            &archiver_1_storage_id,
         )],
         Some(&mint_pubkey),
     );
@@ -451,7 +448,7 @@ fn test_validate_mining() {
     let message = Message::new_with_payer(
         vec![storage_instruction::claim_reward(
             &owner_pubkey,
-            &replicator_2_storage_id,
+            &archiver_2_storage_id,
         )],
         Some(&mint_pubkey),
     );
@@ -469,7 +466,7 @@ fn init_storage_accounts(
     client: &BankClient,
     mint: &Keypair,
     validator_accounts_to_create: &[&Pubkey],
-    replicator_accounts_to_create: &[&Pubkey],
+    archiver_accounts_to_create: &[&Pubkey],
     lamports: u64,
 ) {
     let mut ixs: Vec<_> = vec![system_instruction::transfer_now(&mint.pubkey(), owner, 1)];
@@ -487,17 +484,15 @@ fn init_storage_accounts(
             })
             .collect(),
     );
-    replicator_accounts_to_create
-        .into_iter()
-        .for_each(|account| {
-            ixs.append(&mut storage_instruction::create_storage_account(
-                &mint.pubkey(),
-                owner,
-                account,
-                lamports,
-                StorageAccountType::Replicator,
-            ))
-        });
+    archiver_accounts_to_create.into_iter().for_each(|account| {
+        ixs.append(&mut storage_instruction::create_storage_account(
+            &mint.pubkey(),
+            owner,
+            account,
+            lamports,
+            StorageAccountType::Archiver,
+        ))
+    });
     let message = Message::new(ixs);
     client.send_message(&[mint], message).unwrap();
 }
@@ -573,8 +568,8 @@ fn test_bank_storage() {
         .native_instruction_processors
         .push(solana_storage_program::solana_storage_program!());
     let mint_pubkey = mint_keypair.pubkey();
-    let replicator_keypair = Keypair::new();
-    let replicator_pubkey = replicator_keypair.pubkey();
+    let archiver_keypair = Keypair::new();
+    let archiver_pubkey = archiver_keypair.pubkey();
     let validator_keypair = Keypair::new();
     let validator_pubkey = validator_keypair.pubkey();
@@ -595,9 +590,9 @@ fn test_bank_storage() {
     let message = Message::new(storage_instruction::create_storage_account(
         &mint_pubkey,
         &Pubkey::default(),
-        &replicator_pubkey,
+        &archiver_pubkey,
         11,
-        StorageAccountType::Replicator,
+        StorageAccountType::Archiver,
     ));
     bank_client.send_message(&[&mint_keypair], message).unwrap();
 
@@ -627,7 +622,7 @@ fn test_bank_storage() {
     let slot = 0;
     let message = Message::new_with_payer(
         vec![storage_instruction::mining_proof(
-            &replicator_pubkey,
+            &archiver_pubkey,
             Hash::default(),
             slot,
             Signature::default(),
@@ -636,7 +631,7 @@ fn test_bank_storage() {
         Some(&mint_pubkey),
     );
     assert_matches!(
-        bank_client.send_message(&[&mint_keypair, &replicator_keypair], message),
+        bank_client.send_message(&[&mint_keypair, &archiver_keypair], message),
         Ok(_)
     );
 
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 6f81080fa..8ed873635 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -237,7 +237,7 @@ pub struct Bank {
     /// cache of vote_account and stake_account state for this fork
     stakes: RwLock<Stakes>,
 
-    /// cache of validator and replicator storage accounts for this fork
+    /// cache of validator and archiver storage accounts for this fork
     storage_accounts: RwLock<StorageAccounts>,
 
     /// staked nodes on epoch boundaries, saved off when a bank.slot() is at
@@ -1683,13 +1683,13 @@ mod tests {
         let ((vote_id, mut vote_account), stake) =
             crate::stakes::tests::create_staked_node_accounts(1_0000);
 
-        let ((validator_id, validator_account), (replicator_id, replicator_account)) =
+        let ((validator_id, validator_account), (archiver_id, archiver_account)) =
             crate::storage_utils::tests::create_storage_accounts_with_credits(100);
 
         // set up stakes,vote, and storage accounts
         bank.store_account(&stake.0, &stake.1);
         bank.store_account(&validator_id, &validator_account);
-        bank.store_account(&replicator_id, &replicator_account);
+        bank.store_account(&archiver_id, &archiver_account);
 
         // generate some rewards
         let mut vote_state = VoteState::from(&vote_account).unwrap();
diff --git a/runtime/src/storage_utils.rs b/runtime/src/storage_utils.rs
index 9149593a2..8a84a24fd 100644
--- a/runtime/src/storage_utils.rs
+++ b/runtime/src/storage_utils.rs
@@ -10,8 +10,8 @@ pub struct StorageAccounts {
     /// validator storage accounts and their credits
     validator_accounts: HashSet<Pubkey>,
 
-    /// replicator storage accounts and their credits
-    replicator_accounts: HashSet<Pubkey>,
+    /// archiver storage accounts and their credits
+    archiver_accounts: HashSet<Pubkey>,
 
     /// unclaimed points.
     // 1 point == 1 storage account credit
@@ -25,11 +25,11 @@ pub fn is_storage(account: &Account) -> bool {
 impl StorageAccounts {
     pub fn store(&mut self, pubkey: &Pubkey, account: &Account) {
         if let Ok(storage_state) = account.state() {
-            if let StorageContract::ReplicatorStorage { credits, .. } = storage_state {
+            if let StorageContract::ArchiverStorage { credits, .. } = storage_state {
                 if account.lamports == 0 {
-                    self.replicator_accounts.remove(pubkey);
+                    self.archiver_accounts.remove(pubkey);
                 } else {
-                    self.replicator_accounts.insert(*pubkey);
+                    self.archiver_accounts.insert(*pubkey);
                     self.points.insert(*pubkey, credits.current_epoch);
                 }
             } else if let StorageContract::ValidatorStorage { credits, .. } = storage_state {
@@ -67,9 +67,9 @@ pub fn validator_accounts(bank: &Bank) -> HashMap<Pubkey, Account> {
         .collect()
 }
 
-pub fn replicator_accounts(bank: &Bank) -> HashMap<Pubkey, Account> {
+pub fn archiver_accounts(bank: &Bank) -> HashMap<Pubkey, Account> {
     bank.storage_accounts()
-        .replicator_accounts
+        .archiver_accounts
         .iter()
         .filter_map(|account_id| {
             bank.get_account(account_id)
@@ -97,8 +97,8 @@ pub(crate) mod tests {
     fn test_store_and_recover() {
         let (genesis_block, mint_keypair) = create_genesis_block(1000);
         let mint_pubkey = mint_keypair.pubkey();
-        let replicator_keypair = Keypair::new();
-        let replicator_pubkey = replicator_keypair.pubkey();
+        let archiver_keypair = Keypair::new();
+        let archiver_pubkey = archiver_keypair.pubkey();
         let validator_keypair = Keypair::new();
         let validator_pubkey = validator_keypair.pubkey();
         let mut bank = Bank::new(&genesis_block);
@@ -113,9 +113,9 @@ pub(crate) mod tests {
         let message = Message::new(storage_instruction::create_storage_account(
             &mint_pubkey,
             &Pubkey::default(),
-            &replicator_pubkey,
+            &archiver_pubkey,
             11,
-            StorageAccountType::Replicator,
+            StorageAccountType::Archiver,
         ));
         bank_client.send_message(&[&mint_keypair], message).unwrap();
 
@@ -129,7 +129,7 @@ pub(crate) mod tests {
         bank_client.send_message(&[&mint_keypair], message).unwrap();
 
         assert_eq!(validator_accounts(bank.as_ref()).len(), 1);
-        assert_eq!(replicator_accounts(bank.as_ref()).len(), 1);
+        assert_eq!(archiver_accounts(bank.as_ref()).len(), 1);
     }
 
     #[test]
@@ -140,38 +140,38 @@ pub(crate) mod tests {
         assert_eq!(storage_accounts.points(), 0);
         assert_eq!(storage_accounts.claim_points(), 0);
 
-        // create random validator and replicator accounts with `credits`
-        let ((validator_pubkey, validator_account), (replicator_pubkey, replicator_account)) =
+        // create random validator and archiver accounts with `credits`
+        let ((validator_pubkey, validator_account), (archiver_pubkey, archiver_account)) =
             create_storage_accounts_with_credits(credits);
 
         storage_accounts.store(&validator_pubkey, &validator_account);
-        storage_accounts.store(&replicator_pubkey, &replicator_account);
+        storage_accounts.store(&archiver_pubkey, &archiver_account);
         // check that 2x credits worth of points are available
         assert_eq!(storage_accounts.points(), credits * 2);
 
-        let ((validator_pubkey, validator_account), (replicator_pubkey, mut replicator_account)) =
+        let ((validator_pubkey, validator_account), (archiver_pubkey, mut archiver_account)) =
             create_storage_accounts_with_credits(credits);
 
         storage_accounts.store(&validator_pubkey, &validator_account);
-        storage_accounts.store(&replicator_pubkey, &replicator_account);
+        storage_accounts.store(&archiver_pubkey, &archiver_account);
         // check that 4x credits worth of points are available
        assert_eq!(storage_accounts.points(), credits * 2 * 2);
 
         storage_accounts.store(&validator_pubkey, &validator_account);
-        storage_accounts.store(&replicator_pubkey, &replicator_account);
+        storage_accounts.store(&archiver_pubkey, &archiver_account);
         // check that storing again has no effect
         assert_eq!(storage_accounts.points(), credits * 2 * 2);
 
-        let storage_contract = &mut replicator_account.state().unwrap();
-        if let StorageContract::ReplicatorStorage {
+        let storage_contract = &mut archiver_account.state().unwrap();
+        if let StorageContract::ArchiverStorage {
             credits: account_credits,
             ..
         } = storage_contract
         {
             account_credits.current_epoch += 1;
         }
-        replicator_account.set_state(storage_contract).unwrap();
-        storage_accounts.store(&replicator_pubkey, &replicator_account);
+        archiver_account.set_state(storage_contract).unwrap();
+        storage_accounts.store(&archiver_pubkey, &archiver_account);
         // check that incremental store increases credits
         assert_eq!(storage_accounts.points(), credits * 2 * 2 + 1);
@@ -185,7 +185,7 @@ pub(crate) mod tests {
         credits: u64,
     ) -> ((Pubkey, Account), (Pubkey, Account)) {
         let validator_pubkey = Pubkey::new_rand();
-        let replicator_pubkey = Pubkey::new_rand();
+        let archiver_pubkey = Pubkey::new_rand();
 
         let mut validator_account =
             Account::new(1, STORAGE_ACCOUNT_SPACE as usize, &solana_storage_api::id());
@@ -203,25 +203,25 @@ pub(crate) mod tests {
         }
         validator_account.set_state(storage_contract).unwrap();
 
-        let mut replicator_account =
+        let mut archiver_account =
             Account::new(1, STORAGE_ACCOUNT_SPACE as usize, &solana_storage_api::id());
-        let mut replicator = StorageAccount::new(replicator_pubkey, &mut replicator_account);
-        replicator
-            .initialize_storage(replicator_pubkey, StorageAccountType::Replicator)
+        let mut archiver = StorageAccount::new(archiver_pubkey, &mut archiver_account);
+        archiver
+            .initialize_storage(archiver_pubkey, StorageAccountType::Archiver)
             .unwrap();
-        let storage_contract = &mut replicator_account.state().unwrap();
-        if let StorageContract::ReplicatorStorage {
+        let storage_contract = &mut archiver_account.state().unwrap();
+        if let StorageContract::ArchiverStorage {
             credits: account_credits,
             ..
         } = storage_contract
         {
             account_credits.current_epoch = credits;
         }
-        replicator_account.set_state(storage_contract).unwrap();
+        archiver_account.set_state(storage_contract).unwrap();
 
         (
             (validator_pubkey, validator_account),
-            (replicator_pubkey, replicator_account),
+            (archiver_pubkey, archiver_account),
         )
     }
 }
diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh
index 7606eba3b..219d765dd 100755
--- a/scripts/cargo-install-all.sh
+++ b/scripts/cargo-install-all.sh
@@ -49,7 +49,7 @@ BINS=(
   solana-install-init
   solana-keygen
   solana-ledger-tool
-  solana-replicator
+  solana-archiver
   solana-validator
 )
 
diff --git a/sdk/src/clock.rs b/sdk/src/clock.rs
index 93bb5f2cc..e8db231d5 100644
--- a/sdk/src/clock.rs
+++ b/sdk/src/clock.rs
@@ -65,7 +65,7 @@ pub fn get_complete_segment_from_slot(
 /// is some number of Ticks long.
 pub type Slot = u64;
 
-/// A segment is some number of slots stored by replicators
+/// A segment is some number of slots stored by archivers
 pub type Segment = u64;
 
 /// Epoch is a unit of time a given leader schedule is honored,
diff --git a/validator/src/lib.rs b/validator/src/lib.rs
index d8ee7a7a1..2a09ffd02 100644
--- a/validator/src/lib.rs
+++ b/validator/src/lib.rs
@@ -162,7 +162,7 @@ fn initialize_ledger_path(
     ledger_path: &Path,
     no_snapshot_fetch: bool,
 ) -> Result {
-    let (nodes, _replicators) = discover(
+    let (nodes, _archivers) = discover(
         &entrypoint.gossip,
         Some(1),
         Some(60),