More fullnode -> validator renaming (#4414)

* s/fullnode_config/validator_config/g

* s/FullnodeConfig/ValidatorConfig/g

* mv core/lib/fullnode.rs core/lib/validator.rs

* s/Fullnode/Validator/g

* Add replicator-x.sh

* Rename fullnode.md to validator.md

* cargo fmt
Michael Vines 2019-05-23 22:05:16 -07:00 committed by GitHub
parent 50207a30ef
commit 94beb4b8c2
27 changed files with 291 additions and 284 deletions

View File

@ -892,9 +892,9 @@ pub fn airdrop_lamports(client: &Client, drone_addr: &SocketAddr, id: &Keypair,
#[cfg(test)]
mod tests {
use super::*;
use solana::fullnode::FullnodeConfig;
use solana::gossip_service::{discover_cluster, get_clients};
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone;
use solana_exchange_api::exchange_processor::process_instruction;
use solana_runtime::bank::Bank;
@ -907,7 +907,7 @@ mod tests {
solana_logger::setup();
const NUM_NODES: usize = 1;
let fullnode_config = FullnodeConfig::default();
let validator_config = ValidatorConfig::default();
let mut config = Config::default();
config.identity = Keypair::new();
@ -929,7 +929,7 @@ mod tests {
let cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
fullnode_config,
validator_config,
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
});

View File

@ -623,8 +623,8 @@ pub fn generate_and_fund_keypairs<T: Client>(
mod tests {
use super::*;
use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::fullnode::FullnodeConfig;
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana::validator::ValidatorConfig;
use solana_client::thin_client::create_client;
use solana_drone::drone::run_local_drone;
use solana_runtime::bank::Bank;
@ -651,12 +651,12 @@ mod tests {
#[test]
fn test_bench_tps_local_cluster() {
solana_logger::setup();
let fullnode_config = FullnodeConfig::default();
let validator_config = ValidatorConfig::default();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 2_000_000,
fullnode_config,
validator_config,
..ClusterConfig::default()
});

View File

@ -1,30 +0,0 @@
.--------------------------------------.
| Fullnode |
| |
.--------. | .-------------------. |
| |---->| | |
| Client | | | JSON RPC Service | |
| |<----| | |
`----+---` | `-------------------` |
| | ^ |
| | | .----------------. | .------------------.
| | | | Gossip Service |<----------| Validators |
| | | `----------------` | | |
| | | ^ | | |
| | | | | | .------------. |
| | .---+---. .----+---. .-----------. | | | | |
| | | Bank |<-+ Replay | | BlobFetch |<------+ Upstream | |
| | | Forks | | Stage | | Stage | | | | Validators | |
| | `-------` `--------` `--+--------` | | | | |
| | ^ ^ | | | `------------` |
| | | | v | | |
| | | .--+--------. | | |
| | | | Blocktree | | | |
| | | `-----------` | | .------------. |
| | | ^ | | | | |
| | | | | | | Downstream | |
| | .--+--. .-------+---. | | | Validators | |
`-------->| TPU +---->| Broadcast +--------------->| | |
| `-----` | Stage | | | `------------` |
| `-----------` | `------------------`
`--------------------------------------`

View File

@ -0,0 +1,60 @@
.------------.
| Upstream |
| Validators |
`----+-------`
|
|
.-----------------------------------.
| Validator | |
| v |
| .-----------. .------------. |
.--------. | | Fetch | | Repair | |
| Client +---->| Stage | | Stage | |
`--------` | `---+-------` `----+-------` |
| | | |
| v v |
| .-----------. .------------. |
| | TPU |<-->| Blockstore | |
| | | | | |
| `-----------` `----+-------` |
| | |
| v |
| .------------. |
| | Multicast | |
| | Stage | |
| `----+-------` |
| | |
`-----------------------------------`
|
v
.------------.
| Downstream |
| Validators |
`------------`
.------------.
| PoH |
| Service |
`-------+----`
^ |
| |
.-----------------------------------.
| TPU | | |
| | v |
.-------. | .-----------. .---+--------. | .------------.
| Fetch +---->| SigVerify +--->| Banking |<--->| Blockstore |
| Stage | | | Stage | | Stage | | | |
`-------` | `-----------` `-----+------` | `------------`
| | |
| | |
`-----------------------------------`
|
v
.------------.
| Banktree |
| |
`------------`

View File

@ -1,60 +1,30 @@
.------------.
| Upstream |
| Validators |
`----+-------`
|
|
.-----------------------------------.
| Validator | |
| v |
| .-----------. .------------. |
.--------. | | Fetch | | Repair | |
| Client +---->| Stage | | Stage | |
`--------` | `---+-------` `----+-------` |
| | | |
| v v |
| .-----------. .------------. |
| | TPU |<-->| Blockstore | |
| | | | | |
| `-----------` `----+-------` |
| | |
| v |
| .------------. |
| | Multicast | |
| | Stage | |
| `----+-------` |
| | |
`-----------------------------------`
|
v
.------------.
| Downstream |
| Validators |
`------------`
.------------.
| PoH |
| Service |
`-------+----`
^ |
| |
.-----------------------------------.
| TPU | | |
| | v |
.-------. | .-----------. .---+--------. | .------------.
| Fetch +---->| SigVerify +--->| Banking |<--->| Blockstore |
| Stage | | | Stage | | Stage | | | |
`-------` | `-----------` `-----+------` | `------------`
| | |
| | |
`-----------------------------------`
|
v
.------------.
| Banktree |
| |
`------------`
.--------------------------------------.
| Validator |
| |
.--------. | .-------------------. |
| |---->| | |
| Client | | | JSON RPC Service | |
| |<----| | |
`----+---` | `-------------------` |
| | ^ |
| | | .----------------. | .------------------.
| | | | Gossip Service |<----------| Validators |
| | | `----------------` | | |
| | | ^ | | |
| | | | | | .------------. |
| | .---+---. .----+---. .-----------. | | | | |
| | | Bank |<-+ Replay | | BlobFetch |<------+ Upstream | |
| | | Forks | | Stage | | Stage | | | | Validators | |
| | `-------` `--------` `--+--------` | | | | |
| | ^ ^ | | | `------------` |
| | | | v | | |
| | | .--+--------. | | |
| | | | Blocktree | | | |
| | | `-----------` | | .------------. |
| | | ^ | | | | |
| | | | | | | Downstream | |
| | .--+--. .-------+---. | | | Validators | |
`-------->| TPU +---->| Broadcast +--------------->| | |
| `-----` | Stage | | | `------------` |
| `-----------` | `------------------`
`--------------------------------------`

View File

@ -22,7 +22,7 @@
- [Staking Delegation and Rewards](stake-delegation-and-rewards.md)
- [Performance Metrics](performance-metrics.md)
- [Anatomy of a Fullnode](fullnode.md)
- [Anatomy of a Validator](validator.md)
- [TPU](tpu.md)
- [TVU](tvu.md)
- [Blocktree](blocktree.md)
@ -56,7 +56,7 @@
- [Cluster Test Framework](cluster-test-framework.md)
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
- [Deterministic Transaction Fees](transaction-fees.md)
- [Validator](validator.md)
- [Validator](validator-proposal.md)
- [Implemented Design Proposals](implemented-proposals.md)
- [Fork Selection](fork-selection.md)

View File

@ -20,7 +20,7 @@ least amount of internal plumbing exposed to the test.
Tests are provided an entry point, which is a `contact_info::ContactInfo`
structure, and a keypair that has already been funded.
Each node in the cluster is configured with a `fullnode::FullnodeConfig` at boot
Each node in the cluster is configured with a `validator::ValidatorConfig` at boot
time. At boot time this configuration specifies any extra cluster configuration
required for the test. The cluster should boot with the configuration when it
is run in-process or in a data center.
@ -61,18 +61,18 @@ let cluster_nodes = discover_nodes(&entry_point_info, num_nodes);
To enable specific scenarios, the cluster needs to be booted with special
configurations. These configurations can be captured in
`fullnode::FullnodeConfig`.
`validator::ValidatorConfig`.
For example:
```rust,ignore
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.rpc_config.enable_fullnode_exit = true;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_fullnode_exit = true;
let local = LocalCluster::new_with_config(
num_nodes,
10_000,
100,
&fullnode_config
&validator_config
);
```
@ -86,9 +86,9 @@ advertised gossip nodes.
Configure the RPC service:
```rust,ignore
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.rpc_config.enable_rpc_gossip_push = true;
fullnode_config.rpc_config.enable_rpc_gossip_refresh_active_set = true;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_rpc_gossip_push = true;
validator_config.rpc_config.enable_rpc_gossip_refresh_active_set = true;
```
Wire the RPCs and write a new test:
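A new test might then look something like the following sketch (hypothetical: the real test body is elided from this hunk, and the scenario and assertions here are illustrative only; `ClusterConfig`, `LocalCluster`, and `discover_cluster` are used as they appear elsewhere in this commit):
```rust,ignore
// Hypothetical sketch: boot a small cluster with the gossip-push RPC
// enabled, then check that all nodes become discoverable over gossip.
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_rpc_gossip_push = true;

let num_nodes = 3;
let cluster = LocalCluster::new(&ClusterConfig {
    node_stakes: vec![100; num_nodes],
    cluster_lamports: 10_000,
    validator_config,
    ..ClusterConfig::default()
});

let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
assert_eq!(cluster_nodes.len(), num_nodes);
```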

View File

@ -28,7 +28,7 @@ its copy.
## Joining a Cluster
Fullnodes and replicators enter the cluster via registration messages sent to
Validators and replicators enter the cluster via registration messages sent to
its *control plane*. The control plane is implemented using a *gossip*
protocol, meaning that a node may register with any existing node, and expect
its registration to propagate to all nodes in the cluster. The time it takes

View File

@ -1,29 +0,0 @@
# Anatomy of a Fullnode
<img alt="Fullnode block diagrams" src="img/fullnode.svg" class="center"/>
## Pipelining
The fullnodes make extensive use of an optimization common in CPU design,
called *pipelining*. Pipelining is the right tool for the job when there's a
stream of input data that needs to be processed by a sequence of steps, and
there's different hardware responsible for each. The quintessential example is
using a washer and dryer to wash/dry/fold several loads of laundry. Washing
must occur before drying and drying before folding, but each of the three
operations is performed by a separate unit. To maximize efficiency, one creates
a pipeline of *stages*. We'll call the washer one stage, the dryer another, and
the folding process a third. To run the pipeline, one adds a second load of
laundry to the washer just after the first load is added to the dryer.
Likewise, the third load is added to the washer after the second is in the
dryer and the first is being folded. In this way, one can make progress on
three loads of laundry simultaneously. Given infinite loads, the pipeline will
consistently complete a load at the rate of the slowest stage in the pipeline.
## Pipelining in the Fullnode
The fullnode contains two pipelined processes, one used in leader mode called
the TPU and one used in validator mode called the TVU. In both cases, the
hardware being pipelined is the same, the network input, the GPU cards, the CPU
cores, writes to disk, and the network output. What it does with that hardware
is different. The TPU exists to create ledger entries whereas the TVU exists
to validate them.

View File

@ -1,6 +1,6 @@
# Gossip Service
The Gossip Service acts as a gateway to nodes in the control plane. Fullnodes
The Gossip Service acts as a gateway to nodes in the control plane. Validators
use the service to ensure information is available to all other nodes in a cluster.
The service broadcasts information using a gossip protocol.

View File

@ -57,7 +57,7 @@ Forwarding is preferred, as it would minimize network congestion, allowing the
cluster to advertise higher TPS capacity.
## Fullnode Loop
## Validator Loop
The PoH Recorder manages the transition between modes. Once a ledger is
replayed, the validator can run until the recorder indicates it should be

View File

@ -0,0 +1,56 @@
# Anatomy of a Validator
## History
When we first started Solana, the goal was to de-risk our TPS claims. We knew
that, between optimistic concurrency control and sufficiently long leader slots,
PoS consensus was not the biggest risk to TPS. It was GPU-based signature
verification, software pipelining and concurrent banking. Thus, the TPU was
born. After topping 100k TPS, we split the team into one group working toward
710k TPS and another to flesh out the validator pipeline. Hence, the TVU was
born. The current architecture is a consequence of incremental development with
that ordering and project priorities. It is not a reflection of what we ever
believed was the most technically elegant cross-section of those technologies.
In the context of leader rotation, the strong distinction between leading and
validating is blurred.
## Difference between validating and leading
The fundamental difference between the pipelines is when the PoH is present. In
a leader, we process transactions, removing bad ones, and then tag the result
with a PoH hash. In the validator, we verify that hash, peel it off, and
process the transactions in exactly the same way. The only difference is that
if a validator sees a bad transaction, it can't simply remove it like the
leader does, because that would cause the PoH hash to change. Instead, it
rejects the whole block. The other difference between the pipelines is what
happens *after* banking. The leader broadcasts entries to downstream validators
whereas the validator will have already done that in RetransmitStage, which is
a confirmation time optimization. The validation pipeline, on the other hand,
has one last step. Any time it finishes processing a block, it needs to weigh
any forks it's observing, possibly cast a vote, and if so, reset its PoH hash
to the block hash it just voted on.
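A toy model of that asymmetry (illustrative only, not Solana code: plain integers stand in for transactions, "bad" means negative, and a standard-library hash stands in for the PoH tag):
```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for the PoH tag over a batch of transactions.
fn tag(txs: &[i64]) -> u64 {
    let mut hasher = DefaultHasher::new();
    txs.hash(&mut hasher);
    hasher.finish()
}

// Leader: drop bad transactions, then tag the survivors.
fn leader_process(txs: Vec<i64>) -> (u64, Vec<i64>) {
    let good: Vec<i64> = txs.into_iter().filter(|tx| *tx >= 0).collect();
    (tag(&good), good)
}

// Validator: verify the tag, then process. A bad transaction can't simply
// be dropped here, since that would change the hash, so the whole block
// is rejected instead.
fn validator_process(poh_tag: u64, txs: &[i64]) -> Result<(), &'static str> {
    if tag(txs) != poh_tag {
        return Err("hash mismatch");
    }
    if txs.iter().any(|tx| *tx < 0) {
        return Err("bad transaction: reject the whole block");
    }
    Ok(())
}

fn main() {
    let (poh_tag, block) = leader_process(vec![1, -2, 3]);
    assert!(validator_process(poh_tag, &block).is_ok());
    // A block whose contents don't match its tag is rejected wholesale.
    assert!(validator_process(poh_tag, &[1, -2, 3]).is_err());
}
```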
## Proposed Design
We unwrap the many abstraction layers and build a single pipeline that can
toggle leader mode on whenever the validator's ID shows up in the leader
schedule.
<img alt="Validator block diagram" src="img/validator-proposal.svg" class="center"/>
## Notable changes
* No threads are shut down to switch out of leader mode. Instead, FetchStage
should forward transactions to the next leader.
* Hoist FetchStage and BroadcastStage out of TPU
* Blocktree renamed to Blockstore
* BankForks renamed to Banktree
* TPU moves to new socket-free crate called solana-tpu.
* TPU's BankingStage absorbs ReplayStage
* TVU goes away
* New RepairStage absorbs Blob Fetch Stage and repair requests
* JSON RPC Service is optional - used for debugging. It should instead be part
of a separate `solana-blockstreamer` executable.
* New MulticastStage absorbs retransmit part of RetransmitStage
* MulticastStage downstream of Blockstore

View File

@ -1,56 +1,29 @@
# Anatomy of a Validator
## History
<img alt="Validator block diagrams" src="img/validator.svg" class="center"/>
When we first started Solana, the goal was to de-risk our TPS claims. We knew
that, between optimistic concurrency control and sufficiently long leader slots,
PoS consensus was not the biggest risk to TPS. It was GPU-based signature
verification, software pipelining and concurrent banking. Thus, the TPU was
born. After topping 100k TPS, we split the team into one group working toward
710k TPS and another to flesh out the validator pipeline. Hence, the TVU was
born. The current architecture is a consequence of incremental development with
that ordering and project priorities. It is not a reflection of what we ever
believed was the most technically elegant cross-section of those technologies.
In the context of leader rotation, the strong distinction between leading and
validating is blurred.
## Pipelining
## Difference between validating and leading
The validators make extensive use of an optimization common in CPU design,
called *pipelining*. Pipelining is the right tool for the job when there's a
stream of input data that needs to be processed by a sequence of steps, and
there's different hardware responsible for each. The quintessential example is
using a washer and dryer to wash/dry/fold several loads of laundry. Washing
must occur before drying and drying before folding, but each of the three
operations is performed by a separate unit. To maximize efficiency, one creates
a pipeline of *stages*. We'll call the washer one stage, the dryer another, and
the folding process a third. To run the pipeline, one adds a second load of
laundry to the washer just after the first load is added to the dryer.
Likewise, the third load is added to the washer after the second is in the
dryer and the first is being folded. In this way, one can make progress on
three loads of laundry simultaneously. Given infinite loads, the pipeline will
consistently complete a load at the rate of the slowest stage in the pipeline.
The fundamental difference between the pipelines is when the PoH is present. In
a leader, we process transactions, removing bad ones, and then tag the result
with a PoH hash. In the validator, we verify that hash, peel it off, and
process the transactions in exactly the same way. The only difference is that
if a validator sees a bad transaction, it can't simply remove it like the
leader does, because that would cause the PoH hash to change. Instead, it
rejects the whole block. The other difference between the pipelines is what
happens *after* banking. The leader broadcasts entries to downstream validators
whereas the validator will have already done that in RetransmitStage, which is
a confirmation time optimization. The validation pipeline, on the other hand,
has one last step. Any time it finishes processing a block, it needs to weigh
any forks it's observing, possibly cast a vote, and if so, reset its PoH hash
to the block hash it just voted on.
## Proposed Design
We unwrap the many abstraction layers and build a single pipeline that can
toggle leader mode on whenever the validator's ID shows up in the leader
schedule.
<img alt="Validator block diagram" src="img/validator.svg" class="center"/>
## Notable changes
* No threads are shut down to switch out of leader mode. Instead, FetchStage
should forward transactions to the next leader.
* Hoist FetchStage and BroadcastStage out of TPU
* Blocktree renamed to Blockstore
* BankForks renamed to Banktree
* TPU moves to new socket-free crate called solana-tpu.
* TPU's BankingStage absorbs ReplayStage
* TVU goes away
* New RepairStage absorbs Blob Fetch Stage and repair requests
* JSON RPC Service is optional - used for debugging. It should instead be part
of a separate `solana-blockstreamer` executable.
* New MulticastStage absorbs retransmit part of RetransmitStage
* MulticastStage downstream of Blockstore
## Pipelining in the Validator
The validator contains two pipelined processes, one used in leader mode called
the TPU and one used in validator mode called the TVU. In both cases, the
hardware being pipelined is the same, the network input, the GPU cards, the CPU
cores, writes to disk, and the network output. What it does with that hardware
is different. The TPU exists to create ledger entries whereas the TVU exists
to validate them.
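The laundry analogy maps directly onto threads connected by channels. A minimal standalone sketch (plain standard-library Rust, not the actual TPU/TVU code):
```rust
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    // One channel between each pair of adjacent stages.
    let (washed_tx, washed_rx) = channel();
    let (dried_tx, dried_rx) = channel();

    // Washer stage: hands each load downstream as soon as it finishes.
    let washer = thread::spawn(move || {
        for load in 1..=3 {
            washed_tx.send(load).unwrap();
        }
        // Dropping washed_tx here closes the pipeline behind the last load.
    });

    // Dryer stage: runs concurrently with the washer.
    let dryer = thread::spawn(move || {
        while let Ok(load) = washed_rx.recv() {
            dried_tx.send(load).unwrap();
        }
    });

    // Folder stage: the final consumer.
    let folder = thread::spawn(move || {
        while let Ok(load) = dried_rx.recv() {
            println!("load {} washed, dried, and folded", load);
        }
    });

    washer.join().unwrap();
    dryer.join().unwrap();
    folder.join().unwrap();
}
```
With a steady stream of loads and real work in each closure, throughput settles at the rate of the slowest stage, as described above.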

View File

@ -1,6 +1,6 @@
//! The `solana` library implements the Solana high-performance blockchain architecture.
//! It includes a full Rust implementation of the architecture (see
//! [Fullnode](server/struct.Fullnode.html)) as well as hooks to GPU implementations of its most
//! [Validator](server/struct.Validator.html)) as well as hooks to GPU implementations of its most
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
//! command-line tools to spin up fullnodes and a Rust library
//!
@ -34,7 +34,6 @@ pub mod cluster_tests;
pub mod entry;
pub mod erasure;
pub mod fetch_stage;
pub mod fullnode;
pub mod gen_keys;
pub mod genesis_utils;
pub mod gossip_service;
@ -68,6 +67,7 @@ pub mod streamer;
pub mod test_tx;
pub mod tpu;
pub mod tvu;
pub mod validator;
pub mod window_service;
#[macro_use]

View File

@ -2,11 +2,11 @@ use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree};
use crate::cluster::Cluster;
use crate::cluster_info::{Node, FULLNODE_PORT_RANGE};
use crate::contact_info::ContactInfo;
use crate::fullnode::{Fullnode, FullnodeConfig};
use crate::genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo};
use crate::gossip_service::discover_cluster;
use crate::replicator::Replicator;
use crate::service::Service;
use crate::validator::{Validator, ValidatorConfig};
use solana_client::thin_client::create_client;
use solana_client::thin_client::ThinClient;
use solana_sdk::client::SyncClient;
@ -28,7 +28,7 @@ use std::fs::remove_dir_all;
use std::io::{Error, ErrorKind, Result};
use std::sync::Arc;
pub struct FullnodeInfo {
pub struct ValidatorInfo {
pub keypair: Arc<Keypair>,
pub voting_keypair: Arc<Keypair>,
pub storage_keypair: Arc<Keypair>,
@ -52,7 +52,7 @@ impl ReplicatorInfo {
#[derive(Clone, Debug)]
pub struct ClusterConfig {
/// The validator config that should be applied to every node in the cluster
pub fullnode_config: FullnodeConfig,
pub validator_config: ValidatorConfig,
/// Number of replicators in the cluster
/// Note- replicators will timeout if ticks_per_slot is much larger than the default 8
pub num_replicators: usize,
@ -71,7 +71,7 @@ pub struct ClusterConfig {
impl Default for ClusterConfig {
fn default() -> Self {
ClusterConfig {
fullnode_config: FullnodeConfig::default(),
validator_config: ValidatorConfig::default(),
num_replicators: 0,
num_listeners: 0,
node_stakes: vec![],
@ -87,12 +87,12 @@ impl Default for ClusterConfig {
pub struct LocalCluster {
/// Keypair with funding to participate in the network
pub funding_keypair: Keypair,
pub fullnode_config: FullnodeConfig,
pub validator_config: ValidatorConfig,
/// Entry point from which the rest of the network can be discovered
pub entry_point_info: ContactInfo,
pub fullnode_infos: HashMap<Pubkey, FullnodeInfo>,
pub listener_infos: HashMap<Pubkey, FullnodeInfo>,
fullnodes: HashMap<Pubkey, Fullnode>,
pub fullnode_infos: HashMap<Pubkey, ValidatorInfo>,
pub listener_infos: HashMap<Pubkey, ValidatorInfo>,
fullnodes: HashMap<Pubkey, Validator>,
genesis_ledger_path: String,
pub genesis_block: GenesisBlock,
replicators: Vec<Replicator>,
@ -140,7 +140,7 @@ impl LocalCluster {
let leader_contact_info = leader_node.info.clone();
let leader_storage_keypair = Arc::new(storage_keypair);
let leader_voting_keypair = Arc::new(voting_keypair);
let leader_server = Fullnode::new(
let leader_server = Validator::new(
leader_node,
&leader_keypair,
&leader_ledger_path,
@ -148,7 +148,7 @@ impl LocalCluster {
&leader_voting_keypair,
&leader_storage_keypair,
None,
&config.fullnode_config,
&config.validator_config,
);
let mut fullnodes = HashMap::new();
@ -156,7 +156,7 @@ impl LocalCluster {
fullnodes.insert(leader_pubkey, leader_server);
fullnode_infos.insert(
leader_pubkey,
FullnodeInfo {
ValidatorInfo {
keypair: leader_keypair,
voting_keypair: leader_voting_keypair,
storage_keypair: leader_storage_keypair,
@ -173,17 +173,17 @@ impl LocalCluster {
genesis_block,
fullnode_infos,
replicator_infos: HashMap::new(),
fullnode_config: config.fullnode_config.clone(),
validator_config: config.validator_config.clone(),
listener_infos: HashMap::new(),
};
for stake in &config.node_stakes[1..] {
cluster.add_validator(&config.fullnode_config, *stake);
cluster.add_validator(&config.validator_config, *stake);
}
let listener_config = FullnodeConfig {
let listener_config = ValidatorConfig {
voting_disabled: true,
..config.fullnode_config.clone()
..config.validator_config.clone()
};
(0..config.num_listeners).for_each(|_| cluster.add_validator(&listener_config, 0));
@ -223,7 +223,7 @@ impl LocalCluster {
}
}
fn add_validator(&mut self, fullnode_config: &FullnodeConfig, stake: u64) {
fn add_validator(&mut self, validator_config: &ValidatorConfig, stake: u64) {
let client = create_client(
self.entry_point_info.client_facing_addr(),
FULLNODE_PORT_RANGE,
@ -237,7 +237,7 @@ impl LocalCluster {
let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
let ledger_path = tmp_copy_blocktree!(&self.genesis_ledger_path);
if fullnode_config.voting_disabled {
if validator_config.voting_disabled {
// setup as a listener
info!("listener {} ", validator_pubkey,);
} else {
@ -266,7 +266,7 @@ impl LocalCluster {
}
let voting_keypair = Arc::new(voting_keypair);
let validator_server = Fullnode::new(
let validator_server = Validator::new(
validator_node,
&validator_keypair,
&ledger_path,
@ -274,15 +274,15 @@ impl LocalCluster {
&voting_keypair,
&storage_keypair,
Some(&self.entry_point_info),
&fullnode_config,
&validator_config,
);
self.fullnodes
.insert(validator_keypair.pubkey(), validator_server);
if fullnode_config.voting_disabled {
if validator_config.voting_disabled {
self.listener_infos.insert(
validator_keypair.pubkey(),
FullnodeInfo {
ValidatorInfo {
keypair: validator_keypair,
voting_keypair,
storage_keypair,
@ -292,7 +292,7 @@ impl LocalCluster {
} else {
self.fullnode_infos.insert(
validator_keypair.pubkey(),
FullnodeInfo {
ValidatorInfo {
keypair: validator_keypair,
voting_keypair,
storage_keypair,
@ -522,7 +522,7 @@ impl Cluster for LocalCluster {
if pubkey == self.entry_point_info.id {
self.entry_point_info = node.info.clone();
}
let restarted_node = Fullnode::new(
let restarted_node = Validator::new(
node,
&fullnode_info.keypair,
&fullnode_info.ledger_path,
@ -530,7 +530,7 @@ impl Cluster for LocalCluster {
&fullnode_info.voting_keypair,
&fullnode_info.storage_keypair,
None,
&self.fullnode_config,
&self.validator_config,
);
self.fullnodes.insert(pubkey, restarted_node);
@ -561,13 +561,13 @@ mod test {
#[test]
fn test_local_cluster_start_and_exit_with_config() {
solana_logger::setup();
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.rpc_config.enable_fullnode_exit = true;
fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_fullnode_exit = true;
validator_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
const NUM_NODES: usize = 1;
let num_replicators = 1;
let config = ClusterConfig {
fullnode_config,
validator_config,
num_replicators,
node_stakes: vec![3; NUM_NODES],
cluster_lamports: 100,

View File

@ -32,7 +32,7 @@ use std::sync::{Arc, Mutex, RwLock};
use std::thread::Result;
#[derive(Clone, Debug)]
pub struct FullnodeConfig {
pub struct ValidatorConfig {
pub sigverify_disabled: bool,
pub voting_disabled: bool,
pub blockstream: Option<String>,
@ -40,7 +40,7 @@ pub struct FullnodeConfig {
pub account_paths: Option<String>,
pub rpc_config: JsonRpcConfig,
}
impl Default for FullnodeConfig {
impl Default for ValidatorConfig {
fn default() -> Self {
// TODO: remove this, temporary parameter to configure
// storage amount differently for test configurations
@ -57,7 +57,7 @@ impl Default for FullnodeConfig {
}
}
pub struct Fullnode {
pub struct Validator {
pub id: Pubkey,
exit: Arc<AtomicBool>,
rpc_service: Option<JsonRpcService>,
@ -70,7 +70,7 @@ pub struct Fullnode {
ip_echo_server: solana_netutil::IpEchoServer,
}
impl Fullnode {
impl Validator {
pub fn new(
mut node: Node,
keypair: &Arc<Keypair>,
@ -79,7 +79,7 @@ impl Fullnode {
voting_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
entrypoint_info_option: Option<&ContactInfo>,
config: &FullnodeConfig,
config: &ValidatorConfig,
) -> Self {
info!("creating bank...");
@ -321,7 +321,7 @@ pub fn new_banks_from_blocktree(
)
}
impl Service for Fullnode {
impl Service for Validator {
type JoinReturnType = ();
fn join(self) -> Result<()> {
@ -343,7 +343,7 @@ impl Service for Fullnode {
}
}
pub fn new_fullnode_for_tests() -> (Fullnode, ContactInfo, Keypair, String) {
pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, String) {
use crate::blocktree::create_new_tmp_ledger;
use crate::genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo};
@ -364,7 +364,7 @@ pub fn new_fullnode_for_tests() -> (Fullnode, ContactInfo, Keypair, String) {
let voting_keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
let node = Fullnode::new(
let node = Validator::new(
node,
&node_keypair,
&ledger_path,
@ -372,7 +372,7 @@ pub fn new_fullnode_for_tests() -> (Fullnode, ContactInfo, Keypair, String) {
&voting_keypair,
&storage_keypair,
None,
&FullnodeConfig::default(),
&ValidatorConfig::default(),
);
discover_cluster(&contact_info.gossip, 1).expect("Node startup failed");
(node, contact_info, mint_keypair, ledger_path)
@ -399,7 +399,7 @@ mod tests {
let voting_keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
let validator = Fullnode::new(
let validator = Validator::new(
validator_node,
&Arc::new(validator_keypair),
&validator_ledger_path,
@ -407,7 +407,7 @@ mod tests {
&voting_keypair,
&storage_keypair,
Some(&leader_node.info),
&FullnodeConfig::default(),
&ValidatorConfig::default(),
);
validator.close().unwrap();
remove_dir_all(validator_ledger_path).unwrap();
@ -419,7 +419,7 @@ mod tests {
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
let mut ledger_paths = vec![];
let validators: Vec<Fullnode> = (0..2)
let validators: Vec<Validator> = (0..2)
.map(|_| {
let validator_keypair = Keypair::new();
let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
@ -430,7 +430,7 @@ mod tests {
ledger_paths.push(validator_ledger_path.clone());
let voting_keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
Fullnode::new(
Validator::new(
validator_node,
&Arc::new(validator_keypair),
&validator_ledger_path,
@ -438,7 +438,7 @@ mod tests {
&voting_keypair,
&storage_keypair,
Some(&leader_node.info),
&FullnodeConfig::default(),
&ValidatorConfig::default(),
)
})
.collect();

View File

@ -2,9 +2,9 @@ extern crate solana;
use solana::cluster::Cluster;
use solana::cluster_tests;
use solana::fullnode::FullnodeConfig;
use solana::gossip_service::discover_cluster;
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana::validator::ValidatorConfig;
use solana_runtime::epoch_schedule::MINIMUM_SLOT_LENGTH;
use solana_sdk::poh_config::PohConfig;
use solana_sdk::timing;
@ -75,12 +75,12 @@ fn test_fullnode_exit_default_config_should_panic() {
fn test_fullnode_exit_2() {
solana_logger::setup();
let num_nodes = 2;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.rpc_config.enable_fullnode_exit = true;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_fullnode_exit = true;
let config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; 2],
fullnode_config,
validator_config,
..ClusterConfig::default()
};
let local = LocalCluster::new(&config);
@ -92,12 +92,12 @@ fn test_fullnode_exit_2() {
fn test_leader_failure_4() {
solana_logger::setup();
let num_nodes = 4;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.rpc_config.enable_fullnode_exit = true;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_fullnode_exit = true;
let config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; 4],
fullnode_config: fullnode_config.clone(),
validator_config: validator_config.clone(),
..ClusterConfig::default()
};
let local = LocalCluster::new(&config);
@ -111,16 +111,16 @@ fn test_leader_failure_4() {
#[test]
fn test_two_unbalanced_stakes() {
solana_logger::setup();
let mut fullnode_config = FullnodeConfig::default();
let mut validator_config = ValidatorConfig::default();
let num_ticks_per_second = 100;
let num_ticks_per_slot = 10;
let num_slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
fullnode_config.rpc_config.enable_fullnode_exit = true;
validator_config.rpc_config.enable_fullnode_exit = true;
let mut cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![999_990, 3],
cluster_lamports: 1_000_000,
fullnode_config: fullnode_config.clone(),
validator_config: validator_config.clone(),
ticks_per_slot: num_ticks_per_slot,
slots_per_epoch: num_slots_per_epoch,
poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
@ -164,13 +164,13 @@ fn test_forwarding() {
#[test]
fn test_restart_node() {
let fullnode_config = FullnodeConfig::default();
let validator_config = ValidatorConfig::default();
let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
let ticks_per_slot = 16;
let mut cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![3],
cluster_lamports: 100,
fullnode_config: fullnode_config.clone(),
validator_config: validator_config.clone(),
ticks_per_slot,
slots_per_epoch,
..ClusterConfig::default()

View File

@ -8,13 +8,13 @@ use bincode::{deserialize, serialize};
use solana::blocktree::{create_new_tmp_ledger, Blocktree};
use solana::cluster_info::{ClusterInfo, Node, FULLNODE_PORT_RANGE};
use solana::contact_info::ContactInfo;
use solana::fullnode::FullnodeConfig;
use solana::gossip_service::discover_cluster;
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana::replicator::Replicator;
use solana::replicator::ReplicatorRequest;
use solana::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use solana::streamer::blob_receiver;
use solana::validator::ValidatorConfig;
use solana_client::thin_client::create_client;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::hash::Hash;
@ -103,10 +103,10 @@ fn run_replicator_startup_basic(num_nodes: usize, num_replicators: usize) {
solana_logger::setup();
info!("starting replicator test");
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let mut validator_config = ValidatorConfig::default();
validator_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let config = ClusterConfig {
fullnode_config,
validator_config,
num_replicators,
node_stakes: vec![100; num_nodes],
cluster_lamports: 10_000,
@ -189,8 +189,8 @@ fn test_replicator_startup_leader_hang() {
fn test_replicator_startup_ledger_hang() {
solana_logger::setup();
info!("starting replicator test");
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let mut validator_config = ValidatorConfig::default();
validator_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let cluster = LocalCluster::new_with_equal_stakes(2, 10_000, 100);
info!("starting replicator node");
@ -217,10 +217,10 @@ fn test_replicator_startup_ledger_hang() {
fn test_account_setup() {
let num_nodes = 1;
let num_replicators = 1;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let mut validator_config = ValidatorConfig::default();
validator_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
let config = ClusterConfig {
fullnode_config,
validator_config,
num_replicators,
node_stakes: vec![100; num_nodes],
cluster_lamports: 10_000,

View File

@ -3,7 +3,7 @@ use log::*;
use reqwest;
use reqwest::header::CONTENT_TYPE;
use serde_json::{json, Value};
use solana::fullnode::new_fullnode_for_tests;
use solana::validator::new_validator_for_tests;
use solana_client::rpc_client::get_rpc_request_str;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
@ -16,7 +16,7 @@ use std::time::Duration;
fn test_rpc_send_tx() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_fullnode_for_tests();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let bob_pubkey = Pubkey::new_rand();
let client = reqwest::Client::new();

View File

@ -7,7 +7,6 @@ use solana::blocktree::{create_new_tmp_ledger, Blocktree};
use solana::cluster_info::{ClusterInfo, Node};
use solana::entry::next_entry_mut;
use solana::entry::EntrySlice;
use solana::fullnode;
use solana::genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo};
use solana::gossip_service::GossipService;
use solana::packet::index_blobs;
@ -17,6 +16,7 @@ use solana::storage_stage::StorageState;
use solana::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use solana::streamer;
use solana::tvu::{Sockets, Tvu};
use solana::validator;
use solana_runtime::epoch_schedule::MINIMUM_SLOT_LENGTH;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
@ -94,7 +94,7 @@ fn test_replay() {
completed_slots_receiver,
leader_schedule_cache,
_,
) = fullnode::new_banks_from_blocktree(&blocktree_path, None);
) = validator::new_banks_from_blocktree(&blocktree_path, None);
let working_bank = bank_forks.working_bank();
assert_eq!(
working_bank.get_balance(&mint_keypair.pubkey()),

View File

@ -265,9 +265,9 @@ elif [[ $node_type = replicator ]]; then
read -r entrypoint entrypoint_address shift < <(find_entrypoint "${positional_args[@]}")
shift "$shift"
replicator_keypair_path=$SOLANA_CONFIG_DIR/replicator-id.json
replicator_storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-id.json
ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger
replicator_keypair_path=$SOLANA_CONFIG_DIR/replicator-keypair$label.json
replicator_storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-keypair$label.json
ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger$label
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$replicator_keypair_path" ]] || $solana_keygen -o "$replicator_keypair_path"

multinode-demo/replicator-x.sh (new executable file, 7 lines)
View File

@ -0,0 +1,7 @@
#!/usr/bin/env bash
#
# Start a dynamically-configured replicator
#
here=$(dirname "$0")
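# $$ expands to this shell's PID, so each instance gets a unique label and,
# via replicator.sh, its own keypair and ledger directory.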
exec "$here"/replicator.sh --label x$$ "$@"

View File

@ -464,7 +464,7 @@ start() {
fi
declare bootstrapLeader=true
declare nodeType=fullnode
declare nodeType=validator
declare loopCount=0
for ipAddress in "${fullnodeIpList[@]}" - "${blockstreamerIpList[@]}"; do
if [[ $ipAddress = - ]]; then

View File

@ -2,10 +2,10 @@ use clap::{crate_description, crate_name, crate_version, App, Arg};
use log::*;
use solana::cluster_info::{Node, FULLNODE_PORT_RANGE};
use solana::contact_info::ContactInfo;
use solana::fullnode::{Fullnode, FullnodeConfig};
use solana::local_vote_signer_service::LocalVoteSignerService;
use solana::service::Service;
use solana::socketaddr;
use solana::validator::{Validator, ValidatorConfig};
use solana_netutil::parse_port_range;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
use std::fs::File;
@ -23,7 +23,7 @@ fn port_range_validator(port_range: String) -> Result<(), String> {
fn main() {
solana_logger::setup();
solana_metrics::set_panic_hook("fullnode");
solana_metrics::set_panic_hook("validator");
let default_dynamic_port_range =
&format!("{}-{}", FULLNODE_PORT_RANGE.0, FULLNODE_PORT_RANGE.1);
@ -158,7 +158,7 @@ fn main() {
)
.get_matches();
let mut fullnode_config = FullnodeConfig::default();
let mut validator_config = ValidatorConfig::default();
let keypair = if let Some(identity) = matches.value_of("identity") {
read_keypair(identity).unwrap_or_else(|err| {
eprintln!("{}: Unable to open keypair file: {}", err, identity);
@ -192,14 +192,14 @@ fn main() {
let ledger_path = matches.value_of("ledger").unwrap();
fullnode_config.sigverify_disabled = matches.is_present("no_sigverify");
validator_config.sigverify_disabled = matches.is_present("no_sigverify");
fullnode_config.voting_disabled = matches.is_present("no_voting");
validator_config.voting_disabled = matches.is_present("no_voting");
if matches.is_present("enable_rpc_exit") {
fullnode_config.rpc_config.enable_fullnode_exit = true;
validator_config.rpc_config.enable_fullnode_exit = true;
}
fullnode_config.rpc_config.drone_addr = matches.value_of("rpc_drone_address").map(|address| {
validator_config.rpc_config.drone_addr = matches.value_of("rpc_drone_address").map(|address| {
solana_netutil::parse_host_port(address).expect("failed to parse drone address")
});
@ -216,9 +216,9 @@ fn main() {
);
if let Some(paths) = matches.value_of("accounts") {
fullnode_config.account_paths = Some(paths.to_string());
validator_config.account_paths = Some(paths.to_string());
} else {
fullnode_config.account_paths = None;
validator_config.account_paths = None;
}
let cluster_entrypoint = matches.value_of("entrypoint").map(|entrypoint| {
let entrypoint_addr = solana_netutil::parse_host_port(entrypoint)
@ -238,7 +238,7 @@ fn main() {
(Some(signer_service), signer_addr)
};
let init_complete_file = matches.value_of("init_complete_file");
fullnode_config.blockstream = matches.value_of("blockstream").map(ToString::to_string);
validator_config.blockstream = matches.value_of("blockstream").map(ToString::to_string);
let keypair = Arc::new(keypair);
let mut node = Node::new_with_external_ip(&keypair.pubkey(), &gossip_addr, dynamic_port_range);
@ -252,7 +252,7 @@ fn main() {
node.info.rpc_pubsub = SocketAddr::new(gossip_addr.ip(), port_number + 1);
};
let fullnode = Fullnode::new(
let validator = Validator::new(
node,
&keypair,
ledger_path,
@ -260,13 +260,13 @@ fn main() {
&Arc::new(voting_keypair),
&Arc::new(storage_keypair),
cluster_entrypoint.as_ref(),
&fullnode_config,
&validator_config,
);
if let Some(filename) = init_complete_file {
File::create(filename).unwrap_or_else(|_| panic!("Unable to create: {}", filename));
}
info!("Node initialized");
fullnode.join().expect("fullnode exit");
info!("Node exiting..");
info!("Validator initialized");
validator.join().expect("validator exit");
info!("Validator exiting..");
}

View File

@ -1,5 +1,5 @@
use serde_json::{json, Value};
use solana::fullnode::new_fullnode_for_tests;
use solana::validator::new_validator_for_tests;
use solana_client::rpc_client::RpcClient;
use solana_client::rpc_request::RpcRequest;
use solana_drone::drone::run_local_drone;
@ -20,7 +20,7 @@ fn test_wallet_deploy_program() {
pathbuf.push("noop");
pathbuf.set_extension("so");
let (server, leader_data, alice, ledger_path) = new_fullnode_for_tests();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_drone(alice, sender, None);

View File

@ -11,7 +11,7 @@ use std::fs::remove_dir_all;
use std::sync::mpsc::channel;
#[cfg(test)]
use solana::fullnode::new_fullnode_for_tests;
use solana::validator::new_validator_for_tests;
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
@ -20,7 +20,7 @@ fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
#[test]
fn test_wallet_timestamp_tx() {
let (server, leader_data, alice, ledger_path) = new_fullnode_for_tests();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let bob_pubkey = Pubkey::new_rand();
let (sender, receiver) = channel();
@ -85,7 +85,7 @@ fn test_wallet_timestamp_tx() {
#[test]
fn test_wallet_witness_tx() {
let (server, leader_data, alice, ledger_path) = new_fullnode_for_tests();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let bob_pubkey = Pubkey::new_rand();
let (sender, receiver) = channel();
@ -147,7 +147,7 @@ fn test_wallet_witness_tx() {
#[test]
fn test_wallet_cancel_tx() {
let (server, leader_data, alice, ledger_path) = new_fullnode_for_tests();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let bob_pubkey = Pubkey::new_rand();
let (sender, receiver) = channel();

View File

@ -1,4 +1,4 @@
use solana::fullnode::new_fullnode_for_tests;
use solana::validator::new_validator_for_tests;
use solana_client::rpc_client::RpcClient;
use solana_drone::drone::run_local_drone;
use solana_sdk::signature::KeypairUtil;
@ -8,7 +8,7 @@ use std::sync::mpsc::channel;
#[test]
fn test_wallet_request_airdrop() {
let (server, leader_data, alice, ledger_path) = new_fullnode_for_tests();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_drone(alice, sender, None);
let drone_addr = receiver.recv().unwrap();