More fullnode to validator renaming (#6337)

Greg Fitzgerald 2019-10-11 13:30:52 -06:00 committed by GitHub
parent 5650231df3
commit 322fcea6e5
17 changed files with 88 additions and 87 deletions

View File

@@ -207,7 +207,7 @@ killNodes() {
set -x
curl --retry 5 --retry-delay 2 --retry-connrefused \
-X POST -H 'Content-Type: application/json' \
- -d '{"jsonrpc":"2.0","id":1, "method":"fullnodeExit"}' \
+ -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' \
http://localhost:$port
)
done
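On success the node replies with a standard JSON-RPC result wrapping the handler's boolean, e.g. {"jsonrpc":"2.0","result":true,"id":1}; a false result means the node was started without the exit API enabled (see the --enable-rpc-exit flag near the end of this diff). The sample payload here is an illustration, not captured output.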

View File

@@ -679,20 +679,20 @@ impl RpcClient {
})
}
- pub fn fullnode_exit(&self) -> io::Result<bool> {
+ pub fn validator_exit(&self) -> io::Result<bool> {
let response = self
.client
- .send(&RpcRequest::FullnodeExit, None, 0)
+ .send(&RpcRequest::ValidatorExit, None, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
- format!("FullnodeExit request failure: {:?}", err),
+ format!("ValidatorExit request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
- format!("FullnodeExit parse failure: {:?}", err),
+ format!("ValidatorExit parse failure: {:?}", err),
)
})
}
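For orientation, here is a minimal sketch of driving the renamed client method end to end. The solana_client import path and the RPC address are assumptions for illustration, not part of this commit:

use std::net::SocketAddr;
use solana_client::rpc_client::RpcClient;

fn main() -> std::io::Result<()> {
    // Assumes a local node launched with --enable-rpc-exit; the address is a placeholder.
    let rpc_addr: SocketAddr = "127.0.0.1:8899".parse().unwrap();
    let client = RpcClient::new_socket(rpc_addr);
    // Ok(true) means the node accepted the exit request; Ok(false) means the
    // API is disabled (the default), per the server-side handler later in this diff.
    let accepted = client.validator_exit()?;
    println!("exit accepted: {}", accepted);
    Ok(())
}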

View File

@@ -21,7 +21,7 @@ pub struct RpcEpochInfo {
pub enum RpcRequest {
ConfirmTransaction,
DeregisterNode,
- FullnodeExit,
+ ValidatorExit,
GetAccountInfo,
GetBalance,
GetClusterNodes,
@@ -54,7 +54,7 @@ impl RpcRequest {
let method = match self {
RpcRequest::ConfirmTransaction => "confirmTransaction",
RpcRequest::DeregisterNode => "deregisterNode",
- RpcRequest::FullnodeExit => "fullnodeExit",
+ RpcRequest::ValidatorExit => "validatorExit",
RpcRequest::GetAccountInfo => "getAccountInfo",
RpcRequest::GetBalance => "getBalance",
RpcRequest::GetClusterNodes => "getClusterNodes",
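The string on the right of each arm is the wire-level method name, so RpcRequest::ValidatorExit produces exactly the "method":"validatorExit" field used by the curl invocation at the top of this diff.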

View File

@@ -268,8 +268,8 @@ impl ThinClient {
self.rpc_client().check_signature(signature)
}
- pub fn fullnode_exit(&self) -> io::Result<bool> {
- self.rpc_client().fullnode_exit()
+ pub fn validator_exit(&self) -> io::Result<bool> {
+ self.rpc_client().validator_exit()
}
pub fn get_num_blocks_since_signature_confirmation(

View File

@@ -2,7 +2,7 @@
//! It includes a full Rust implementation of the architecture (see
//! [Validator](server/struct.Validator.html)) as well as hooks to GPU implementations of its most
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
- //! command-line tools to spin up fullnodes and a Rust library
+ //! command-line tools to spin up validators and a Rust library
//!
pub mod bank_forks;

View File

@@ -1,4 +1,4 @@
- //! The `local_vote_signer_service` can be started locally to sign fullnode votes
+ //! The `local_vote_signer_service` can be started locally to sign validator votes
use crate::service::Service;
use solana_netutil::PortRange;

View File

@@ -28,14 +28,14 @@ use std::time::{Duration, Instant};
#[derive(Debug, Clone)]
pub struct JsonRpcConfig {
- pub enable_fullnode_exit: bool, // Enable the 'fullnodeExit' command
+ pub enable_validator_exit: bool, // Enable the 'validatorExit' command
pub drone_addr: Option<SocketAddr>,
}
impl Default for JsonRpcConfig {
fn default() -> Self {
Self {
- enable_fullnode_exit: false,
+ enable_validator_exit: false,
drone_addr: None,
}
}
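Since Default leaves the flag off, callers must opt in explicitly (config.enable_validator_exit = true on a fresh JsonRpcConfig), as the tests later in this file demonstrate.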
@@ -197,15 +197,15 @@ impl JsonRpcRequestProcessor {
.get_pubkeys_for_slot(slot, &self.bank_forks))
}
- pub fn fullnode_exit(&self) -> Result<bool> {
- if self.config.enable_fullnode_exit {
- warn!("fullnode_exit request...");
+ pub fn validator_exit(&self) -> Result<bool> {
+ if self.config.enable_validator_exit {
+ warn!("validator_exit request...");
if let Some(x) = self.validator_exit.write().unwrap().take() {
x.exit()
}
Ok(true)
} else {
- debug!("fullnode_exit ignored");
+ debug!("validator_exit ignored");
Ok(false)
}
}
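One subtlety: the handler take()s the exit signal out of the RwLock, so only the first accepted request actually fires it; subsequent requests still return Ok(true) but find nothing to signal. A self-contained sketch of that pattern, with a stand-in Exit type rather than the real handle from this tree:

use std::sync::{Arc, RwLock};

// Stand-in for the validator exit handle; illustration only.
struct Exit;
impl Exit {
    fn exit(self) {
        println!("exit signaled");
    }
}

fn main() {
    let handle: Arc<RwLock<Option<Exit>>> = Arc::new(RwLock::new(Some(Exit)));
    // The first request takes the handle and signals exit...
    if let Some(x) = handle.write().unwrap().take() {
        x.exit()
    }
    // ...while a later request finds None and is a no-op, matching the
    // take()-once behavior of validator_exit() above.
    assert!(handle.write().unwrap().take().is_none());
}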
@@ -356,8 +356,8 @@ pub trait RpcSol {
#[rpc(meta, name = "getStoragePubkeysForSlot")]
fn get_storage_pubkeys_for_slot(&self, _: Self::Metadata, _: u64) -> Result<Vec<Pubkey>>;
- #[rpc(meta, name = "fullnodeExit")]
- fn fullnode_exit(&self, _: Self::Metadata) -> Result<bool>;
+ #[rpc(meta, name = "validatorExit")]
+ fn validator_exit(&self, _: Self::Metadata) -> Result<bool>;
#[rpc(meta, name = "getNumBlocksSinceSignatureConfirmation")]
fn get_num_blocks_since_signature_confirmation(
@@ -689,8 +689,8 @@ impl RpcSol for RpcSolImpl {
.get_storage_pubkeys_for_slot(slot)
}
- fn fullnode_exit(&self, meta: Self::Metadata) -> Result<bool> {
- meta.request_processor.read().unwrap().fullnode_exit()
+ fn validator_exit(&self, meta: Self::Metadata) -> Result<bool> {
+ meta.request_processor.read().unwrap().validator_exit()
}
fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> {
@@ -1238,7 +1238,7 @@ pub mod tests {
}
#[test]
- fn test_rpc_request_processor_config_default_trait_fullnode_exit_fails() {
+ fn test_rpc_request_processor_config_default_trait_validator_exit_fails() {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let request_processor = JsonRpcRequestProcessor::new(
@@ -1247,23 +1247,23 @@ pub mod tests {
new_bank_forks().0,
&validator_exit,
);
- assert_eq!(request_processor.fullnode_exit(), Ok(false));
+ assert_eq!(request_processor.validator_exit(), Ok(false));
assert_eq!(exit.load(Ordering::Relaxed), false);
}
#[test]
- fn test_rpc_request_processor_allow_fullnode_exit_config() {
+ fn test_rpc_request_processor_allow_validator_exit_config() {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let mut config = JsonRpcConfig::default();
- config.enable_fullnode_exit = true;
+ config.enable_validator_exit = true;
let request_processor = JsonRpcRequestProcessor::new(
StorageState::default(),
config,
new_bank_forks().0,
&validator_exit,
);
- assert_eq!(request_processor.fullnode_exit(), Ok(true));
+ assert_eq!(request_processor.validator_exit(), Ok(true));
assert_eq!(exit.load(Ordering::Relaxed), true);
}

View File

@@ -194,7 +194,7 @@ pub fn ed25519_verify(
// micro-benchmarks show GPU time for smallest batch around 15-20ms
// and CPU speed for 64-128 sigverifies around 10-20ms. 64 is a nice
// power-of-two number around that accounting for the fact that the CPU
- // may be busy doing other things while being a real fullnode
+ // may be busy doing other things while being a real validator
// TODO: dynamically adjust this crossover
if count < 64 {
return ed25519_verify_cpu(batches);
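The comment encodes a breakeven estimate; a hedged sketch of that arithmetic, using the ballpark figures quoted above rather than measured values:

// Rough CPU/GPU sigverify crossover, per the comment above.
fn prefer_gpu(num_sigs: usize) -> bool {
    const GPU_BATCH_MS: f64 = 17.5; // ~15-20ms GPU time for the smallest batch
    const CPU_MS_PER_64: f64 = 15.0; // ~10-20ms CPU time per 64-128 signatures
    let cpu_ms = (num_sigs as f64 / 64.0) * CPU_MS_PER_64;
    cpu_ms > GPU_BATCH_MS
}

fn main() {
    // With these figures the crossover lands near the 64-signature
    // threshold the code uses.
    assert!(!prefer_gpu(32));
    assert!(prefer_gpu(128));
}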

View File

@@ -1,4 +1,4 @@
- //! The `fullnode` module hosts all the fullnode microservices.
+ //! The `validator` module hosts all the validator microservices.
use crate::bank_forks::{BankForks, SnapshotConfig};
use crate::blocktree::{Blocktree, CompletedSlotsReceiver};

View File

@@ -229,7 +229,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
println!("\nSending stop request to node {:?}", pubkey);
- let result = RpcClient::new_socket(node.rpc).fullnode_exit()?;
+ let result = RpcClient::new_socket(node.rpc).validator_exit()?;
if result {
println!("Stop signal accepted");
} else {

View File

@@ -114,17 +114,17 @@ pub fn send_many_transactions(
expected_balances
}
- pub fn fullnode_exit(entry_point_info: &ContactInfo, nodes: usize) {
+ pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) {
let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
assert!(cluster_nodes.len() >= nodes);
for node in &cluster_nodes {
let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
- assert!(client.fullnode_exit().unwrap());
+ assert!(client.validator_exit().unwrap());
}
sleep(Duration::from_millis(DEFAULT_SLOT_MILLIS));
for node in &cluster_nodes {
let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
- assert!(client.fullnode_exit().is_err());
+ assert!(client.validator_exit().is_err());
}
}
@@ -198,7 +198,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
));
info!("done sleeping for first 2 warmup epochs");
info!("killing entry point: {}", entry_point_info.id);
- assert!(client.fullnode_exit().unwrap());
+ assert!(client.validator_exit().unwrap());
info!("sleeping for some time");
sleep(Duration::from_millis(
slot_millis * NUM_CONSECUTIVE_LEADER_SLOTS,

View File

@@ -58,7 +58,7 @@ impl ReplicatorInfo {
#[derive(Clone, Debug)]
pub struct ClusterConfig {
- /// The fullnode config that should be applied to every node in the cluster
+ /// The validator config that should be applied to every node in the cluster
pub validator_configs: Vec<ValidatorConfig>,
/// Number of replicators in the cluster
/// Note- replicators will timeout if ticks_per_slot is much larger than the default 8
@@ -100,9 +100,9 @@ pub struct LocalCluster {
pub funding_keypair: Keypair,
/// Entry point from which the rest of the network can be discovered
pub entry_point_info: ContactInfo,
- pub fullnode_infos: HashMap<Pubkey, ClusterValidatorInfo>,
+ pub validator_infos: HashMap<Pubkey, ClusterValidatorInfo>,
pub listener_infos: HashMap<Pubkey, ClusterValidatorInfo>,
- fullnodes: HashMap<Pubkey, Validator>,
+ validators: HashMap<Pubkey, Validator>,
pub genesis_block: GenesisBlock,
replicators: Vec<Replicator>,
pub replicator_infos: HashMap<Pubkey, ReplicatorInfo>,
@@ -190,9 +190,9 @@ impl LocalCluster {
&config.validator_configs[0],
);
- let mut fullnodes = HashMap::new();
- let mut fullnode_infos = HashMap::new();
- fullnodes.insert(leader_pubkey, leader_server);
+ let mut validators = HashMap::new();
+ let mut validator_infos = HashMap::new();
+ validators.insert(leader_pubkey, leader_server);
let leader_info = ValidatorInfo {
keypair: leader_keypair,
voting_keypair: leader_voting_keypair,
@@ -204,15 +204,15 @@ impl LocalCluster {
let cluster_leader =
ClusterValidatorInfo::new(leader_info, config.validator_configs[0].clone());
- fullnode_infos.insert(leader_pubkey, cluster_leader);
+ validator_infos.insert(leader_pubkey, cluster_leader);
let mut cluster = Self {
funding_keypair: mint_keypair,
entry_point_info: leader_contact_info,
- fullnodes,
+ validators,
replicators: vec![],
genesis_block,
- fullnode_infos,
+ validator_infos,
replicator_infos: HashMap::new(),
listener_infos: HashMap::new(),
};
@@ -250,14 +250,14 @@ impl LocalCluster {
}
pub fn exit(&mut self) {
- for node in self.fullnodes.values_mut() {
+ for node in self.validators.values_mut() {
node.exit();
}
}
pub fn close_preserve_ledgers(&mut self) {
self.exit();
- for (_, node) in self.fullnodes.drain() {
+ for (_, node) in self.validators.drain() {
node.join().unwrap();
}
@@ -322,7 +322,7 @@ impl LocalCluster {
&validator_config,
);
- self.fullnodes
+ self.validators
.insert(validator_keypair.pubkey(), validator_server);
let validator_pubkey = validator_keypair.pubkey();
let validator_info = ClusterValidatorInfo::new(
@@ -339,7 +339,8 @@ impl LocalCluster {
if validator_config.voting_disabled {
self.listener_infos.insert(validator_pubkey, validator_info);
} else {
- self.fullnode_infos.insert(validator_pubkey, validator_info);
+ self.validator_infos
+ .insert(validator_pubkey, validator_info);
}
}
@@ -384,7 +385,7 @@ impl LocalCluster {
fn close(&mut self) {
self.close_preserve_ledgers();
for ledger_path in self
- .fullnode_infos
+ .validator_infos
.values()
.map(|f| &f.info.ledger_path)
.chain(self.replicator_infos.values().map(|info| &info.ledger_path))
@@ -567,11 +568,11 @@ impl LocalCluster {
impl Cluster for LocalCluster {
fn get_node_pubkeys(&self) -> Vec<Pubkey> {
- self.fullnodes.keys().cloned().collect()
+ self.validators.keys().cloned().collect()
}
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> {
- self.fullnode_infos.get(pubkey).map(|f| {
+ self.validator_infos.get(pubkey).map(|f| {
create_client(
f.info.contact_info.client_facing_addr(),
VALIDATOR_PORT_RANGE,
@@ -580,13 +581,13 @@ impl Cluster for LocalCluster {
}
fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo {
- let mut node = self.fullnodes.remove(&pubkey).unwrap();
+ let mut node = self.validators.remove(&pubkey).unwrap();
- // Shut down the fullnode
+ // Shut down the validator
node.exit();
node.join().unwrap();
- self.fullnode_infos.remove(&pubkey).unwrap()
+ self.validator_infos.remove(&pubkey).unwrap()
}
fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
@@ -604,22 +605,22 @@ impl Cluster for LocalCluster {
};
// Restart the node
- let fullnode_info = &cluster_validator_info.info;
+ let validator_info = &cluster_validator_info.info;
let restarted_node = Validator::new(
node,
- &fullnode_info.keypair,
- &fullnode_info.ledger_path,
- &fullnode_info.voting_keypair.pubkey(),
- &fullnode_info.voting_keypair,
- &fullnode_info.storage_keypair,
+ &validator_info.keypair,
+ &validator_info.ledger_path,
+ &validator_info.voting_keypair.pubkey(),
+ &validator_info.voting_keypair,
+ &validator_info.storage_keypair,
entry_point_info,
true,
&cluster_validator_info.config,
);
- self.fullnodes.insert(*pubkey, restarted_node);
- self.fullnode_infos.insert(*pubkey, cluster_validator_info);
+ self.validators.insert(*pubkey, restarted_node);
+ self.validator_infos.insert(*pubkey, cluster_validator_info);
}
fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {
@@ -646,7 +647,7 @@ mod test {
solana_logger::setup();
let num_nodes = 1;
let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
- assert_eq!(cluster.fullnodes.len(), num_nodes);
+ assert_eq!(cluster.validators.len(), num_nodes);
assert_eq!(cluster.replicators.len(), 0);
}
@@ -654,7 +655,7 @@ mod test {
fn test_local_cluster_start_and_exit_with_config() {
solana_logger::setup();
let mut validator_config = ValidatorConfig::default();
- validator_config.rpc_config.enable_fullnode_exit = true;
+ validator_config.rpc_config.enable_validator_exit = true;
validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
const NUM_NODES: usize = 1;
let num_replicators = 1;
@@ -668,7 +669,7 @@ mod test {
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&config);
- assert_eq!(cluster.fullnodes.len(), NUM_NODES);
+ assert_eq!(cluster.validators.len(), NUM_NODES);
assert_eq!(cluster.replicators.len(), num_replicators);
}
}

View File

@@ -52,7 +52,7 @@ fn test_ledger_cleanup_service() {
);
cluster.close_preserve_ledgers();
//check everyone's ledgers and make sure only ~100 slots are stored
- for (_, info) in &cluster.fullnode_infos {
+ for (_, info) in &cluster.validator_infos {
let mut slots = 0;
let blocktree = Blocktree::open(&info.info.ledger_path).unwrap();
blocktree
@@ -130,22 +130,22 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() {
#[allow(unused_attributes)]
#[test]
#[should_panic]
- fn test_fullnode_exit_default_config_should_panic() {
+ fn test_validator_exit_default_config_should_panic() {
solana_logger::setup();
- error!("test_fullnode_exit_default_config_should_panic");
+ error!("test_validator_exit_default_config_should_panic");
let num_nodes = 2;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
- cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
+ cluster_tests::validator_exit(&local.entry_point_info, num_nodes);
}
#[test]
#[serial]
- fn test_fullnode_exit_2() {
+ fn test_validator_exit_2() {
solana_logger::setup();
- error!("test_fullnode_exit_2");
+ error!("test_validator_exit_2");
let num_nodes = 2;
let mut validator_config = ValidatorConfig::default();
- validator_config.rpc_config.enable_fullnode_exit = true;
+ validator_config.rpc_config.enable_validator_exit = true;
let config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; num_nodes],
@@ -153,7 +153,7 @@ fn test_fullnode_exit_2() {
..ClusterConfig::default()
};
let local = LocalCluster::new(&config);
- cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
+ cluster_tests::validator_exit(&local.entry_point_info, num_nodes);
}
// Cluster needs a supermajority to remain, so the minimum size for this test is 4
@@ -164,7 +164,7 @@ fn test_leader_failure_4() {
error!("test_leader_failure_4");
let num_nodes = 4;
let mut validator_config = ValidatorConfig::default();
- validator_config.rpc_config.enable_fullnode_exit = true;
+ validator_config.rpc_config.enable_validator_exit = true;
let config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; 4],
@@ -189,7 +189,7 @@ fn test_two_unbalanced_stakes() {
let num_ticks_per_slot = 10;
let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
- validator_config.rpc_config.enable_fullnode_exit = true;
+ validator_config.rpc_config.enable_validator_exit = true;
let mut cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![999_990, 3],
cluster_lamports: 1_000_000,
@@ -208,7 +208,7 @@ fn test_two_unbalanced_stakes() {
);
cluster.close_preserve_ledgers();
let leader_pubkey = cluster.entry_point_info.id;
- let leader_ledger = cluster.fullnode_infos[&leader_pubkey]
+ let leader_ledger = cluster.validator_infos[&leader_pubkey]
.info
.ledger_path
.clone();
@@ -430,7 +430,7 @@ fn test_snapshots_blocktree_floor() {
// Check the validator ledger doesn't contain any slots < slot_floor
cluster.close_preserve_ledgers();
- let validator_ledger_path = &cluster.fullnode_infos[&validator_id];
+ let validator_ledger_path = &cluster.validator_infos[&validator_id];
let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap();
// Skip the zeroth slot in blocktree that the ledger is initialized with
@@ -490,7 +490,7 @@ fn test_snapshots_restart_validity() {
let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
wait_for_next_snapshot(&cluster, &tar);
- // Create new account paths since fullnode exit is not guaranteed to cleanup RPC threads,
+ // Create new account paths since validator exit is not guaranteed to cleanup RPC threads,
// which may delete the old accounts on exit at any point
let (new_account_storage_dirs, new_account_storage_paths) =
generate_account_paths(num_account_paths);
@@ -566,7 +566,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
);
let corrupt_node = cluster
- .fullnode_infos
+ .validator_infos
.iter()
.find(|(_, v)| v.config.broadcast_stage_type == faulty_node_type)
.unwrap()
@@ -616,7 +616,7 @@ fn run_repairman_catchup(num_repairmen: u64) {
// their root could actually be much less than 31. This is why we give a num_root_buffer_slots buffer.
let stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots;
- validator_config.rpc_config.enable_fullnode_exit = true;
+ validator_config.rpc_config.enable_validator_exit = true;
let lamports_per_repairman = 1000;
@@ -740,7 +740,7 @@ fn setup_snapshot_validator_config(
// Create the validator config
let mut validator_config = ValidatorConfig::default();
- validator_config.rpc_config.enable_fullnode_exit = true;
+ validator_config.rpc_config.enable_validator_exit = true;
validator_config.snapshot_config = Some(snapshot_config);
validator_config.account_paths = Some(account_storage_paths);

View File

@@ -4815,7 +4815,7 @@
},
"id": 41,
"panels": [],
"title": "Fullnode Streamer",
"title": "Validator Streamer",
"type": "row"
},
{

View File

@@ -24,7 +24,7 @@
"FromPort": 8000,
"IpRanges": [
{
"Description": "fullnode UDP range",
"Description": "validator UDP range",
"CidrIp": "0.0.0.0/0"
}
],
@@ -34,7 +34,7 @@
"Ipv6Ranges": [
{
"CidrIpv6": "::/0",
"Description": "fullnode UDP range"
"Description": "validator UDP range"
}
]
},
@@ -100,7 +100,7 @@
"FromPort": 8000,
"IpRanges": [
{
"Description": "fullnode TCP range",
"Description": "validator TCP range",
"CidrIp": "0.0.0.0/0"
}
],
@@ -110,7 +110,7 @@
"Ipv6Ranges": [
{
"CidrIpv6": "::/0",
"Description": "fullnode TCP range"
"Description": "validator TCP range"
}
]
},

View File

@@ -1,6 +1,6 @@
# |source| this file
#
- # Adjusts system settings for optimal fullnode performance
+ # Adjusts system settings for optimal validator performance
#
sysctl_write() {

View File

@@ -335,7 +335,7 @@ pub fn main() {
Arg::with_name("enable_rpc_exit")
.long("enable-rpc-exit")
.takes_value(false)
- .help("Enable the JSON RPC 'fullnodeExit' API. Only enable in a debug environment"),
+ .help("Enable the JSON RPC 'validatorExit' API. Only enable in a debug environment"),
)
.arg(
Arg::with_name("rpc_drone_addr")
@@ -457,7 +457,7 @@ pub fn main() {
validator_config.voting_disabled = matches.is_present("no_voting");
- validator_config.rpc_config.enable_fullnode_exit = matches.is_present("enable_rpc_exit");
+ validator_config.rpc_config.enable_validator_exit = matches.is_present("enable_rpc_exit");
validator_config.rpc_config.drone_addr = matches.value_of("rpc_drone_addr").map(|address| {
solana_netutil::parse_host_port(address).expect("failed to parse drone address")
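Operationally, the two ends of this rename meet here: a node launched with --enable-rpc-exit honors the validatorExit request shown in the curl snippet at the top of this diff, while a node without the flag logs the ignored request and keeps running.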