//! solana/src/bin/fullnode.rs — entry point for the `fullnode` binary.
#[macro_use]
extern crate clap;
extern crate getopts;
#[macro_use]
2018-06-18 15:49:41 -07:00
extern crate log;
2018-03-03 23:13:40 -08:00
extern crate serde_json;
#[macro_use]
2018-03-27 15:24:05 -07:00
extern crate solana;
2018-11-16 08:45:59 -08:00
extern crate solana_metrics;
2018-12-03 10:26:28 -08:00
extern crate solana_sdk;
2018-02-28 17:04:35 -08:00
use clap::{App, Arg};
use solana::client::mk_client;
use solana::cluster_info::{Node, FULLNODE_PORT_RANGE};
use solana::fullnode::{Config, Fullnode, FullnodeReturnType};
Leader scheduler plumbing (#1440) * Added LeaderScheduler module and tests * plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage * Add LeaderScheduler plumbing for Tvu, window, and tests * Fix bank and switch tests to use new LeaderScheduler * move leader rotation check from window service to replicate stage * Add replicate_stage leader rotation exit test * removed leader scheduler from the window service and associated modules/tests * Corrected is_leader calculation in repair() function in window.rs * Integrate LeaderScheduler with write_stage for leader to validator transitions * Integrated LeaderScheduler with BroadcastStage * Removed gossip leader rotation from crdt * Add multi validator, leader test * Comments and cleanup * Remove unneeded checks from broadcast stage * Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role * Set new leader in validator -> validator transitions * Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail * Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops * Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery go track this bug
2018-10-10 16:49:41 -07:00
use solana::leader_scheduler::LeaderScheduler;
2018-07-27 21:37:53 -07:00
use solana::logger;
use solana::netutil::find_available_port_in_range;
use solana::thin_client::poll_gossip_for_leader;
2018-12-03 10:26:28 -08:00
use solana_sdk::signature::{Keypair, KeypairUtil};
2018-12-04 07:45:32 -08:00
use solana_sdk::vote_program::VoteProgram;
2018-12-04 15:05:41 -08:00
use solana_sdk::vote_transaction::VoteTransaction;
use std::fs::File;
use std::net::{Ipv4Addr, SocketAddr};
2018-04-19 07:06:19 -07:00
use std::process::exit;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
2018-02-28 17:04:35 -08:00
fn main() {
2018-07-27 21:37:53 -07:00
logger::setup();
2018-11-16 08:45:59 -08:00
solana_metrics::set_panic_hook("fullnode");
let matches = App::new("fullnode")
.version(crate_version!())
.arg(
Arg::with_name("nosigverify")
.short("v")
.long("nosigverify")
.help("Run without signature verification"),
)
.arg(
2018-12-06 09:08:30 -08:00
Arg::with_name("no-leader-rotation")
.long("no-leader-rotation")
.help("Disable leader rotation"),
)
.arg(
Arg::with_name("identity")
.short("i")
.long("identity")
2018-09-14 15:32:57 -07:00
.value_name("PATH")
.takes_value(true)
2018-09-14 15:32:57 -07:00
.help("Run with the identity found in FILE"),
)
.arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
2018-09-14 15:32:57 -07:00
.help("Rendezvous with the network at this gossip entry point"),
)
.arg(
Arg::with_name("ledger")
.short("l")
.long("ledger")
.value_name("DIR")
.takes_value(true)
.required(true)
2018-11-05 09:50:58 -08:00
.help("Use DIR as persistent ledger location"),
)
.arg(
2018-11-05 09:50:58 -08:00
Arg::with_name("rpc")
.long("rpc")
.value_name("PORT")
.takes_value(true)
.help("Custom RPC port for this node"),
)
.get_matches();
2018-04-21 06:12:57 -07:00
let nosigverify = matches.is_present("nosigverify");
2018-12-06 09:08:30 -08:00
let use_only_bootstrap_leader = matches.is_present("no-leader-rotation");
let (keypair, vote_account_keypair, gossip) = if let Some(i) = matches.value_of("identity") {
let path = i.to_string();
2018-05-30 07:17:04 -07:00
if let Ok(file) = File::open(path.clone()) {
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
if let Ok(data) = parse {
2018-11-15 17:05:31 -08:00
(
data.keypair(),
data.vote_account_keypair(),
data.node_info.gossip,
2018-11-15 17:05:31 -08:00
)
2018-05-30 07:17:04 -07:00
} else {
eprintln!("failed to parse {}", path);
exit(1);
2018-05-30 07:17:04 -07:00
}
2018-06-15 15:29:43 -07:00
} else {
eprintln!("failed to read {}", path);
exit(1);
}
} else {
2018-11-15 17:05:31 -08:00
(Keypair::new(), Keypair::new(), socketaddr!(0, 8000))
};
let ledger_path = matches.value_of("ledger").unwrap();
// socketaddr that is initial pointer into the network's gossip
let network = matches
.value_of("network")
.map(|network| network.parse().expect("failed to parse network address"));
let node = Node::new_with_external_ip(keypair.pubkey(), &gossip);
// save off some stuff for airdrop
let mut node_info = node.info.clone();
2018-11-15 17:05:31 -08:00
let vote_account_keypair = Arc::new(vote_account_keypair);
let vote_account_id = vote_account_keypair.pubkey();
let keypair = Arc::new(keypair);
let pubkey = keypair.pubkey();
let mut leader_scheduler = LeaderScheduler::default();
// Remove this line to enable leader rotation
2018-12-06 09:08:30 -08:00
leader_scheduler.use_only_bootstrap_leader = use_only_bootstrap_leader;
2018-11-05 09:50:58 -08:00
let rpc_port = if let Some(port) = matches.value_of("rpc") {
let port_number = port.to_string().parse().expect("integer");
if port_number == 0 {
eprintln!("Invalid RPC port requested: {:?}", port);
exit(1);
}
Some(port_number)
} else {
match find_available_port_in_range(FULLNODE_PORT_RANGE) {
Ok(port) => Some(port),
Err(_) => None,
}
};
let leader = match network {
Some(network) => {
poll_gossip_for_leader(network, None).expect("can't find leader on network")
}
None => {
//self = leader
if rpc_port.is_some() {
2018-11-15 13:42:57 -08:00
node_info.rpc.set_port(rpc_port.unwrap());
node_info.rpc_pubsub.set_port(rpc_port.unwrap() + 1);
}
node_info
}
2018-11-05 09:50:58 -08:00
};
Leader scheduler plumbing (#1440) * Added LeaderScheduler module and tests * plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage * Add LeaderScheduler plumbing for Tvu, window, and tests * Fix bank and switch tests to use new LeaderScheduler * move leader rotation check from window service to replicate stage * Add replicate_stage leader rotation exit test * removed leader scheduler from the window service and associated modules/tests * Corrected is_leader calculation in repair() function in window.rs * Integrate LeaderScheduler with write_stage for leader to validator transitions * Integrated LeaderScheduler with BroadcastStage * Removed gossip leader rotation from crdt * Add multi validator, leader test * Comments and cleanup * Remove unneeded checks from broadcast stage * Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role * Set new leader in validator -> validator transitions * Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail * Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops * Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery go track this bug
2018-10-10 16:49:41 -07:00
let mut fullnode = Fullnode::new(
node,
ledger_path,
keypair.clone(),
2018-11-15 17:05:31 -08:00
vote_account_keypair.clone(),
Leader scheduler plumbing (#1440) * Added LeaderScheduler module and tests * plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage * Add LeaderScheduler plumbing for Tvu, window, and tests * Fix bank and switch tests to use new LeaderScheduler * move leader rotation check from window service to replicate stage * Add replicate_stage leader rotation exit test * removed leader scheduler from the window service and associated modules/tests * Corrected is_leader calculation in repair() function in window.rs * Integrate LeaderScheduler with write_stage for leader to validator transitions * Integrated LeaderScheduler with BroadcastStage * Removed gossip leader rotation from crdt * Add multi validator, leader test * Comments and cleanup * Remove unneeded checks from broadcast stage * Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role * Set new leader in validator -> validator transitions * Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail * Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops * Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery go track this bug
2018-10-10 16:49:41 -07:00
network,
nosigverify,
leader_scheduler,
2018-11-05 09:50:58 -08:00
rpc_port,
Leader scheduler plumbing (#1440) * Added LeaderScheduler module and tests * plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage * Add LeaderScheduler plumbing for Tvu, window, and tests * Fix bank and switch tests to use new LeaderScheduler * move leader rotation check from window service to replicate stage * Add replicate_stage leader rotation exit test * removed leader scheduler from the window service and associated modules/tests * Corrected is_leader calculation in repair() function in window.rs * Integrate LeaderScheduler with write_stage for leader to validator transitions * Integrated LeaderScheduler with BroadcastStage * Removed gossip leader rotation from crdt * Add multi validator, leader test * Comments and cleanup * Remove unneeded checks from broadcast stage * Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role * Set new leader in validator -> validator transitions * Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail * Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops * Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery go track this bug
2018-10-10 16:49:41 -07:00
);
let mut client = mk_client(&leader);
2018-11-15 17:05:31 -08:00
let balance = client.poll_get_balance(&pubkey).unwrap_or(0);
info!("balance is {}", balance);
if balance < 1 {
error!("insufficient tokens");
exit(1);
}
2018-11-15 17:05:31 -08:00
// Create the vote account if necessary
if client.poll_get_balance(&vote_account_id).unwrap_or(0) == 0 {
// Need at least two tokens as one token will be spent on a vote_account_new() transaction
if balance < 2 {
error!("insufficient tokens");
exit(1);
}
loop {
let last_id = client.get_last_id();
2018-11-15 17:05:31 -08:00
let transaction =
VoteTransaction::vote_account_new(&keypair, vote_account_id, last_id, 1, 1);
2018-11-15 17:05:31 -08:00
if client.transfer_signed(&transaction).is_err() {
sleep(Duration::from_secs(2));
continue;
}
2018-11-15 17:05:31 -08:00
let balance = client.poll_get_balance(&vote_account_id).unwrap_or(0);
if balance > 0 {
break;
}
sleep(Duration::from_secs(2));
}
}
loop {
let vote_account_user_data = client.get_account_userdata(&vote_account_id);
if let Ok(Some(vote_account_user_data)) = vote_account_user_data {
if let Ok(vote_state) = VoteProgram::deserialize(&vote_account_user_data) {
if vote_state.node_id == pubkey {
break;
}
}
}
panic!("Expected successful vote account registration");
}
loop {
let status = fullnode.handle_role_transition();
match status {
Leader scheduler plumbing (#1440) * Added LeaderScheduler module and tests * plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage * Add LeaderScheduler plumbing for Tvu, window, and tests * Fix bank and switch tests to use new LeaderScheduler * move leader rotation check from window service to replicate stage * Add replicate_stage leader rotation exit test * removed leader scheduler from the window service and associated modules/tests * Corrected is_leader calculation in repair() function in window.rs * Integrate LeaderScheduler with write_stage for leader to validator transitions * Integrated LeaderScheduler with BroadcastStage * Removed gossip leader rotation from crdt * Add multi validator, leader test * Comments and cleanup * Remove unneeded checks from broadcast stage * Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role * Set new leader in validator -> validator transitions * Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail * Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops * Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery go track this bug
2018-10-10 16:49:41 -07:00
Ok(Some(FullnodeReturnType::LeaderToValidatorRotation)) => (),
Ok(Some(FullnodeReturnType::ValidatorToLeaderRotation)) => (),
_ => {
// Fullnode tpu/tvu exited for some unexpected
// reason, so exit
exit(1);
}
}
}
}