Fullnode::new() - provide FullnodeConfig as a ref
parent 65708f234d
commit d87c2eb903
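Summary: Fullnode::new() now borrows its FullnodeConfig (&FullnodeConfig) instead of taking it by value, and the nested entry_stream option is threaded through Tvu and ReplayStage as Option<&String>. Callers keep ownership of the config and can hand the same instance to several node constructions. A minimal sketch of the pattern, using simplified stand-in types rather than the real Solana signatures:

    #[derive(Default)]
    struct FullnodeConfig {
        entry_stream: Option<String>,
        storage_rotate_count: u64,
    }

    struct Fullnode;

    impl Fullnode {
        // Borrow the config; the caller retains ownership and may reuse it.
        fn new(config: &FullnodeConfig) -> Self {
            // Copy or borrow individual fields as needed.
            let _rotate_count = config.storage_rotate_count;
            let _stream: Option<&String> = config.entry_stream.as_ref();
            Fullnode
        }
    }

    fn main() {
        let config = FullnodeConfig::default();
        let _leader = Fullnode::new(&config);
        let _validator = Fullnode::new(&config); // same config, no clone or rebuild
    }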
@@ -271,7 +271,7 @@ fn main() {
         cluster_entrypoint
             .map(|i| NodeInfo::new_entry_point(&i))
             .as_ref(),
-        fullnode_config,
+        &fullnode_config,
     );

     if !no_signer {
@@ -114,7 +114,7 @@ impl Fullnode {
         leader_scheduler: Arc<RwLock<LeaderScheduler>>,
         vote_signer: VoteSignerProxy,
         entrypoint_info_option: Option<&NodeInfo>,
-        config: FullnodeConfig,
+        config: &FullnodeConfig,
     ) -> Self {
         let id = keypair.pubkey();
         let (genesis_block, db_ledger) = Self::make_db_ledger(ledger_path);
@@ -234,7 +234,7 @@ impl Fullnode {
             config.storage_rotate_count,
             to_leader_sender,
             &storage_state,
-            config.entry_stream,
+            config.entry_stream.as_ref(),
         );
         let max_tick_height = {
             let ls_lock = bank.leader_scheduler.read().unwrap();
@@ -462,10 +462,10 @@ impl Service for Fullnode {
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::cluster_info::Node;
     use crate::db_ledger::*;
     use crate::entry::make_consecutive_blobs;
     use crate::fullnode::{Fullnode, FullnodeReturnType};
     use crate::leader_scheduler::{
         make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig,
     };
@@ -500,7 +500,7 @@ mod tests {
            Arc::new(RwLock::new(LeaderScheduler::new(&Default::default()))),
            VoteSignerProxy::new(),
            Some(&leader_node.info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );
        validator.close().unwrap();
        remove_dir_all(validator_ledger_path).unwrap();
@@ -532,7 +532,7 @@ mod tests {
                    Arc::new(RwLock::new(LeaderScheduler::new(&Default::default()))),
                    VoteSignerProxy::new(),
                    Some(&leader_node.info),
-                   Default::default(),
+                   &FullnodeConfig::default(),
                )
            })
            .collect();
@@ -596,7 +596,7 @@ mod tests {
            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
            signer,
            Some(&bootstrap_leader_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        // Wait for the leader to transition, ticks should cause the leader to
@@ -658,7 +658,7 @@ mod tests {
            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
            VoteSignerProxy::new(),
            Some(&bootstrap_leader_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        assert!(!bootstrap_leader.node_services.tpu.is_leader());
@@ -671,7 +671,7 @@ mod tests {
            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
            VoteSignerProxy::new(),
            Some(&bootstrap_leader_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        assert!(validator.node_services.tpu.is_leader());
@@ -726,7 +726,7 @@ mod tests {
            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
            vote_signer,
            Some(&leader_node.info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        // Send blobs to the validator from our mock leader
@@ -829,7 +829,7 @@ mod tests {
            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
            vote_signer,
            Some(&leader_node_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        // Hold Tvu bank lock to prevent tvu from making progress
@@ -207,9 +207,10 @@ impl ReplayStage {
        entry_height: Arc<RwLock<u64>>,
        last_entry_id: Arc<RwLock<Hash>>,
        to_leader_sender: TvuRotationSender,
-       entry_stream: Option<String>,
+       entry_stream: Option<&String>,
    ) -> (Self, EntryReceiver) {
        let (ledger_entry_sender, ledger_entry_receiver) = channel();
+       let mut entry_stream = entry_stream.cloned().map(EntryStream::new);

        let t_replay = Builder::new()
            .name("solana-replay-stage".to_string())
@@ -220,7 +221,6 @@ impl ReplayStage {
                let (mut last_leader_id, _) = bank
                    .get_current_leader()
                    .expect("Scheduled leader should be calculated by this point");
-               let mut entry_stream = entry_stream.map(EntryStream::new);
                loop {
                    let (leader_id, _) = bank
                        .get_current_leader()
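The entry_stream plumbing follows the same borrowing idea: the config keeps the owned Option<String>, call sites pass Option<&String> via Option::as_ref() (as in the config.entry_stream.as_ref() hunk above), and ReplayStage, the one consumer that actually stores the value, turns it back into an owned value with Option::cloned(). A standalone sketch, with a hypothetical EntryStream stand-in:

    struct EntryStream {
        socket_path: String, // assumed field, for illustration only
    }

    impl EntryStream {
        fn new(socket_path: String) -> Self {
            EntryStream { socket_path }
        }
    }

    // Mirrors the ReplayStage::new pattern: borrow the name, clone only if present.
    fn make_stream(entry_stream: Option<&String>) -> Option<EntryStream> {
        // Option<&String> -> Option<String> -> Option<EntryStream>
        entry_stream.cloned().map(EntryStream::new)
    }

    fn main() {
        let config_value = Some("entry_stream_socket".to_string());
        // as_ref() borrows the String inside the Option; config_value stays usable.
        let stream = make_stream(config_value.as_ref());
        assert_eq!(stream.map(|s| s.socket_path), config_value);
    }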
@@ -5,7 +5,7 @@

 use crate::bank::Bank;
 use crate::cluster_info::{ClusterInfo, ClusterInfoError, NodeInfo};
-use crate::fullnode::Fullnode;
+use crate::fullnode::{Fullnode, FullnodeConfig};
 use crate::gossip_service::GossipService;
 use crate::packet::PACKET_DATA_SIZE;
 use crate::result::{Error, Result};
@@ -460,7 +460,7 @@ pub fn new_fullnode(ledger_name: &'static str) -> (Fullnode, NodeInfo, Keypair,
        Arc::new(RwLock::new(leader_scheduler)),
        vote_signer,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    (node, node_info, mint_keypair, ledger_path)
@@ -74,7 +74,7 @@ impl Tvu {
        storage_rotate_count: u64,
        to_leader_sender: TvuRotationSender,
        storage_state: &StorageState,
-       entry_stream: Option<String>,
+       entry_stream: Option<&String>,
    ) -> (Self, BlobSender) {
        let exit = Arc::new(AtomicBool::new(false));
        let keypair: Arc<Keypair> = cluster_info
@@ -7,7 +7,7 @@ use solana::contact_info::ContactInfo;
 use solana::db_ledger::{create_tmp_sample_ledger, tmp_copy_ledger};
 use solana::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
 use solana::entry::{reconstruct_entries_from_blobs, Entry};
-use solana::fullnode::{Fullnode, FullnodeReturnType};
+use solana::fullnode::{Fullnode, FullnodeConfig, FullnodeReturnType};
 use solana::gossip_service::GossipService;
 use solana::leader_scheduler::{make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig};
 use solana::packet::SharedBlob;
@@ -164,7 +164,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
        ))),
        signer_proxy,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // start up another validator from zero, converge and then check
@@ -183,7 +183,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
        ))),
        signer_proxy,
        Some(&leader_data),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // Send validator some tokens to vote
@@ -267,7 +267,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
        ))),
        signer_proxy,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    let mut nodes = vec![server];
@@ -300,7 +300,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
            ))),
            signer_proxy,
            Some(&leader_data),
-           Default::default(),
+           &FullnodeConfig::default(),
        );
        nodes.push(val);
    }
@@ -362,7 +362,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
        ))),
        signer_proxy,
        Some(&leader_data),
-       Default::default(),
+       &FullnodeConfig::default(),
    );
    nodes.push(val);
    let servers = converge(&leader_data, N + 2); // contains the leader and new node
@@ -451,7 +451,7 @@ fn test_multi_node_basic() {
        ))),
        signer_proxy,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    let mut nodes = vec![server];
@@ -480,7 +480,7 @@ fn test_multi_node_basic() {
            ))),
            signer_proxy,
            Some(&leader_data),
-           Default::default(),
+           &FullnodeConfig::default(),
        );
        nodes.push(val);
    }
@@ -559,7 +559,7 @@ fn test_boot_validator_from_file() -> result::Result<()> {
        ))),
        signer_proxy,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, 500, Some(500)).unwrap();
@@ -583,7 +583,7 @@ fn test_boot_validator_from_file() -> result::Result<()> {
        ))),
        signer_proxy,
        Some(&leader_data),
-       Default::default(),
+       &FullnodeConfig::default(),
    );
    let mut client = mk_client(&validator_data);
    let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance));
@@ -615,7 +615,7 @@ fn create_leader(
        ))),
        signer,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );
    (leader_data, leader_fullnode)
}
@@ -692,7 +692,7 @@ fn test_leader_restart_validator_start_from_old_ledger() -> result::Result<()> {
        ))),
        signer_proxy,
        Some(&leader_data),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // trigger broadcast, validator should catch up from leader, whose window contains
@@ -765,7 +765,7 @@ fn test_multi_node_dynamic_network() {
        ))),
        signer_proxy,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );
    info!(
        "found leader: {:?}",
@@ -839,7 +839,7 @@ fn test_multi_node_dynamic_network() {
                ))),
                signer_proxy,
                Some(&leader_data),
-               Default::default(),
+               &FullnodeConfig::default(),
            );
            (rd, val)
        })
@@ -1014,7 +1014,7 @@ fn test_leader_to_validator_transition() {
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
        signer_proxy,
        Some(&leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // Make an extra node for our leader to broadcast to,
@@ -1163,7 +1163,7 @@ fn test_leader_validator_basic() {
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
        signer_proxy,
        Some(&leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // Start the leader fullnode
@@ -1175,7 +1175,7 @@ fn test_leader_validator_basic() {
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
        signer_proxy,
        Some(&leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // Wait for convergence
@@ -1361,7 +1361,7 @@ fn test_dropped_handoff_recovery() {
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
        signer_proxy,
        Some(&bootstrap_leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    let mut nodes = vec![bootstrap_leader];
@@ -1383,7 +1383,7 @@ fn test_dropped_handoff_recovery() {
            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
            signer_proxy,
            Some(&bootstrap_leader_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        nodes.push(validator);
@@ -1409,7 +1409,7 @@ fn test_dropped_handoff_recovery() {
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
        signer_proxy,
        Some(&bootstrap_leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    info!("Wait for 'next leader' to assume leader role");
@@ -1547,7 +1547,7 @@ fn test_full_leader_validator_network() {
            leader_scheduler.clone(),
            signer_proxy,
            Some(&bootstrap_leader_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        schedules.push(leader_scheduler);
@@ -1564,7 +1564,7 @@ fn test_full_leader_validator_network() {
            leader_scheduler.clone(),
            signer_proxy,
            Some(&bootstrap_leader_info),
-           Default::default(),
+           &FullnodeConfig::default(),
        );

        schedules.push(leader_scheduler);
@@ -1741,7 +1741,7 @@ fn test_broadcast_last_tick() {
        Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
        signer_proxy,
        Some(&bootstrap_leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    // Wait for convergence
@@ -60,7 +60,7 @@ fn test_replicator_startup() {
        ))),
        signer_proxy,
        None,
-       fullnode_config,
+       &fullnode_config,
    );

    let validator_keypair = Arc::new(Keypair::new());
@@ -79,8 +79,6 @@ fn test_replicator_startup() {
    #[cfg(feature = "chacha")]
    let validator_node_info = validator_node.info.clone();

-   let mut fullnode_config = FullnodeConfig::default();
-   fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
    let validator = Fullnode::new(
        validator_node,
        &validator_keypair,
@@ -90,7 +88,7 @@ fn test_replicator_startup() {
        ))),
        signer_proxy,
        Some(&leader_info),
-       fullnode_config,
+       &fullnode_config,
    );

    let bob = Keypair::new();
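Note on the replicator test: because the config is now only borrowed, the test no longer needs to rebuild a second FullnodeConfig for the validator. The one built earlier (with storage_rotate_count set to STORAGE_ROTATE_TEST_COUNT) serves both Fullnode::new calls, which is why the duplicate construction in the -79,8 hunk is deleted. A condensed, self-contained sketch of that reuse (constant value assumed for illustration):

    #[derive(Default)]
    struct FullnodeConfig {
        storage_rotate_count: u64,
    }

    // Stand-in for Fullnode::new; only the config parameter matters here.
    fn fullnode_new(config: &FullnodeConfig) -> u64 {
        config.storage_rotate_count
    }

    fn main() {
        const STORAGE_ROTATE_TEST_COUNT: u64 = 128; // assumed value for illustration
        let mut fullnode_config = FullnodeConfig::default();
        fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
        // One config serves both the leader and the validator constructors.
        assert_eq!(fullnode_new(&fullnode_config), STORAGE_ROTATE_TEST_COUNT);
        assert_eq!(fullnode_new(&fullnode_config), STORAGE_ROTATE_TEST_COUNT);
    }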
@@ -285,7 +283,7 @@ fn test_replicator_startup_ledger_hang() {
        ))),
        signer_proxy,
        None,
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    let validator_keypair = Arc::new(Keypair::new());
@@ -301,7 +299,7 @@ fn test_replicator_startup_ledger_hang() {
        ))),
        signer_proxy,
        Some(&leader_info),
-       Default::default(),
+       &FullnodeConfig::default(),
    );

    info!("starting replicator node");