Rename Ncp to GossipService
And BroadcastStage to BroadcastService since it's not included in the TPU pipeline.
This commit is contained in:
parent
02bfcd23a9
commit
97b1156a7a
|
@ -7,9 +7,9 @@
|
||||||
| |<----| | |
|
| |<----| | |
|
||||||
`----+---` | `------------------` |
|
`----+---` | `------------------` |
|
||||||
| | ^ | .------------------.
|
| | ^ | .------------------.
|
||||||
| | | .-----. | | Validators |
|
| | | .----------------. | | Validators |
|
||||||
| | | | NCP |<---------->| |
|
| | | | Gossip Service +----->| |
|
||||||
| | | `---+-` | | .------------. |
|
| | | `--------+-------` | | .------------. |
|
||||||
| | | ^ | | | | | |
|
| | | ^ | | | | | |
|
||||||
| | | | v | | | Upstream | |
|
| | | | v | | | Upstream | |
|
||||||
| | .--+---. .-+---. | | | Validators | |
|
| | .--+---. .-+---. | | | Validators | |
|
||||||
|
@ -19,7 +19,7 @@
|
||||||
| | | | | .------------. |
|
| | | | | .------------. |
|
||||||
| | .--+--. .-----------. | | | | |
|
| | .--+--. .-----------. | | | | |
|
||||||
`-------->| TPU +-->| Broadcast +--------->| Downstream | |
|
`-------->| TPU +-->| Broadcast +--------->| Downstream | |
|
||||||
| `-----` | Stage | | | | Validators | |
|
| `-----` | Service | | | | Validators | |
|
||||||
| `-----------` | | | | |
|
| `-----------` | | | | |
|
||||||
| | | `------------` |
|
| | | `------------` |
|
||||||
`---------------------------` | |
|
`---------------------------` | |
|
||||||
|
|
|
@ -17,5 +17,6 @@
|
||||||
| | |
|
| | |
|
||||||
| V v
|
| V v
|
||||||
.+-----------. .------.
|
.+-----------. .------.
|
||||||
| NCP | | Bank |
|
| Gossip | | Bank |
|
||||||
`------------` `------`
|
| Service | `------`
|
||||||
|
`------------`
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
- [Anatomy of a Fullnode](fullnode.md)
|
- [Anatomy of a Fullnode](fullnode.md)
|
||||||
- [TPU](tpu.md)
|
- [TPU](tpu.md)
|
||||||
- [TVU](tvu.md)
|
- [TVU](tvu.md)
|
||||||
- [NCP](ncp.md)
|
- [Gossip Service](gossip.md)
|
||||||
- [The Runtime](runtime.md)
|
- [The Runtime](runtime.md)
|
||||||
|
|
||||||
## Appendix
|
## Appendix
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
# The Network Control Plane
|
# The Gossip Service
|
||||||
|
|
||||||
The Network Control Plane (NCP) acts as a gateway to nodes in the control
|
The Gossip Service acts as a gateway to nodes in the control plane. Fullnodes
|
||||||
plane. Fullnodes use the NCP to ensure information is available to all other
|
use the service to ensure information is available to all other nodes in a cluster.
|
||||||
nodes in a cluster. The NCP broadcasts information using a gossip protocol.
|
The service broadcasts information using a gossip protocol.
|
||||||
|
|
||||||
## Gossip Overview
|
## Gossip Overview
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ timestamp) as needed to make sense to the node receiving them. If a node
|
||||||
receives two records from the same source, it updates its own copy with the
|
receives two records from the same source, it updates its own copy with the
|
||||||
record with the most recent timestamp.
|
record with the most recent timestamp.
|
||||||
|
|
||||||
## NCP Interface
|
## Gossip Service Interface
|
||||||
|
|
||||||
### Push Message
|
### Push Message
|
||||||
|
|
|
@ -12,8 +12,8 @@ use clap::{App, Arg};
|
||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use solana::client::mk_client;
|
use solana::client::mk_client;
|
||||||
use solana::cluster_info::{ClusterInfo, NodeInfo};
|
use solana::cluster_info::{ClusterInfo, NodeInfo};
|
||||||
|
use solana::gossip_service::GossipService;
|
||||||
use solana::logger;
|
use solana::logger;
|
||||||
use solana::ncp::Ncp;
|
|
||||||
use solana::service::Service;
|
use solana::service::Service;
|
||||||
use solana::signature::GenKeys;
|
use solana::signature::GenKeys;
|
||||||
use solana::thin_client::{poll_gossip_for_leader, ThinClient};
|
use solana::thin_client::{poll_gossip_for_leader, ThinClient};
|
||||||
|
@ -642,7 +642,7 @@ fn main() {
|
||||||
let leader = poll_gossip_for_leader(network, None).expect("unable to find leader on network");
|
let leader = poll_gossip_for_leader(network, None).expect("unable to find leader on network");
|
||||||
|
|
||||||
let exit_signal = Arc::new(AtomicBool::new(false));
|
let exit_signal = Arc::new(AtomicBool::new(false));
|
||||||
let (nodes, leader, ncp) = converge(&leader, &exit_signal, num_nodes);
|
let (nodes, leader, gossip_service) = converge(&leader, &exit_signal, num_nodes);
|
||||||
|
|
||||||
if nodes.len() < num_nodes {
|
if nodes.len() < num_nodes {
|
||||||
println!(
|
println!(
|
||||||
|
@ -825,14 +825,14 @@ fn main() {
|
||||||
);
|
);
|
||||||
|
|
||||||
// join the cluster_info client threads
|
// join the cluster_info client threads
|
||||||
ncp.join().unwrap();
|
gossip_service.join().unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn converge(
|
fn converge(
|
||||||
leader: &NodeInfo,
|
leader: &NodeInfo,
|
||||||
exit_signal: &Arc<AtomicBool>,
|
exit_signal: &Arc<AtomicBool>,
|
||||||
num_nodes: usize,
|
num_nodes: usize,
|
||||||
) -> (Vec<NodeInfo>, Option<NodeInfo>, Ncp) {
|
) -> (Vec<NodeInfo>, Option<NodeInfo>, GossipService) {
|
||||||
//lets spy on the network
|
//lets spy on the network
|
||||||
let (node, gossip_socket) = ClusterInfo::spy_node();
|
let (node, gossip_socket) = ClusterInfo::spy_node();
|
||||||
let mut spy_cluster_info = ClusterInfo::new(node);
|
let mut spy_cluster_info = ClusterInfo::new(node);
|
||||||
|
@ -840,7 +840,8 @@ fn converge(
|
||||||
spy_cluster_info.set_leader(leader.id);
|
spy_cluster_info.set_leader(leader.id);
|
||||||
let spy_ref = Arc::new(RwLock::new(spy_cluster_info));
|
let spy_ref = Arc::new(RwLock::new(spy_cluster_info));
|
||||||
let window = Arc::new(RwLock::new(default_window()));
|
let window = Arc::new(RwLock::new(default_window()));
|
||||||
let ncp = Ncp::new(&spy_ref, window, None, gossip_socket, exit_signal.clone());
|
let gossip_service =
|
||||||
|
GossipService::new(&spy_ref, window, None, gossip_socket, exit_signal.clone());
|
||||||
let mut v: Vec<NodeInfo> = vec![];
|
let mut v: Vec<NodeInfo> = vec![];
|
||||||
// wait for the network to converge, 30 seconds should be plenty
|
// wait for the network to converge, 30 seconds should be plenty
|
||||||
for _ in 0..30 {
|
for _ in 0..30 {
|
||||||
|
@ -866,7 +867,7 @@ fn converge(
|
||||||
sleep(Duration::new(1, 0));
|
sleep(Duration::new(1, 0));
|
||||||
}
|
}
|
||||||
let leader = spy_ref.read().unwrap().leader_data().cloned();
|
let leader = spy_ref.read().unwrap().leader_data().cloned();
|
||||||
(v, leader, ncp)
|
(v, leader, gossip_service)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|
|
@ -74,7 +74,7 @@ fn main() {
|
||||||
let nosigverify = matches.is_present("nosigverify");
|
let nosigverify = matches.is_present("nosigverify");
|
||||||
let use_only_bootstrap_leader = matches.is_present("no-leader-rotation");
|
let use_only_bootstrap_leader = matches.is_present("no-leader-rotation");
|
||||||
|
|
||||||
let (keypair, vote_account_keypair, ncp) = if let Some(i) = matches.value_of("identity") {
|
let (keypair, vote_account_keypair, gossip) = if let Some(i) = matches.value_of("identity") {
|
||||||
let path = i.to_string();
|
let path = i.to_string();
|
||||||
if let Ok(file) = File::open(path.clone()) {
|
if let Ok(file) = File::open(path.clone()) {
|
||||||
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
|
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
|
||||||
|
@ -82,7 +82,7 @@ fn main() {
|
||||||
(
|
(
|
||||||
data.keypair(),
|
data.keypair(),
|
||||||
data.vote_account_keypair(),
|
data.vote_account_keypair(),
|
||||||
data.node_info.ncp,
|
data.node_info.gossip,
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
eprintln!("failed to parse {}", path);
|
eprintln!("failed to parse {}", path);
|
||||||
|
@ -98,12 +98,12 @@ fn main() {
|
||||||
|
|
||||||
let ledger_path = matches.value_of("ledger").unwrap();
|
let ledger_path = matches.value_of("ledger").unwrap();
|
||||||
|
|
||||||
// socketaddr that is initial pointer into the network's gossip (ncp)
|
// socketaddr that is initial pointer into the network's gossip
|
||||||
let network = matches
|
let network = matches
|
||||||
.value_of("network")
|
.value_of("network")
|
||||||
.map(|network| network.parse().expect("failed to parse network address"));
|
.map(|network| network.parse().expect("failed to parse network address"));
|
||||||
|
|
||||||
let node = Node::new_with_external_ip(keypair.pubkey(), &ncp);
|
let node = Node::new_with_external_ip(keypair.pubkey(), &gossip);
|
||||||
|
|
||||||
// save off some stuff for airdrop
|
// save off some stuff for airdrop
|
||||||
let mut node_info = node.info.clone();
|
let mut node_info = node.info.clone();
|
||||||
|
|
|
@ -59,12 +59,12 @@ fn main() {
|
||||||
|
|
||||||
let ledger_path = matches.value_of("ledger");
|
let ledger_path = matches.value_of("ledger");
|
||||||
|
|
||||||
let (keypair, ncp) = if let Some(i) = matches.value_of("identity") {
|
let (keypair, gossip) = if let Some(i) = matches.value_of("identity") {
|
||||||
let path = i.to_string();
|
let path = i.to_string();
|
||||||
if let Ok(file) = File::open(path.clone()) {
|
if let Ok(file) = File::open(path.clone()) {
|
||||||
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
|
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
|
||||||
if let Ok(data) = parse {
|
if let Ok(data) = parse {
|
||||||
(data.keypair(), data.node_info.ncp)
|
(data.keypair(), data.node_info.gossip)
|
||||||
} else {
|
} else {
|
||||||
eprintln!("failed to parse {}", path);
|
eprintln!("failed to parse {}", path);
|
||||||
exit(1);
|
exit(1);
|
||||||
|
@ -77,12 +77,12 @@ fn main() {
|
||||||
(Keypair::new(), socketaddr!([127, 0, 0, 1], 8700))
|
(Keypair::new(), socketaddr!([127, 0, 0, 1], 8700))
|
||||||
};
|
};
|
||||||
|
|
||||||
let node = Node::new_with_external_ip(keypair.pubkey(), &ncp);
|
let node = Node::new_with_external_ip(keypair.pubkey(), &gossip);
|
||||||
|
|
||||||
println!(
|
println!(
|
||||||
"replicating the data with keypair: {:?} ncp:{:?}",
|
"replicating the data with keypair: {:?} gossip:{:?}",
|
||||||
keypair.pubkey(),
|
keypair.pubkey(),
|
||||||
ncp
|
gossip
|
||||||
);
|
);
|
||||||
|
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//! The `broadcast_stage` broadcasts data from a leader node to validators
|
//! The `broadcast_service` broadcasts data from a leader node to validators
|
||||||
//!
|
//!
|
||||||
use cluster_info::{ClusterInfo, ClusterInfoError, NodeInfo};
|
use cluster_info::{ClusterInfo, ClusterInfoError, NodeInfo};
|
||||||
use counter::Counter;
|
use counter::Counter;
|
||||||
|
@ -24,7 +24,7 @@ use std::time::{Duration, Instant};
|
||||||
use window::{SharedWindow, WindowIndex, WindowUtil};
|
use window::{SharedWindow, WindowIndex, WindowUtil};
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||||
pub enum BroadcastStageReturnType {
|
pub enum BroadcastServiceReturnType {
|
||||||
LeaderRotation,
|
LeaderRotation,
|
||||||
ChannelDisconnected,
|
ChannelDisconnected,
|
||||||
}
|
}
|
||||||
|
@ -54,7 +54,7 @@ fn broadcast(
|
||||||
num_entries += entries.len();
|
num_entries += entries.len();
|
||||||
ventries.push(entries);
|
ventries.push(entries);
|
||||||
}
|
}
|
||||||
inc_new_counter_info!("broadcast_stage-entries_received", num_entries);
|
inc_new_counter_info!("broadcast_service-entries_received", num_entries);
|
||||||
|
|
||||||
let to_blobs_start = Instant::now();
|
let to_blobs_start = Instant::now();
|
||||||
let num_ticks: u64 = ventries
|
let num_ticks: u64 = ventries
|
||||||
|
@ -154,7 +154,7 @@ fn broadcast(
|
||||||
let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());
|
let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());
|
||||||
|
|
||||||
inc_new_counter_info!(
|
inc_new_counter_info!(
|
||||||
"broadcast_stage-time_ms",
|
"broadcast_service-time_ms",
|
||||||
duration_as_ms(&now.elapsed()) as usize
|
duration_as_ms(&now.elapsed()) as usize
|
||||||
);
|
);
|
||||||
info!(
|
info!(
|
||||||
|
@ -163,7 +163,7 @@ fn broadcast(
|
||||||
);
|
);
|
||||||
|
|
||||||
submit(
|
submit(
|
||||||
influxdb::Point::new("broadcast-stage")
|
influxdb::Point::new("broadcast-service")
|
||||||
.add_field(
|
.add_field(
|
||||||
"transmit-index",
|
"transmit-index",
|
||||||
influxdb::Value::Integer(transmit_index.data as i64),
|
influxdb::Value::Integer(transmit_index.data as i64),
|
||||||
|
@ -173,7 +173,7 @@ fn broadcast(
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Implement a destructor for the BroadcastStage thread to signal it exited
|
// Implement a destructor for the BroadcastService thread to signal it exited
|
||||||
// even on panics
|
// even on panics
|
||||||
struct Finalizer {
|
struct Finalizer {
|
||||||
exit_sender: Arc<AtomicBool>,
|
exit_sender: Arc<AtomicBool>,
|
||||||
|
@ -191,11 +191,11 @@ impl Drop for Finalizer {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct BroadcastStage {
|
pub struct BroadcastService {
|
||||||
thread_hdl: JoinHandle<BroadcastStageReturnType>,
|
thread_hdl: JoinHandle<BroadcastServiceReturnType>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BroadcastStage {
|
impl BroadcastService {
|
||||||
fn run(
|
fn run(
|
||||||
sock: &UdpSocket,
|
sock: &UdpSocket,
|
||||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||||
|
@ -205,7 +205,7 @@ impl BroadcastStage {
|
||||||
receiver: &Receiver<Vec<Entry>>,
|
receiver: &Receiver<Vec<Entry>>,
|
||||||
max_tick_height: Option<u64>,
|
max_tick_height: Option<u64>,
|
||||||
tick_height: u64,
|
tick_height: u64,
|
||||||
) -> BroadcastStageReturnType {
|
) -> BroadcastServiceReturnType {
|
||||||
let mut transmit_index = WindowIndex {
|
let mut transmit_index = WindowIndex {
|
||||||
data: entry_height,
|
data: entry_height,
|
||||||
coding: entry_height,
|
coding: entry_height,
|
||||||
|
@ -231,7 +231,7 @@ impl BroadcastStage {
|
||||||
) {
|
) {
|
||||||
match e {
|
match e {
|
||||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||||
return BroadcastStageReturnType::ChannelDisconnected
|
return BroadcastServiceReturnType::ChannelDisconnected
|
||||||
}
|
}
|
||||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||||
Error::ClusterInfoError(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
|
Error::ClusterInfoError(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
|
||||||
|
@ -252,11 +252,11 @@ impl BroadcastStage {
|
||||||
/// * `cluster_info` - ClusterInfo structure
|
/// * `cluster_info` - ClusterInfo structure
|
||||||
/// * `window` - Cache of blobs that we have broadcast
|
/// * `window` - Cache of blobs that we have broadcast
|
||||||
/// * `receiver` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
|
/// * `receiver` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
|
||||||
/// * `exit_sender` - Set to true when this stage exits, allows rest of Tpu to exit cleanly. Otherwise,
|
/// * `exit_sender` - Set to true when this service exits, allows rest of Tpu to exit cleanly.
|
||||||
/// when a Tpu stage closes, it only closes the stages that come after it. The stages
|
/// Otherwise, when a Tpu closes, it only closes the stages that come after it. The stages
|
||||||
/// that come before could be blocked on a receive, and never notice that they need to
|
/// that come before could be blocked on a receive, and never notice that they need to
|
||||||
/// exit. Now, if any stage of the Tpu closes, it will lead to closing the WriteStage (b/c
|
/// exit. Now, if any stage of the Tpu closes, it will lead to closing the WriteStage (b/c
|
||||||
/// WriteStage is the last stage in the pipeline), which will then close Broadcast stage,
|
/// WriteStage is the last stage in the pipeline), which will then close Broadcast service,
|
||||||
/// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
|
/// which will then close FetchStage in the Tpu, and then the rest of the Tpu,
|
||||||
/// completing the cycle.
|
/// completing the cycle.
|
||||||
pub fn new(
|
pub fn new(
|
||||||
|
@ -286,14 +286,14 @@ impl BroadcastStage {
|
||||||
)
|
)
|
||||||
}).unwrap();
|
}).unwrap();
|
||||||
|
|
||||||
BroadcastStage { thread_hdl }
|
Self { thread_hdl }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Service for BroadcastStage {
|
impl Service for BroadcastService {
|
||||||
type JoinReturnType = BroadcastStageReturnType;
|
type JoinReturnType = BroadcastServiceReturnType;
|
||||||
|
|
||||||
fn join(self) -> thread::Result<BroadcastStageReturnType> {
|
fn join(self) -> thread::Result<BroadcastServiceReturnType> {
|
||||||
self.thread_hdl.join()
|
self.thread_hdl.join()
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -191,10 +191,10 @@ impl ClusterInfo {
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|node| {
|
.map(|node| {
|
||||||
format!(
|
format!(
|
||||||
" ncp: {:20} | {}{}\n \
|
" gossip: {:20} | {}{}\n \
|
||||||
tpu: {:20} |\n \
|
tpu: {:20} |\n \
|
||||||
rpc: {:20} |\n",
|
rpc: {:20} |\n",
|
||||||
node.ncp.to_string(),
|
node.gossip.to_string(),
|
||||||
node.id,
|
node.id,
|
||||||
if node.id == leader_id {
|
if node.id == leader_id {
|
||||||
" <==== leader"
|
" <==== leader"
|
||||||
|
@ -231,7 +231,7 @@ impl ClusterInfo {
|
||||||
self.gossip.purge(now);
|
self.gossip.purge(now);
|
||||||
}
|
}
|
||||||
pub fn convergence(&self) -> usize {
|
pub fn convergence(&self) -> usize {
|
||||||
self.ncp_peers().len() + 1
|
self.gossip_peers().len() + 1
|
||||||
}
|
}
|
||||||
pub fn rpc_peers(&self) -> Vec<NodeInfo> {
|
pub fn rpc_peers(&self) -> Vec<NodeInfo> {
|
||||||
let me = self.my_data().id;
|
let me = self.my_data().id;
|
||||||
|
@ -246,7 +246,7 @@ impl ClusterInfo {
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn ncp_peers(&self) -> Vec<NodeInfo> {
|
pub fn gossip_peers(&self) -> Vec<NodeInfo> {
|
||||||
let me = self.my_data().id;
|
let me = self.my_data().id;
|
||||||
self.gossip
|
self.gossip
|
||||||
.crds
|
.crds
|
||||||
|
@ -254,7 +254,7 @@ impl ClusterInfo {
|
||||||
.values()
|
.values()
|
||||||
.filter_map(|x| x.value.contact_info())
|
.filter_map(|x| x.value.contact_info())
|
||||||
.filter(|x| x.id != me)
|
.filter(|x| x.id != me)
|
||||||
.filter(|x| ContactInfo::is_valid_address(&x.ncp))
|
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
|
||||||
.cloned()
|
.cloned()
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
@ -508,12 +508,12 @@ impl ClusterInfo {
|
||||||
pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
|
pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
|
||||||
// find a peer that appears to be accepting replication, as indicated
|
// find a peer that appears to be accepting replication, as indicated
|
||||||
// by a valid tvu port location
|
// by a valid tvu port location
|
||||||
let valid: Vec<_> = self.ncp_peers();
|
let valid: Vec<_> = self.gossip_peers();
|
||||||
if valid.is_empty() {
|
if valid.is_empty() {
|
||||||
Err(ClusterInfoError::NoPeers)?;
|
Err(ClusterInfoError::NoPeers)?;
|
||||||
}
|
}
|
||||||
let n = thread_rng().gen::<usize>() % valid.len();
|
let n = thread_rng().gen::<usize>() % valid.len();
|
||||||
let addr = valid[n].ncp; // send the request to the peer's gossip port
|
let addr = valid[n].gossip; // send the request to the peer's gossip port
|
||||||
let req = Protocol::RequestWindowIndex(self.my_data().clone(), ix);
|
let req = Protocol::RequestWindowIndex(self.my_data().clone(), ix);
|
||||||
let out = serialize(&req)?;
|
let out = serialize(&req)?;
|
||||||
Ok((addr, out))
|
Ok((addr, out))
|
||||||
|
@ -530,12 +530,12 @@ impl ClusterInfo {
|
||||||
.crds
|
.crds
|
||||||
.lookup(&peer_label)
|
.lookup(&peer_label)
|
||||||
.and_then(|v| v.contact_info())
|
.and_then(|v| v.contact_info())
|
||||||
.map(|peer_info| (peer, filter, peer_info.ncp, self_info))
|
.map(|peer_info| (peer, filter, peer_info.gossip, self_info))
|
||||||
}).collect();
|
}).collect();
|
||||||
pr.into_iter()
|
pr.into_iter()
|
||||||
.map(|(peer, filter, ncp, self_info)| {
|
.map(|(peer, filter, gossip, self_info)| {
|
||||||
self.gossip.mark_pull_request_creation_time(peer, now);
|
self.gossip.mark_pull_request_creation_time(peer, now);
|
||||||
(ncp, Protocol::PullRequest(filter, self_info))
|
(gossip, Protocol::PullRequest(filter, self_info))
|
||||||
}).collect()
|
}).collect()
|
||||||
}
|
}
|
||||||
fn new_push_requests(&mut self) -> Vec<(SocketAddr, Protocol)> {
|
fn new_push_requests(&mut self) -> Vec<(SocketAddr, Protocol)> {
|
||||||
|
@ -549,7 +549,7 @@ impl ClusterInfo {
|
||||||
.crds
|
.crds
|
||||||
.lookup(&peer_label)
|
.lookup(&peer_label)
|
||||||
.and_then(|v| v.contact_info())
|
.and_then(|v| v.contact_info())
|
||||||
.map(|p| p.ncp)
|
.map(|p| p.gossip)
|
||||||
}).map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
|
}).map(|peer| (peer, Protocol::PushMessage(self_id, msgs.clone())))
|
||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
@ -760,12 +760,12 @@ impl ClusterInfo {
|
||||||
// the remote side may not know his public IP:PORT, record what he looks like to us
|
// the remote side may not know his public IP:PORT, record what he looks like to us
|
||||||
// this may or may not be correct for everybody but it's better than leaving him with
|
// this may or may not be correct for everybody but it's better than leaving him with
|
||||||
// an unspecified address in our table
|
// an unspecified address in our table
|
||||||
if from.ncp.ip().is_unspecified() {
|
if from.gossip.ip().is_unspecified() {
|
||||||
inc_new_counter_info!("cluster_info-window-request-updates-unspec-ncp", 1);
|
inc_new_counter_info!("cluster_info-window-request-updates-unspec-gossip", 1);
|
||||||
from.ncp = *from_addr;
|
from.gossip = *from_addr;
|
||||||
}
|
}
|
||||||
inc_new_counter_info!("cluster_info-pull_request-rsp", len);
|
inc_new_counter_info!("cluster_info-pull_request-rsp", len);
|
||||||
to_blob(rsp, from.ncp).ok().into_iter().collect()
|
to_blob(rsp, from.gossip).ok().into_iter().collect()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: Pubkey, data: Vec<CrdsValue>) {
|
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: Pubkey, data: Vec<CrdsValue>) {
|
||||||
|
@ -810,7 +810,7 @@ impl ClusterInfo {
|
||||||
};
|
};
|
||||||
prune_msg.sign(&me.read().unwrap().keypair);
|
prune_msg.sign(&me.read().unwrap().keypair);
|
||||||
let rsp = Protocol::PruneMessage(self_id, prune_msg);
|
let rsp = Protocol::PruneMessage(self_id, prune_msg);
|
||||||
to_blob(rsp, ci.ncp).ok()
|
to_blob(rsp, ci.gossip).ok()
|
||||||
}).into_iter()
|
}).into_iter()
|
||||||
.collect();
|
.collect();
|
||||||
let mut blobs: Vec<_> = pushes
|
let mut blobs: Vec<_> = pushes
|
||||||
|
@ -1056,13 +1056,16 @@ impl Node {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pub fn new_with_external_ip(pubkey: Pubkey, ncp: &SocketAddr) -> Node {
|
pub fn new_with_external_ip(pubkey: Pubkey, gossip_addr: &SocketAddr) -> Node {
|
||||||
fn bind() -> (u16, UdpSocket) {
|
fn bind() -> (u16, UdpSocket) {
|
||||||
bind_in_range(FULLNODE_PORT_RANGE).expect("Failed to bind")
|
bind_in_range(FULLNODE_PORT_RANGE).expect("Failed to bind")
|
||||||
};
|
};
|
||||||
|
|
||||||
let (gossip_port, gossip) = if ncp.port() != 0 {
|
let (gossip_port, gossip) = if gossip_addr.port() != 0 {
|
||||||
(ncp.port(), bind_to(ncp.port(), false).expect("ncp bind"))
|
(
|
||||||
|
gossip_addr.port(),
|
||||||
|
bind_to(gossip_addr.port(), false).expect("gossip_addr bind"),
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
bind()
|
bind()
|
||||||
};
|
};
|
||||||
|
@ -1080,12 +1083,12 @@ impl Node {
|
||||||
|
|
||||||
let info = NodeInfo::new(
|
let info = NodeInfo::new(
|
||||||
pubkey,
|
pubkey,
|
||||||
SocketAddr::new(ncp.ip(), gossip_port),
|
SocketAddr::new(gossip_addr.ip(), gossip_port),
|
||||||
SocketAddr::new(ncp.ip(), replicate_port),
|
SocketAddr::new(gossip_addr.ip(), replicate_port),
|
||||||
SocketAddr::new(ncp.ip(), transaction_port),
|
SocketAddr::new(gossip_addr.ip(), transaction_port),
|
||||||
SocketAddr::new(ncp.ip(), storage_port),
|
SocketAddr::new(gossip_addr.ip(), storage_port),
|
||||||
SocketAddr::new(ncp.ip(), RPC_PORT),
|
SocketAddr::new(gossip_addr.ip(), RPC_PORT),
|
||||||
SocketAddr::new(ncp.ip(), RPC_PORT + 1),
|
SocketAddr::new(gossip_addr.ip(), RPC_PORT + 1),
|
||||||
0,
|
0,
|
||||||
);
|
);
|
||||||
trace!("new NodeInfo: {:?}", info);
|
trace!("new NodeInfo: {:?}", info);
|
||||||
|
@ -1171,10 +1174,10 @@ mod tests {
|
||||||
let rv = cluster_info.window_index_request(0);
|
let rv = cluster_info.window_index_request(0);
|
||||||
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
|
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
|
||||||
|
|
||||||
let ncp = socketaddr!([127, 0, 0, 1], 1234);
|
let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);
|
||||||
let nxt = NodeInfo::new(
|
let nxt = NodeInfo::new(
|
||||||
Keypair::new().pubkey(),
|
Keypair::new().pubkey(),
|
||||||
ncp,
|
gossip_addr,
|
||||||
socketaddr!([127, 0, 0, 1], 1235),
|
socketaddr!([127, 0, 0, 1], 1235),
|
||||||
socketaddr!([127, 0, 0, 1], 1236),
|
socketaddr!([127, 0, 0, 1], 1236),
|
||||||
socketaddr!([127, 0, 0, 1], 1237),
|
socketaddr!([127, 0, 0, 1], 1237),
|
||||||
|
@ -1184,13 +1187,13 @@ mod tests {
|
||||||
);
|
);
|
||||||
cluster_info.insert_info(nxt.clone());
|
cluster_info.insert_info(nxt.clone());
|
||||||
let rv = cluster_info.window_index_request(0).unwrap();
|
let rv = cluster_info.window_index_request(0).unwrap();
|
||||||
assert_eq!(nxt.ncp, ncp);
|
assert_eq!(nxt.gossip, gossip_addr);
|
||||||
assert_eq!(rv.0, nxt.ncp);
|
assert_eq!(rv.0, nxt.gossip);
|
||||||
|
|
||||||
let ncp2 = socketaddr!([127, 0, 0, 2], 1234);
|
let gossip_addr2 = socketaddr!([127, 0, 0, 2], 1234);
|
||||||
let nxt = NodeInfo::new(
|
let nxt = NodeInfo::new(
|
||||||
Keypair::new().pubkey(),
|
Keypair::new().pubkey(),
|
||||||
ncp2,
|
gossip_addr2,
|
||||||
socketaddr!([127, 0, 0, 1], 1235),
|
socketaddr!([127, 0, 0, 1], 1235),
|
||||||
socketaddr!([127, 0, 0, 1], 1236),
|
socketaddr!([127, 0, 0, 1], 1236),
|
||||||
socketaddr!([127, 0, 0, 1], 1237),
|
socketaddr!([127, 0, 0, 1], 1237),
|
||||||
|
@ -1204,10 +1207,10 @@ mod tests {
|
||||||
while !one || !two {
|
while !one || !two {
|
||||||
//this randomly picks an option, so eventually it should pick both
|
//this randomly picks an option, so eventually it should pick both
|
||||||
let rv = cluster_info.window_index_request(0).unwrap();
|
let rv = cluster_info.window_index_request(0).unwrap();
|
||||||
if rv.0 == ncp {
|
if rv.0 == gossip_addr {
|
||||||
one = true;
|
one = true;
|
||||||
}
|
}
|
||||||
if rv.0 == ncp2 {
|
if rv.0 == gossip_addr2 {
|
||||||
two = true;
|
two = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,7 @@ pub struct ContactInfo {
|
||||||
/// signature of this ContactInfo
|
/// signature of this ContactInfo
|
||||||
pub signature: Signature,
|
pub signature: Signature,
|
||||||
/// gossip address
|
/// gossip address
|
||||||
pub ncp: SocketAddr,
|
pub gossip: SocketAddr,
|
||||||
/// address to connect to for replication
|
/// address to connect to for replication
|
||||||
pub tvu: SocketAddr,
|
pub tvu: SocketAddr,
|
||||||
/// transactions address
|
/// transactions address
|
||||||
|
@ -48,7 +48,7 @@ impl Default for ContactInfo {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
ContactInfo {
|
ContactInfo {
|
||||||
id: Pubkey::default(),
|
id: Pubkey::default(),
|
||||||
ncp: socketaddr_any!(),
|
gossip: socketaddr_any!(),
|
||||||
tvu: socketaddr_any!(),
|
tvu: socketaddr_any!(),
|
||||||
tpu: socketaddr_any!(),
|
tpu: socketaddr_any!(),
|
||||||
storage_addr: socketaddr_any!(),
|
storage_addr: socketaddr_any!(),
|
||||||
|
@ -63,7 +63,7 @@ impl Default for ContactInfo {
|
||||||
impl ContactInfo {
|
impl ContactInfo {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
id: Pubkey,
|
id: Pubkey,
|
||||||
ncp: SocketAddr,
|
gossip: SocketAddr,
|
||||||
tvu: SocketAddr,
|
tvu: SocketAddr,
|
||||||
tpu: SocketAddr,
|
tpu: SocketAddr,
|
||||||
storage_addr: SocketAddr,
|
storage_addr: SocketAddr,
|
||||||
|
@ -74,7 +74,7 @@ impl ContactInfo {
|
||||||
ContactInfo {
|
ContactInfo {
|
||||||
id,
|
id,
|
||||||
signature: Signature::default(),
|
signature: Signature::default(),
|
||||||
ncp,
|
gossip,
|
||||||
tvu,
|
tvu,
|
||||||
tpu,
|
tpu,
|
||||||
storage_addr,
|
storage_addr,
|
||||||
|
@ -175,7 +175,7 @@ impl Signable for ContactInfo {
|
||||||
#[derive(Serialize)]
|
#[derive(Serialize)]
|
||||||
struct SignData {
|
struct SignData {
|
||||||
id: Pubkey,
|
id: Pubkey,
|
||||||
ncp: SocketAddr,
|
gossip: SocketAddr,
|
||||||
tvu: SocketAddr,
|
tvu: SocketAddr,
|
||||||
tpu: SocketAddr,
|
tpu: SocketAddr,
|
||||||
storage_addr: SocketAddr,
|
storage_addr: SocketAddr,
|
||||||
|
@ -187,7 +187,7 @@ impl Signable for ContactInfo {
|
||||||
let me = self;
|
let me = self;
|
||||||
let data = SignData {
|
let data = SignData {
|
||||||
id: me.id,
|
id: me.id,
|
||||||
ncp: me.ncp,
|
gossip: me.gossip,
|
||||||
tvu: me.tvu,
|
tvu: me.tvu,
|
||||||
tpu: me.tpu,
|
tpu: me.tpu,
|
||||||
storage_addr: me.storage_addr,
|
storage_addr: me.storage_addr,
|
||||||
|
@ -227,7 +227,7 @@ mod tests {
|
||||||
#[test]
|
#[test]
|
||||||
fn test_default() {
|
fn test_default() {
|
||||||
let ci = ContactInfo::default();
|
let ci = ContactInfo::default();
|
||||||
assert!(ci.ncp.ip().is_unspecified());
|
assert!(ci.gossip.ip().is_unspecified());
|
||||||
assert!(ci.tvu.ip().is_unspecified());
|
assert!(ci.tvu.ip().is_unspecified());
|
||||||
assert!(ci.rpc.ip().is_unspecified());
|
assert!(ci.rpc.ip().is_unspecified());
|
||||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||||
|
@ -237,7 +237,7 @@ mod tests {
|
||||||
#[test]
|
#[test]
|
||||||
fn test_multicast() {
|
fn test_multicast() {
|
||||||
let ci = ContactInfo::new_multicast();
|
let ci = ContactInfo::new_multicast();
|
||||||
assert!(ci.ncp.ip().is_multicast());
|
assert!(ci.gossip.ip().is_multicast());
|
||||||
assert!(ci.tvu.ip().is_multicast());
|
assert!(ci.tvu.ip().is_multicast());
|
||||||
assert!(ci.rpc.ip().is_multicast());
|
assert!(ci.rpc.ip().is_multicast());
|
||||||
assert!(ci.rpc_pubsub.ip().is_multicast());
|
assert!(ci.rpc_pubsub.ip().is_multicast());
|
||||||
|
@ -248,7 +248,7 @@ mod tests {
|
||||||
fn test_entry_point() {
|
fn test_entry_point() {
|
||||||
let addr = socketaddr!("127.0.0.1:10");
|
let addr = socketaddr!("127.0.0.1:10");
|
||||||
let ci = ContactInfo::new_entry_point(&addr);
|
let ci = ContactInfo::new_entry_point(&addr);
|
||||||
assert_eq!(ci.ncp, addr);
|
assert_eq!(ci.gossip, addr);
|
||||||
assert!(ci.tvu.ip().is_unspecified());
|
assert!(ci.tvu.ip().is_unspecified());
|
||||||
assert!(ci.rpc.ip().is_unspecified());
|
assert!(ci.rpc.ip().is_unspecified());
|
||||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||||
|
@ -260,7 +260,7 @@ mod tests {
|
||||||
let addr = socketaddr!("127.0.0.1:10");
|
let addr = socketaddr!("127.0.0.1:10");
|
||||||
let ci = ContactInfo::new_with_socketaddr(&addr);
|
let ci = ContactInfo::new_with_socketaddr(&addr);
|
||||||
assert_eq!(ci.tpu, addr);
|
assert_eq!(ci.tpu, addr);
|
||||||
assert_eq!(ci.ncp.port(), 11);
|
assert_eq!(ci.gossip.port(), 11);
|
||||||
assert_eq!(ci.tvu.port(), 12);
|
assert_eq!(ci.tvu.port(), 12);
|
||||||
assert_eq!(ci.rpc.port(), 8899);
|
assert_eq!(ci.rpc.port(), 8899);
|
||||||
assert_eq!(ci.rpc_pubsub.port(), 8900);
|
assert_eq!(ci.rpc_pubsub.port(), 8900);
|
||||||
|
@ -274,7 +274,7 @@ mod tests {
|
||||||
&socketaddr!("127.0.0.1:1234"),
|
&socketaddr!("127.0.0.1:1234"),
|
||||||
);
|
);
|
||||||
assert_eq!(d1.id, keypair.pubkey());
|
assert_eq!(d1.id, keypair.pubkey());
|
||||||
assert_eq!(d1.ncp, socketaddr!("127.0.0.1:1235"));
|
assert_eq!(d1.gossip, socketaddr!("127.0.0.1:1235"));
|
||||||
assert_eq!(d1.tvu, socketaddr!("127.0.0.1:1236"));
|
assert_eq!(d1.tvu, socketaddr!("127.0.0.1:1236"));
|
||||||
assert_eq!(d1.tpu, socketaddr!("127.0.0.1:1234"));
|
assert_eq!(d1.tpu, socketaddr!("127.0.0.1:1234"));
|
||||||
assert_eq!(d1.rpc, socketaddr!("127.0.0.1:8899"));
|
assert_eq!(d1.rpc, socketaddr!("127.0.0.1:8899"));
|
||||||
|
|
|
@ -59,7 +59,7 @@ impl CrdsGossipPull {
|
||||||
.values()
|
.values()
|
||||||
.filter_map(|v| v.value.contact_info())
|
.filter_map(|v| v.value.contact_info())
|
||||||
.filter(|v| {
|
.filter(|v| {
|
||||||
v.id != self_id && !v.ncp.ip().is_unspecified() && !v.ncp.ip().is_multicast()
|
v.id != self_id && !v.gossip.ip().is_unspecified() && !v.gossip.ip().is_multicast()
|
||||||
}).map(|item| {
|
}).map(|item| {
|
||||||
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
|
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
|
||||||
let weight = cmp::max(
|
let weight = cmp::max(
|
||||||
|
|
|
@ -184,7 +184,7 @@ impl CrdsGossipPush {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if let Some(contact) = val.1.value.contact_info() {
|
if let Some(contact) = val.1.value.contact_info() {
|
||||||
if !ContactInfo::is_valid_address(&contact.ncp) {
|
if !ContactInfo::is_valid_address(&contact.gossip) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +1,12 @@
|
||||||
//! The `fullnode` module hosts all the fullnode microservices.
|
//! The `fullnode` module hosts all the fullnode microservices.
|
||||||
|
|
||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
use broadcast_stage::BroadcastStage;
|
use broadcast_service::BroadcastService;
|
||||||
use cluster_info::{ClusterInfo, Node, NodeInfo};
|
use cluster_info::{ClusterInfo, Node, NodeInfo};
|
||||||
use db_ledger::{write_entries_to_ledger, DbLedger};
|
use db_ledger::{write_entries_to_ledger, DbLedger};
|
||||||
|
use gossip_service::GossipService;
|
||||||
use leader_scheduler::LeaderScheduler;
|
use leader_scheduler::LeaderScheduler;
|
||||||
use ledger::read_ledger;
|
use ledger::read_ledger;
|
||||||
use ncp::Ncp;
|
|
||||||
use rpc::JsonRpcService;
|
use rpc::JsonRpcService;
|
||||||
use rpc_pubsub::PubSubService;
|
use rpc_pubsub::PubSubService;
|
||||||
use service::Service;
|
use service::Service;
|
||||||
|
@ -31,19 +31,19 @@ pub enum NodeRole {
|
||||||
|
|
||||||
pub struct LeaderServices {
|
pub struct LeaderServices {
|
||||||
tpu: Tpu,
|
tpu: Tpu,
|
||||||
broadcast_stage: BroadcastStage,
|
broadcast_service: BroadcastService,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LeaderServices {
|
impl LeaderServices {
|
||||||
fn new(tpu: Tpu, broadcast_stage: BroadcastStage) -> Self {
|
fn new(tpu: Tpu, broadcast_service: BroadcastService) -> Self {
|
||||||
LeaderServices {
|
LeaderServices {
|
||||||
tpu,
|
tpu,
|
||||||
broadcast_stage,
|
broadcast_service,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn join(self) -> Result<Option<TpuReturnType>> {
|
pub fn join(self) -> Result<Option<TpuReturnType>> {
|
||||||
self.broadcast_stage.join()?;
|
self.broadcast_service.join()?;
|
||||||
self.tpu.join()
|
self.tpu.join()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -94,7 +94,7 @@ pub struct Fullnode {
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
rpc_service: Option<JsonRpcService>,
|
rpc_service: Option<JsonRpcService>,
|
||||||
rpc_pubsub_service: Option<PubSubService>,
|
rpc_pubsub_service: Option<PubSubService>,
|
||||||
ncp: Ncp,
|
gossip_service: GossipService,
|
||||||
bank: Arc<Bank>,
|
bank: Arc<Bank>,
|
||||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||||
ledger_path: String,
|
ledger_path: String,
|
||||||
|
@ -164,7 +164,7 @@ impl Fullnode {
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
"starting... local gossip address: {} (advertising {})",
|
"starting... local gossip address: {} (advertising {})",
|
||||||
local_gossip_addr, node.info.ncp
|
local_gossip_addr, node.info.gossip
|
||||||
);
|
);
|
||||||
let mut rpc_addr = node.info.rpc;
|
let mut rpc_addr = node.info.rpc;
|
||||||
if let Some(port) = rpc_port {
|
if let Some(port) = rpc_port {
|
||||||
|
@ -240,7 +240,7 @@ impl Fullnode {
|
||||||
let (rpc_service, rpc_pubsub_service) =
|
let (rpc_service, rpc_pubsub_service) =
|
||||||
Self::startup_rpc_services(rpc_addr, rpc_pubsub_addr, &bank, &cluster_info);
|
Self::startup_rpc_services(rpc_addr, rpc_pubsub_addr, &bank, &cluster_info);
|
||||||
|
|
||||||
let ncp = Ncp::new(
|
let gossip_service = GossipService::new(
|
||||||
&cluster_info,
|
&cluster_info,
|
||||||
shared_window.clone(),
|
shared_window.clone(),
|
||||||
Some(ledger_path),
|
Some(ledger_path),
|
||||||
|
@ -324,7 +324,7 @@ impl Fullnode {
|
||||||
last_entry_id,
|
last_entry_id,
|
||||||
);
|
);
|
||||||
|
|
||||||
let broadcast_stage = BroadcastStage::new(
|
let broadcast_service = BroadcastService::new(
|
||||||
node.sockets
|
node.sockets
|
||||||
.broadcast
|
.broadcast
|
||||||
.try_clone()
|
.try_clone()
|
||||||
|
@ -338,7 +338,7 @@ impl Fullnode {
|
||||||
bank.tick_height(),
|
bank.tick_height(),
|
||||||
tpu_exit,
|
tpu_exit,
|
||||||
);
|
);
|
||||||
let leader_state = LeaderServices::new(tpu, broadcast_stage);
|
let leader_state = LeaderServices::new(tpu, broadcast_service);
|
||||||
Some(NodeRole::Leader(leader_state))
|
Some(NodeRole::Leader(leader_state))
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -349,7 +349,7 @@ impl Fullnode {
|
||||||
shared_window,
|
shared_window,
|
||||||
bank,
|
bank,
|
||||||
sigverify_disabled,
|
sigverify_disabled,
|
||||||
ncp,
|
gossip_service,
|
||||||
rpc_service: Some(rpc_service),
|
rpc_service: Some(rpc_service),
|
||||||
rpc_pubsub_service: Some(rpc_pubsub_service),
|
rpc_pubsub_service: Some(rpc_pubsub_service),
|
||||||
node_role,
|
node_role,
|
||||||
|
@ -488,7 +488,7 @@ impl Fullnode {
|
||||||
&last_id,
|
&last_id,
|
||||||
);
|
);
|
||||||
|
|
||||||
let broadcast_stage = BroadcastStage::new(
|
let broadcast_service = BroadcastService::new(
|
||||||
self.broadcast_socket
|
self.broadcast_socket
|
||||||
.try_clone()
|
.try_clone()
|
||||||
.expect("Failed to clone broadcast socket"),
|
.expect("Failed to clone broadcast socket"),
|
||||||
|
@ -501,7 +501,7 @@ impl Fullnode {
|
||||||
tick_height,
|
tick_height,
|
||||||
tpu_exit,
|
tpu_exit,
|
||||||
);
|
);
|
||||||
let leader_state = LeaderServices::new(tpu, broadcast_stage);
|
let leader_state = LeaderServices::new(tpu, broadcast_service);
|
||||||
self.node_role = Some(NodeRole::Leader(leader_state));
|
self.node_role = Some(NodeRole::Leader(leader_state));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -626,7 +626,7 @@ impl Service for Fullnode {
|
||||||
rpc_pubsub_service.join()?;
|
rpc_pubsub_service.join()?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.ncp.join()?;
|
self.gossip_service.join()?;
|
||||||
|
|
||||||
match self.node_role {
|
match self.node_role {
|
||||||
Some(NodeRole::Validator(validator_service)) => {
|
Some(NodeRole::Validator(validator_service)) => {
|
||||||
|
@ -796,7 +796,7 @@ mod tests {
|
||||||
&bootstrap_leader_ledger_path,
|
&bootstrap_leader_ledger_path,
|
||||||
Arc::new(bootstrap_leader_keypair),
|
Arc::new(bootstrap_leader_keypair),
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -895,7 +895,7 @@ mod tests {
|
||||||
&bootstrap_leader_ledger_path,
|
&bootstrap_leader_ledger_path,
|
||||||
bootstrap_leader_keypair,
|
bootstrap_leader_keypair,
|
||||||
leader_vote_account_keypair,
|
leader_vote_account_keypair,
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -914,7 +914,7 @@ mod tests {
|
||||||
&validator_ledger_path,
|
&validator_ledger_path,
|
||||||
Arc::new(validator_keypair),
|
Arc::new(validator_keypair),
|
||||||
Arc::new(validator_vote_account_keypair),
|
Arc::new(validator_vote_account_keypair),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -942,7 +942,7 @@ mod tests {
|
||||||
let leader_keypair = Keypair::new();
|
let leader_keypair = Keypair::new();
|
||||||
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||||
let leader_id = leader_node.info.id;
|
let leader_id = leader_node.info.id;
|
||||||
let leader_ncp = leader_node.info.ncp;
|
let leader_gossip = leader_node.info.gossip;
|
||||||
|
|
||||||
// Create validator identity
|
// Create validator identity
|
||||||
let num_ending_ticks = 1;
|
let num_ending_ticks = 1;
|
||||||
|
@ -1001,7 +1001,7 @@ mod tests {
|
||||||
&validator_ledger_path,
|
&validator_ledger_path,
|
||||||
Arc::new(validator_keypair),
|
Arc::new(validator_keypair),
|
||||||
Arc::new(validator_vote_account_keypair),
|
Arc::new(validator_vote_account_keypair),
|
||||||
Some(leader_ncp),
|
Some(leader_gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//! The `ncp` module implements the network control plane.
|
//! The `gossip_service` module implements the network control plane.
|
||||||
|
|
||||||
use cluster_info::ClusterInfo;
|
use cluster_info::ClusterInfo;
|
||||||
use service::Service;
|
use service::Service;
|
||||||
|
@ -10,12 +10,12 @@ use std::thread::{self, JoinHandle};
|
||||||
use streamer;
|
use streamer;
|
||||||
use window::SharedWindow;
|
use window::SharedWindow;
|
||||||
|
|
||||||
pub struct Ncp {
|
pub struct GossipService {
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
thread_hdls: Vec<JoinHandle<()>>,
|
thread_hdls: Vec<JoinHandle<()>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Ncp {
|
impl GossipService {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||||
window: SharedWindow,
|
window: SharedWindow,
|
||||||
|
@ -26,14 +26,14 @@ impl Ncp {
|
||||||
let (request_sender, request_receiver) = channel();
|
let (request_sender, request_receiver) = channel();
|
||||||
let gossip_socket = Arc::new(gossip_socket);
|
let gossip_socket = Arc::new(gossip_socket);
|
||||||
trace!(
|
trace!(
|
||||||
"Ncp: id: {}, listening on: {:?}",
|
"GossipService: id: {}, listening on: {:?}",
|
||||||
&cluster_info.read().unwrap().my_data().id,
|
&cluster_info.read().unwrap().my_data().id,
|
||||||
gossip_socket.local_addr().unwrap()
|
gossip_socket.local_addr().unwrap()
|
||||||
);
|
);
|
||||||
let t_receiver =
|
let t_receiver =
|
||||||
streamer::blob_receiver(gossip_socket.clone(), exit.clone(), request_sender);
|
streamer::blob_receiver(gossip_socket.clone(), exit.clone(), request_sender);
|
||||||
let (response_sender, response_receiver) = channel();
|
let (response_sender, response_receiver) = channel();
|
||||||
let t_responder = streamer::responder("ncp", gossip_socket, response_receiver);
|
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
|
||||||
let t_listen = ClusterInfo::listen(
|
let t_listen = ClusterInfo::listen(
|
||||||
cluster_info.clone(),
|
cluster_info.clone(),
|
||||||
window,
|
window,
|
||||||
|
@ -44,7 +44,7 @@ impl Ncp {
|
||||||
);
|
);
|
||||||
let t_gossip = ClusterInfo::gossip(cluster_info.clone(), response_sender, exit.clone());
|
let t_gossip = ClusterInfo::gossip(cluster_info.clone(), response_sender, exit.clone());
|
||||||
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
||||||
Ncp { exit, thread_hdls }
|
Self { exit, thread_hdls }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn close(self) -> thread::Result<()> {
|
pub fn close(self) -> thread::Result<()> {
|
||||||
|
@ -53,7 +53,7 @@ impl Ncp {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Service for Ncp {
|
impl Service for GossipService {
|
||||||
type JoinReturnType = ();
|
type JoinReturnType = ();
|
||||||
|
|
||||||
fn join(self) -> thread::Result<()> {
|
fn join(self) -> thread::Result<()> {
|
||||||
|
@ -66,8 +66,8 @@ impl Service for Ncp {
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
use super::*;
|
||||||
use cluster_info::{ClusterInfo, Node};
|
use cluster_info::{ClusterInfo, Node};
|
||||||
use ncp::Ncp;
|
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
|
@ -80,7 +80,7 @@ mod tests {
|
||||||
let cluster_info = ClusterInfo::new(tn.info.clone());
|
let cluster_info = ClusterInfo::new(tn.info.clone());
|
||||||
let c = Arc::new(RwLock::new(cluster_info));
|
let c = Arc::new(RwLock::new(cluster_info));
|
||||||
let w = Arc::new(RwLock::new(vec![]));
|
let w = Arc::new(RwLock::new(vec![]));
|
||||||
let d = Ncp::new(&c, w, None, tn.sockets.gossip, exit.clone());
|
let d = GossipService::new(&c, w, None, tn.sockets.gossip, exit.clone());
|
||||||
d.close().expect("thread join");
|
d.close().expect("thread join");
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -13,7 +13,7 @@ pub mod bank;
|
||||||
pub mod banking_stage;
|
pub mod banking_stage;
|
||||||
pub mod blob_fetch_stage;
|
pub mod blob_fetch_stage;
|
||||||
pub mod bloom;
|
pub mod bloom;
|
||||||
pub mod broadcast_stage;
|
pub mod broadcast_service;
|
||||||
#[cfg(feature = "chacha")]
|
#[cfg(feature = "chacha")]
|
||||||
pub mod chacha;
|
pub mod chacha;
|
||||||
#[cfg(all(feature = "chacha", feature = "cuda"))]
|
#[cfg(all(feature = "chacha", feature = "cuda"))]
|
||||||
|
@ -38,12 +38,12 @@ pub mod entry;
|
||||||
pub mod erasure;
|
pub mod erasure;
|
||||||
pub mod fetch_stage;
|
pub mod fetch_stage;
|
||||||
pub mod fullnode;
|
pub mod fullnode;
|
||||||
|
pub mod gossip_service;
|
||||||
pub mod leader_scheduler;
|
pub mod leader_scheduler;
|
||||||
pub mod ledger;
|
pub mod ledger;
|
||||||
pub mod ledger_write_stage;
|
pub mod ledger_write_stage;
|
||||||
pub mod logger;
|
pub mod logger;
|
||||||
pub mod mint;
|
pub mod mint;
|
||||||
pub mod ncp;
|
|
||||||
pub mod netutil;
|
pub mod netutil;
|
||||||
pub mod packet;
|
pub mod packet;
|
||||||
pub mod poh;
|
pub mod poh;
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
use blob_fetch_stage::BlobFetchStage;
|
use blob_fetch_stage::BlobFetchStage;
|
||||||
use cluster_info::{ClusterInfo, Node, NodeInfo};
|
use cluster_info::{ClusterInfo, Node, NodeInfo};
|
||||||
use db_ledger::DbLedger;
|
use db_ledger::DbLedger;
|
||||||
|
use gossip_service::GossipService;
|
||||||
use leader_scheduler::LeaderScheduler;
|
use leader_scheduler::LeaderScheduler;
|
||||||
use ncp::Ncp;
|
|
||||||
use service::Service;
|
use service::Service;
|
||||||
use solana_sdk::hash::{Hash, Hasher};
|
use solana_sdk::hash::{Hash, Hasher};
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
|
@ -28,7 +28,7 @@ use window;
|
||||||
use window_service::window_service;
|
use window_service::window_service;
|
||||||
|
|
||||||
pub struct Replicator {
|
pub struct Replicator {
|
||||||
ncp: Ncp,
|
gossip_service: GossipService,
|
||||||
fetch_stage: BlobFetchStage,
|
fetch_stage: BlobFetchStage,
|
||||||
store_ledger_stage: StoreLedgerStage,
|
store_ledger_stage: StoreLedgerStage,
|
||||||
t_window: JoinHandle<()>,
|
t_window: JoinHandle<()>,
|
||||||
|
@ -134,7 +134,7 @@ impl Replicator {
|
||||||
|
|
||||||
let store_ledger_stage = StoreLedgerStage::new(entry_window_receiver, ledger_path);
|
let store_ledger_stage = StoreLedgerStage::new(entry_window_receiver, ledger_path);
|
||||||
|
|
||||||
let ncp = Ncp::new(
|
let gossip_service = GossipService::new(
|
||||||
&cluster_info,
|
&cluster_info,
|
||||||
shared_window.clone(),
|
shared_window.clone(),
|
||||||
ledger_path,
|
ledger_path,
|
||||||
|
@ -147,7 +147,7 @@ impl Replicator {
|
||||||
|
|
||||||
(
|
(
|
||||||
Replicator {
|
Replicator {
|
||||||
ncp,
|
gossip_service,
|
||||||
fetch_stage,
|
fetch_stage,
|
||||||
store_ledger_stage,
|
store_ledger_stage,
|
||||||
t_window,
|
t_window,
|
||||||
|
@ -158,7 +158,7 @@ impl Replicator {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn join(self) {
|
pub fn join(self) {
|
||||||
self.ncp.join().unwrap();
|
self.gossip_service.join().unwrap();
|
||||||
self.fetch_stage.join().unwrap();
|
self.fetch_stage.join().unwrap();
|
||||||
self.t_window.join().unwrap();
|
self.t_window.join().unwrap();
|
||||||
self.store_ledger_stage.join().unwrap();
|
self.store_ledger_stage.join().unwrap();
|
||||||
|
|
|
@ -7,8 +7,8 @@ use bank::Bank;
|
||||||
use bincode::serialize;
|
use bincode::serialize;
|
||||||
use bs58;
|
use bs58;
|
||||||
use cluster_info::{ClusterInfo, ClusterInfoError, NodeInfo};
|
use cluster_info::{ClusterInfo, ClusterInfoError, NodeInfo};
|
||||||
|
use gossip_service::GossipService;
|
||||||
use log::Level;
|
use log::Level;
|
||||||
use ncp::Ncp;
|
|
||||||
use packet::PACKET_DATA_SIZE;
|
use packet::PACKET_DATA_SIZE;
|
||||||
use result::{Error, Result};
|
use result::{Error, Result};
|
||||||
use rpc_request::{RpcClient, RpcRequest};
|
use rpc_request::{RpcClient, RpcRequest};
|
||||||
|
@ -347,7 +347,7 @@ pub fn poll_gossip_for_leader(leader_ncp: SocketAddr, timeout: Option<u64>) -> R
|
||||||
let my_addr = gossip_socket.local_addr().unwrap();
|
let my_addr = gossip_socket.local_addr().unwrap();
|
||||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(node)));
|
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(node)));
|
||||||
let window = Arc::new(RwLock::new(vec![]));
|
let window = Arc::new(RwLock::new(vec![]));
|
||||||
let ncp = Ncp::new(
|
let gossip_service = GossipService::new(
|
||||||
&cluster_info.clone(),
|
&cluster_info.clone(),
|
||||||
window,
|
window,
|
||||||
None,
|
None,
|
||||||
|
@ -390,7 +390,7 @@ pub fn poll_gossip_for_leader(leader_ncp: SocketAddr, timeout: Option<u64>) -> R
|
||||||
sleep(Duration::from_millis(100));
|
sleep(Duration::from_millis(100));
|
||||||
}
|
}
|
||||||
|
|
||||||
ncp.close()?;
|
gossip_service.close()?;
|
||||||
|
|
||||||
if log_enabled!(Level::Trace) {
|
if log_enabled!(Level::Trace) {
|
||||||
trace!("{}", cluster_info.read().unwrap().node_info_trace());
|
trace!("{}", cluster_info.read().unwrap().node_info_trace());
|
||||||
|
|
|
@ -160,7 +160,7 @@ mod tests {
|
||||||
sleep(Duration::from_millis(100));
|
sleep(Duration::from_millis(100));
|
||||||
|
|
||||||
let mut data = vec![0u8; 64];
|
let mut data = vec![0u8; 64];
|
||||||
// should be nothing on any socket ncp
|
// should be nothing on any gossip socket
|
||||||
assert!(nodes[0].0.recv_from(&mut data).is_err());
|
assert!(nodes[0].0.recv_from(&mut data).is_err());
|
||||||
assert!(nodes[1].0.recv_from(&mut data).is_err());
|
assert!(nodes[1].0.recv_from(&mut data).is_err());
|
||||||
assert!(nodes[2].0.recv_from(&mut data).is_err());
|
assert!(nodes[2].0.recv_from(&mut data).is_err());
|
||||||
|
|
|
@ -169,11 +169,11 @@ pub mod tests {
|
||||||
use cluster_info::{ClusterInfo, Node};
|
use cluster_info::{ClusterInfo, Node};
|
||||||
use db_ledger::DbLedger;
|
use db_ledger::DbLedger;
|
||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
|
use gossip_service::GossipService;
|
||||||
use leader_scheduler::LeaderScheduler;
|
use leader_scheduler::LeaderScheduler;
|
||||||
use ledger::get_tmp_ledger_path;
|
use ledger::get_tmp_ledger_path;
|
||||||
use logger;
|
use logger;
|
||||||
use mint::Mint;
|
use mint::Mint;
|
||||||
use ncp::Ncp;
|
|
||||||
use packet::SharedBlob;
|
use packet::SharedBlob;
|
||||||
use rocksdb::{Options, DB};
|
use rocksdb::{Options, DB};
|
||||||
use service::Service;
|
use service::Service;
|
||||||
|
@ -195,10 +195,10 @@ pub mod tests {
|
||||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||||
gossip: UdpSocket,
|
gossip: UdpSocket,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
) -> (Ncp, SharedWindow) {
|
) -> (GossipService, SharedWindow) {
|
||||||
let window = Arc::new(RwLock::new(window::default_window()));
|
let window = Arc::new(RwLock::new(window::default_window()));
|
||||||
let ncp = Ncp::new(&cluster_info, window.clone(), None, gossip, exit);
|
let gossip_service = GossipService::new(&cluster_info, window.clone(), None, gossip, exit);
|
||||||
(ncp, window)
|
(gossip_service, window)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Test that message sent from leader to target1 and replicated to target2
|
/// Test that message sent from leader to target1 and replicated to target2
|
||||||
|
|
|
@ -1128,7 +1128,7 @@ mod tests {
|
||||||
let drone_addr = receiver.recv().unwrap();
|
let drone_addr = receiver.recv().unwrap();
|
||||||
|
|
||||||
let mut config = WalletConfig::default();
|
let mut config = WalletConfig::default();
|
||||||
config.network = leader_data.ncp;
|
config.network = leader_data.gossip;
|
||||||
config.drone_port = Some(drone_addr.port());
|
config.drone_port = Some(drone_addr.port());
|
||||||
|
|
||||||
let tokens = 50;
|
let tokens = 50;
|
||||||
|
@ -1199,7 +1199,7 @@ mod tests {
|
||||||
let drone_addr = receiver.recv().unwrap();
|
let drone_addr = receiver.recv().unwrap();
|
||||||
|
|
||||||
let mut bob_config = WalletConfig::default();
|
let mut bob_config = WalletConfig::default();
|
||||||
bob_config.network = leader_data.ncp;
|
bob_config.network = leader_data.gossip;
|
||||||
bob_config.drone_port = Some(drone_addr.port());
|
bob_config.drone_port = Some(drone_addr.port());
|
||||||
bob_config.command = WalletCommand::AirDrop(50);
|
bob_config.command = WalletCommand::AirDrop(50);
|
||||||
|
|
||||||
|
@ -1282,11 +1282,11 @@ mod tests {
|
||||||
let rpc_client = RpcClient::new_from_socket(leader_data.rpc);
|
let rpc_client = RpcClient::new_from_socket(leader_data.rpc);
|
||||||
|
|
||||||
let mut config_payer = WalletConfig::default();
|
let mut config_payer = WalletConfig::default();
|
||||||
config_payer.network = leader_data.ncp;
|
config_payer.network = leader_data.gossip;
|
||||||
config_payer.drone_port = Some(drone_addr.port());
|
config_payer.drone_port = Some(drone_addr.port());
|
||||||
|
|
||||||
let mut config_witness = WalletConfig::default();
|
let mut config_witness = WalletConfig::default();
|
||||||
config_witness.network = leader_data.ncp;
|
config_witness.network = leader_data.gossip;
|
||||||
config_witness.drone_port = Some(drone_addr.port());
|
config_witness.drone_port = Some(drone_addr.port());
|
||||||
|
|
||||||
assert_ne!(config_payer.id.pubkey(), config_witness.id.pubkey());
|
assert_ne!(config_payer.id.pubkey(), config_witness.id.pubkey());
|
||||||
|
@ -1527,11 +1527,11 @@ mod tests {
|
||||||
let rpc_client = RpcClient::new_from_socket(leader_data.rpc);
|
let rpc_client = RpcClient::new_from_socket(leader_data.rpc);
|
||||||
|
|
||||||
let mut config_payer = WalletConfig::default();
|
let mut config_payer = WalletConfig::default();
|
||||||
config_payer.network = leader_data.ncp;
|
config_payer.network = leader_data.gossip;
|
||||||
config_payer.drone_port = Some(drone_addr.port());
|
config_payer.drone_port = Some(drone_addr.port());
|
||||||
|
|
||||||
let mut config_witness = WalletConfig::default();
|
let mut config_witness = WalletConfig::default();
|
||||||
config_witness.network = leader_data.ncp;
|
config_witness.network = leader_data.gossip;
|
||||||
config_witness.drone_port = Some(drone_addr.port());
|
config_witness.drone_port = Some(drone_addr.port());
|
||||||
|
|
||||||
assert_ne!(config_payer.id.pubkey(), config_witness.id.pubkey());
|
assert_ne!(config_payer.id.pubkey(), config_witness.id.pubkey());
|
||||||
|
|
|
@ -298,7 +298,7 @@ mod test {
|
||||||
|
|
||||||
let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
|
let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
|
||||||
let mut num_blobs_to_make = 10;
|
let mut num_blobs_to_make = 10;
|
||||||
let gossip_address = &tn.info.ncp;
|
let gossip_address = &tn.info.gossip;
|
||||||
let msgs = make_consecutive_blobs(
|
let msgs = make_consecutive_blobs(
|
||||||
me_id,
|
me_id,
|
||||||
num_blobs_to_make,
|
num_blobs_to_make,
|
||||||
|
@ -375,7 +375,7 @@ mod test {
|
||||||
w.set_id(&me_id).unwrap();
|
w.set_id(&me_id).unwrap();
|
||||||
assert_eq!(i, w.index().unwrap());
|
assert_eq!(i, w.index().unwrap());
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
w.meta.size = PACKET_DATA_SIZE;
|
||||||
w.meta.set_addr(&tn.info.ncp);
|
w.meta.set_addr(&tn.info.gossip);
|
||||||
}
|
}
|
||||||
msgs.push(b);
|
msgs.push(b);
|
||||||
}
|
}
|
||||||
|
@ -392,7 +392,7 @@ mod test {
|
||||||
w.set_id(&me_id).unwrap();
|
w.set_id(&me_id).unwrap();
|
||||||
assert_eq!(i, w.index().unwrap());
|
assert_eq!(i, w.index().unwrap());
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
w.meta.size = PACKET_DATA_SIZE;
|
||||||
w.meta.set_addr(&tn.info.ncp);
|
w.meta.set_addr(&tn.info.gossip);
|
||||||
}
|
}
|
||||||
msgs1.push(b);
|
msgs1.push(b);
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,8 +6,8 @@ extern crate solana_sdk;
|
||||||
|
|
||||||
use rayon::iter::*;
|
use rayon::iter::*;
|
||||||
use solana::cluster_info::{ClusterInfo, Node};
|
use solana::cluster_info::{ClusterInfo, Node};
|
||||||
|
use solana::gossip_service::GossipService;
|
||||||
use solana::logger;
|
use solana::logger;
|
||||||
use solana::ncp::Ncp;
|
|
||||||
use solana::packet::{Blob, SharedBlob};
|
use solana::packet::{Blob, SharedBlob};
|
||||||
use solana::result;
|
use solana::result;
|
||||||
use solana::service::Service;
|
use solana::service::Service;
|
||||||
|
@ -19,13 +19,13 @@ use std::sync::{Arc, RwLock};
|
||||||
use std::thread::sleep;
|
use std::thread::sleep;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, Ncp, UdpSocket) {
|
fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, GossipService, UdpSocket) {
|
||||||
let keypair = Keypair::new();
|
let keypair = Keypair::new();
|
||||||
let mut tn = Node::new_localhost_with_pubkey(keypair.pubkey());
|
let mut tn = Node::new_localhost_with_pubkey(keypair.pubkey());
|
||||||
let cluster_info = ClusterInfo::new_with_keypair(tn.info.clone(), Arc::new(keypair));
|
let cluster_info = ClusterInfo::new_with_keypair(tn.info.clone(), Arc::new(keypair));
|
||||||
let c = Arc::new(RwLock::new(cluster_info));
|
let c = Arc::new(RwLock::new(cluster_info));
|
||||||
let w = Arc::new(RwLock::new(vec![]));
|
let w = Arc::new(RwLock::new(vec![]));
|
||||||
let d = Ncp::new(&c.clone(), w, None, tn.sockets.gossip, exit);
|
let d = GossipService::new(&c.clone(), w, None, tn.sockets.gossip, exit);
|
||||||
let _ = c.read().unwrap().my_data();
|
let _ = c.read().unwrap().my_data();
|
||||||
(c, d, tn.sockets.replicate.pop().unwrap())
|
(c, d, tn.sockets.replicate.pop().unwrap())
|
||||||
}
|
}
|
||||||
|
@ -36,7 +36,7 @@ fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, Ncp, UdpSocket
|
||||||
/// tests that actually use this function are below
|
/// tests that actually use this function are below
|
||||||
fn run_gossip_topo<F>(num: usize, topo: F)
|
fn run_gossip_topo<F>(num: usize, topo: F)
|
||||||
where
|
where
|
||||||
F: Fn(&Vec<(Arc<RwLock<ClusterInfo>>, Ncp, UdpSocket)>) -> (),
|
F: Fn(&Vec<(Arc<RwLock<ClusterInfo>>, GossipService, UdpSocket)>) -> (),
|
||||||
{
|
{
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
|
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
|
||||||
|
@ -46,7 +46,7 @@ where
|
||||||
done = true;
|
done = true;
|
||||||
let total: usize = listen
|
let total: usize = listen
|
||||||
.iter()
|
.iter()
|
||||||
.map(|v| v.0.read().unwrap().ncp_peers().len())
|
.map(|v| v.0.read().unwrap().gossip_peers().len())
|
||||||
.sum();
|
.sum();
|
||||||
if (total + num) * 10 > num * num * 9 {
|
if (total + num) * 10 > num * num * 9 {
|
||||||
done = true;
|
done = true;
|
||||||
|
@ -165,9 +165,9 @@ pub fn cluster_info_retransmit() -> result::Result<()> {
|
||||||
trace!("waiting to converge:");
|
trace!("waiting to converge:");
|
||||||
let mut done = false;
|
let mut done = false;
|
||||||
for _ in 0..30 {
|
for _ in 0..30 {
|
||||||
done = c1.read().unwrap().ncp_peers().len() == num - 1
|
done = c1.read().unwrap().gossip_peers().len() == num - 1
|
||||||
&& c2.read().unwrap().ncp_peers().len() == num - 1
|
&& c2.read().unwrap().gossip_peers().len() == num - 1
|
||||||
&& c3.read().unwrap().ncp_peers().len() == num - 1;
|
&& c3.read().unwrap().gossip_peers().len() == num - 1;
|
||||||
if done {
|
if done {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,6 +12,7 @@ use solana::contact_info::ContactInfo;
|
||||||
use solana::db_ledger::DbLedger;
|
use solana::db_ledger::DbLedger;
|
||||||
use solana::entry::{reconstruct_entries_from_blobs, Entry};
|
use solana::entry::{reconstruct_entries_from_blobs, Entry};
|
||||||
use solana::fullnode::{Fullnode, FullnodeReturnType};
|
use solana::fullnode::{Fullnode, FullnodeReturnType};
|
||||||
|
use solana::gossip_service::GossipService;
|
||||||
use solana::leader_scheduler::{make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig};
|
use solana::leader_scheduler::{make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig};
|
||||||
use solana::ledger::{
|
use solana::ledger::{
|
||||||
create_tmp_genesis, create_tmp_sample_ledger, read_ledger, tmp_copy_ledger, LedgerWindow,
|
create_tmp_genesis, create_tmp_sample_ledger, read_ledger, tmp_copy_ledger, LedgerWindow,
|
||||||
|
@ -19,7 +20,6 @@ use solana::ledger::{
|
||||||
};
|
};
|
||||||
use solana::logger;
|
use solana::logger;
|
||||||
use solana::mint::Mint;
|
use solana::mint::Mint;
|
||||||
use solana::ncp::Ncp;
|
|
||||||
use solana::packet::SharedBlob;
|
use solana::packet::SharedBlob;
|
||||||
use solana::poh_service::NUM_TICKS_PER_SECOND;
|
use solana::poh_service::NUM_TICKS_PER_SECOND;
|
||||||
use solana::result;
|
use solana::result;
|
||||||
|
@ -41,7 +41,7 @@ use std::sync::{Arc, RwLock};
|
||||||
use std::thread::{sleep, Builder, JoinHandle};
|
use std::thread::{sleep, Builder, JoinHandle};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn make_spy_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<ClusterInfo>>, Pubkey) {
|
fn make_spy_node(leader: &NodeInfo) -> (GossipService, Arc<RwLock<ClusterInfo>>, Pubkey) {
|
||||||
let keypair = Keypair::new();
|
let keypair = Keypair::new();
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
let mut spy = Node::new_localhost_with_pubkey(keypair.pubkey());
|
let mut spy = Node::new_localhost_with_pubkey(keypair.pubkey());
|
||||||
|
@ -53,7 +53,7 @@ fn make_spy_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<ClusterInfo>>, Pubkey) {
|
||||||
spy_cluster_info.set_leader(leader.id);
|
spy_cluster_info.set_leader(leader.id);
|
||||||
let spy_cluster_info_ref = Arc::new(RwLock::new(spy_cluster_info));
|
let spy_cluster_info_ref = Arc::new(RwLock::new(spy_cluster_info));
|
||||||
let spy_window = Arc::new(RwLock::new(default_window()));
|
let spy_window = Arc::new(RwLock::new(default_window()));
|
||||||
let ncp = Ncp::new(
|
let gossip_service = GossipService::new(
|
||||||
&spy_cluster_info_ref,
|
&spy_cluster_info_ref,
|
||||||
spy_window,
|
spy_window,
|
||||||
None,
|
None,
|
||||||
|
@ -61,10 +61,12 @@ fn make_spy_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<ClusterInfo>>, Pubkey) {
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
(ncp, spy_cluster_info_ref, me)
|
(gossip_service, spy_cluster_info_ref, me)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn make_listening_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<ClusterInfo>>, Node, Pubkey) {
|
fn make_listening_node(
|
||||||
|
leader: &NodeInfo,
|
||||||
|
) -> (GossipService, Arc<RwLock<ClusterInfo>>, Node, Pubkey) {
|
||||||
let keypair = Keypair::new();
|
let keypair = Keypair::new();
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
let new_node = Node::new_localhost_with_pubkey(keypair.pubkey());
|
let new_node = Node::new_localhost_with_pubkey(keypair.pubkey());
|
||||||
|
@ -75,7 +77,7 @@ fn make_listening_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<ClusterInfo>>, Nod
|
||||||
new_node_cluster_info.set_leader(leader.id);
|
new_node_cluster_info.set_leader(leader.id);
|
||||||
let new_node_cluster_info_ref = Arc::new(RwLock::new(new_node_cluster_info));
|
let new_node_cluster_info_ref = Arc::new(RwLock::new(new_node_cluster_info));
|
||||||
let new_node_window = Arc::new(RwLock::new(default_window()));
|
let new_node_window = Arc::new(RwLock::new(default_window()));
|
||||||
let ncp = Ncp::new(
|
let gossip_service = GossipService::new(
|
||||||
&new_node_cluster_info_ref,
|
&new_node_cluster_info_ref,
|
||||||
new_node_window,
|
new_node_window,
|
||||||
None,
|
None,
|
||||||
|
@ -87,12 +89,12 @@ fn make_listening_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<ClusterInfo>>, Nod
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
(ncp, new_node_cluster_info_ref, new_node, me)
|
(gossip_service, new_node_cluster_info_ref, new_node, me)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn converge(leader: &NodeInfo, num_nodes: usize) -> Vec<NodeInfo> {
|
fn converge(leader: &NodeInfo, num_nodes: usize) -> Vec<NodeInfo> {
|
||||||
//lets spy on the network
|
//lets spy on the network
|
||||||
let (ncp, spy_ref, _) = make_spy_node(leader);
|
let (gossip_service, spy_ref, _) = make_spy_node(leader);
|
||||||
|
|
||||||
//wait for the network to converge
|
//wait for the network to converge
|
||||||
let mut converged = false;
|
let mut converged = false;
|
||||||
|
@ -108,7 +110,7 @@ fn converge(leader: &NodeInfo, num_nodes: usize) -> Vec<NodeInfo> {
|
||||||
sleep(Duration::new(1, 0));
|
sleep(Duration::new(1, 0));
|
||||||
}
|
}
|
||||||
assert!(converged);
|
assert!(converged);
|
||||||
ncp.close().unwrap();
|
gossip_service.close().unwrap();
|
||||||
rv
|
rv
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -170,7 +172,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
|
||||||
&zero_ledger_path,
|
&zero_ledger_path,
|
||||||
keypair,
|
keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
||||||
None,
|
None,
|
||||||
|
@ -275,7 +277,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
keypair,
|
keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
||||||
None,
|
None,
|
||||||
|
@ -313,7 +315,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
|
||||||
&zero_ledger_path,
|
&zero_ledger_path,
|
||||||
keypair,
|
keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
||||||
None,
|
None,
|
||||||
|
@ -407,7 +409,7 @@ fn test_multi_node_basic() {
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
keypair,
|
keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
||||||
None,
|
None,
|
||||||
|
@ -483,7 +485,7 @@ fn test_boot_validator_from_file() -> result::Result<()> {
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
keypair,
|
keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
||||||
None,
|
None,
|
||||||
|
@ -571,7 +573,7 @@ fn test_leader_restart_validator_start_from_old_ledger() -> result::Result<()> {
|
||||||
&stale_ledger_path,
|
&stale_ledger_path,
|
||||||
keypair,
|
keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
||||||
None,
|
None,
|
||||||
|
@ -702,7 +704,7 @@ fn test_multi_node_dynamic_network() {
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
Arc::new(keypair),
|
Arc::new(keypair),
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_data.ncp),
|
Some(leader_data.gossip),
|
||||||
true,
|
true,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
|
||||||
None,
|
None,
|
||||||
|
@ -848,7 +850,7 @@ fn test_leader_to_validator_transition() {
|
||||||
&leader_ledger_path,
|
&leader_ledger_path,
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_info.ncp),
|
Some(leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -856,7 +858,7 @@ fn test_leader_to_validator_transition() {
|
||||||
|
|
||||||
// Make an extra node for our leader to broadcast to,
|
// Make an extra node for our leader to broadcast to,
|
||||||
// who won't vote and mess with our leader's entry count
|
// who won't vote and mess with our leader's entry count
|
||||||
let (ncp, spy_node, _) = make_spy_node(&leader_info);
|
let (gossip_service, spy_node, _) = make_spy_node(&leader_info);
|
||||||
|
|
||||||
// Wait for the leader to see the spy node
|
// Wait for the leader to see the spy node
|
||||||
let mut converged = false;
|
let mut converged = false;
|
||||||
|
@ -921,7 +923,7 @@ fn test_leader_to_validator_transition() {
|
||||||
assert_eq!(bank.tick_height(), bootstrap_height);
|
assert_eq!(bank.tick_height(), bootstrap_height);
|
||||||
|
|
||||||
// Shut down
|
// Shut down
|
||||||
ncp.close().unwrap();
|
gossip_service.close().unwrap();
|
||||||
leader.close().unwrap();
|
leader.close().unwrap();
|
||||||
remove_dir_all(leader_ledger_path).unwrap();
|
remove_dir_all(leader_ledger_path).unwrap();
|
||||||
}
|
}
|
||||||
|
@ -988,7 +990,7 @@ fn test_leader_validator_basic() {
|
||||||
&validator_ledger_path,
|
&validator_ledger_path,
|
||||||
validator_keypair,
|
validator_keypair,
|
||||||
Arc::new(vote_account_keypair),
|
Arc::new(vote_account_keypair),
|
||||||
Some(leader_info.ncp),
|
Some(leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1000,7 +1002,7 @@ fn test_leader_validator_basic() {
|
||||||
&leader_ledger_path,
|
&leader_ledger_path,
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(leader_info.ncp),
|
Some(leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1177,7 +1179,7 @@ fn test_dropped_handoff_recovery() {
|
||||||
&bootstrap_leader_ledger_path,
|
&bootstrap_leader_ledger_path,
|
||||||
bootstrap_leader_keypair,
|
bootstrap_leader_keypair,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1200,7 +1202,7 @@ fn test_dropped_handoff_recovery() {
|
||||||
&validator_ledger_path,
|
&validator_ledger_path,
|
||||||
kp,
|
kp,
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1226,7 +1228,7 @@ fn test_dropped_handoff_recovery() {
|
||||||
&next_leader_ledger_path,
|
&next_leader_ledger_path,
|
||||||
next_leader_keypair,
|
next_leader_keypair,
|
||||||
Arc::new(vote_account_keypair),
|
Arc::new(vote_account_keypair),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1364,7 +1366,7 @@ fn test_full_leader_validator_network() {
|
||||||
&validator_ledger_path,
|
&validator_ledger_path,
|
||||||
Arc::new(kp),
|
Arc::new(kp),
|
||||||
Arc::new(vote_account_keypairs.pop_front().unwrap()),
|
Arc::new(vote_account_keypairs.pop_front().unwrap()),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1380,7 +1382,7 @@ fn test_full_leader_validator_network() {
|
||||||
&bootstrap_leader_ledger_path,
|
&bootstrap_leader_ledger_path,
|
||||||
Arc::new(leader_keypair),
|
Arc::new(leader_keypair),
|
||||||
Arc::new(leader_vote_keypair),
|
Arc::new(leader_vote_keypair),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
@ -1562,7 +1564,7 @@ fn test_broadcast_last_tick() {
|
||||||
&bootstrap_leader_ledger_path,
|
&bootstrap_leader_ledger_path,
|
||||||
Arc::new(bootstrap_leader_keypair),
|
Arc::new(bootstrap_leader_keypair),
|
||||||
Arc::new(Keypair::new()),
|
Arc::new(Keypair::new()),
|
||||||
Some(bootstrap_leader_info.ncp),
|
Some(bootstrap_leader_info.gossip),
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::new(&leader_scheduler_config),
|
LeaderScheduler::new(&leader_scheduler_config),
|
||||||
None,
|
None,
|
||||||
|
|
Loading…
Reference in New Issue