Delete NodeInfo type

Michael Vines 2019-03-08 17:23:07 -08:00
parent 5de38852d2
commit 17921c9fae
17 changed files with 135 additions and 130 deletions

View File

@@ -2,7 +2,7 @@ use solana_metrics;
 use rayon::prelude::*;
 use solana::client::mk_client;
-use solana::cluster_info::NodeInfo;
+use solana::contact_info::ContactInfo;
 use solana::thin_client::ThinClient;
 use solana_drone::drone::request_airdrop_transaction;
 use solana_metrics::influxdb;
@@ -48,7 +48,7 @@ pub fn sample_tx_count(
     exit_signal: &Arc<AtomicBool>,
     maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
     first_tx_count: u64,
-    v: &NodeInfo,
+    v: &ContactInfo,
     sample_period: u64,
 ) {
     let mut client = mk_client(&v);
@@ -179,9 +179,9 @@ pub fn generate_txs(
     dest: &[Keypair],
     threads: usize,
     reclaim: bool,
-    node_info: &NodeInfo,
+    contact_info: &ContactInfo,
 ) {
-    let mut client = mk_client(node_info);
+    let mut client = mk_client(contact_info);
     let blockhash = client.get_recent_blockhash();
     let tx_count = source.len();
     println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
@@ -236,12 +236,12 @@ pub fn generate_txs(
 pub fn do_tx_transfers(
     exit_signal: &Arc<AtomicBool>,
     shared_txs: &SharedTransactions,
-    node_info: &NodeInfo,
+    contact_info: &ContactInfo,
     shared_tx_thread_count: &Arc<AtomicIsize>,
     total_tx_sent_count: &Arc<AtomicUsize>,
     thread_batch_sleep_ms: usize,
 ) {
-    let client = mk_client(&node_info);
+    let client = mk_client(&contact_info);
     loop {
         if thread_batch_sleep_ms > 0 {
             sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
@@ -256,7 +256,7 @@ pub fn do_tx_transfers(
             println!(
                 "Transferring 1 unit {} times... to {}",
                 txs0.len(),
-                node_info.tpu
+                contact_info.tpu
             );
             let tx_len = txs0.len();
             let transfer_start = Instant::now();

View File

@@ -1,13 +1,14 @@
-use crate::cluster_info::{NodeInfo, FULLNODE_PORT_RANGE};
+use crate::cluster_info::FULLNODE_PORT_RANGE;
+use crate::contact_info::ContactInfo;
 use crate::thin_client::ThinClient;
 use std::time::Duration;
-pub fn mk_client(r: &NodeInfo) -> ThinClient {
+pub fn mk_client(r: &ContactInfo) -> ThinClient {
     let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
     ThinClient::new(r.rpc, r.tpu, transactions_socket)
 }
-pub fn mk_client_with_timeout(r: &NodeInfo, timeout: Duration) -> ThinClient {
+pub fn mk_client_with_timeout(r: &ContactInfo, timeout: Duration) -> ThinClient {
     let (_, transactions_socket) = solana_netutil::bind_in_range(FULLNODE_PORT_RANGE).unwrap();
     ThinClient::new_with_timeout(r.rpc, r.tpu, transactions_socket, timeout)
 }
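
For reference, a minimal sketch of a call site after this change, assuming a ContactInfo for a reachable node is already in hand (for example from gossip discovery); connect_and_fetch is a hypothetical helper, not part of this commit:

use solana::client::{mk_client, mk_client_with_timeout};
use solana::contact_info::ContactInfo;
use std::time::Duration;

// Hypothetical helper: both constructors now take &ContactInfo and read
// the rpc and tpu addresses directly off the struct, as before.
fn connect_and_fetch(node: &ContactInfo) {
    let mut client = mk_client(node);
    let _client_with_timeout = mk_client_with_timeout(node, Duration::from_secs(5));
    let _blockhash = client.get_recent_blockhash();
}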

View File

@@ -47,8 +47,6 @@ use std::sync::{Arc, RwLock};
 use std::thread::{sleep, Builder, JoinHandle};
 use std::time::{Duration, Instant};
-pub type NodeInfo = ContactInfo;
 pub const FULLNODE_PORT_RANGE: (u16, u16) = (8000, 10_000);
 /// The fanout for Ledger Replication
@@ -65,7 +63,6 @@ pub enum ClusterInfoError {
     NoPeers,
     NoLeader,
     BadContactInfo,
-    BadNodeInfo,
     BadGossipAddress,
 }
 #[derive(Clone)]
@@ -78,7 +75,7 @@ pub struct ClusterInfo {
     // purged
     gossip_leader_id: Pubkey,
     /// The network entrypoint
-    entrypoint: Option<NodeInfo>,
+    entrypoint: Option<ContactInfo>,
 }
 #[derive(Default, Clone)]
@@ -162,31 +159,31 @@ enum Protocol {
     /// Window protocol messages
     /// TODO: move this message to a different module
-    RequestWindowIndex(NodeInfo, u64, u64),
-    RequestHighestWindowIndex(NodeInfo, u64, u64),
+    RequestWindowIndex(ContactInfo, u64, u64),
+    RequestHighestWindowIndex(ContactInfo, u64, u64),
 }
 impl ClusterInfo {
     /// Without a valid keypair gossip will not function. Only useful for tests.
-    pub fn new_with_invalid_keypair(node_info: NodeInfo) -> Self {
-        Self::new(node_info, Arc::new(Keypair::new()))
+    pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
+        Self::new(contact_info, Arc::new(Keypair::new()))
     }
-    pub fn new(node_info: NodeInfo, keypair: Arc<Keypair>) -> Self {
+    pub fn new(contact_info: ContactInfo, keypair: Arc<Keypair>) -> Self {
         let mut me = Self {
             gossip: CrdsGossip::default(),
             keypair,
             gossip_leader_id: Pubkey::default(),
             entrypoint: None,
         };
-        let id = node_info.id;
+        let id = contact_info.id;
         me.gossip.set_self(id);
-        me.insert_self(node_info);
+        me.insert_self(contact_info);
         me.push_self(&HashMap::new());
         me
     }
-    pub fn insert_self(&mut self, node_info: NodeInfo) {
-        if self.id() == node_info.id {
-            let mut value = CrdsValue::ContactInfo(node_info.clone());
+    pub fn insert_self(&mut self, contact_info: ContactInfo) {
+        if self.id() == contact_info.id {
+            let mut value = CrdsValue::ContactInfo(contact_info.clone());
             value.sign(&self.keypair);
             let _ = self.gossip.crds.insert(value, timestamp());
         }
@@ -201,39 +198,39 @@ impl ClusterInfo {
         self.gossip.process_push_message(&[entry], now);
     }
     // TODO kill insert_info, only used by tests
-    pub fn insert_info(&mut self, node_info: NodeInfo) {
-        let mut value = CrdsValue::ContactInfo(node_info);
+    pub fn insert_info(&mut self, contact_info: ContactInfo) {
+        let mut value = CrdsValue::ContactInfo(contact_info);
         value.sign(&self.keypair);
         let _ = self.gossip.crds.insert(value, timestamp());
     }
-    pub fn set_entrypoint(&mut self, entrypoint: NodeInfo) {
+    pub fn set_entrypoint(&mut self, entrypoint: ContactInfo) {
         self.entrypoint = Some(entrypoint)
     }
     pub fn id(&self) -> Pubkey {
         self.gossip.id
     }
-    pub fn lookup(&self, id: Pubkey) -> Option<&NodeInfo> {
+    pub fn lookup(&self, id: Pubkey) -> Option<&ContactInfo> {
         let entry = CrdsValueLabel::ContactInfo(id);
         self.gossip
             .crds
             .lookup(&entry)
             .and_then(|x| x.contact_info())
     }
-    pub fn my_data(&self) -> NodeInfo {
+    pub fn my_data(&self) -> ContactInfo {
         self.lookup(self.id()).cloned().unwrap()
     }
     fn leader_id(&self) -> Pubkey {
         self.gossip_leader_id
     }
     // Deprecated: don't use leader_data().
-    pub fn leader_data(&self) -> Option<&NodeInfo> {
+    pub fn leader_data(&self) -> Option<&ContactInfo> {
         let leader_id = self.leader_id();
         if leader_id == Pubkey::default() {
             return None;
         }
         self.lookup(leader_id)
     }
-    pub fn node_info_trace(&self) -> String {
+    pub fn contact_info_trace(&self) -> String {
         let leader_id = self.leader_id();
         let nodes: Vec<_> = self
             .rpc_peers()
@@ -312,7 +309,7 @@ impl ClusterInfo {
         self.gossip.purge(now);
     }
-    pub fn rpc_peers(&self) -> Vec<NodeInfo> {
+    pub fn rpc_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         self.gossip
             .crds
@@ -325,7 +322,7 @@ impl ClusterInfo {
             .collect()
     }
-    pub fn gossip_peers(&self) -> Vec<NodeInfo> {
+    pub fn gossip_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         self.gossip
             .crds
@@ -339,7 +336,7 @@ impl ClusterInfo {
     }
     /// compute broadcast table
-    pub fn tvu_peers(&self) -> Vec<NodeInfo> {
+    pub fn tvu_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         self.gossip
             .crds
@@ -353,7 +350,7 @@ impl ClusterInfo {
     }
     /// all peers that have a valid tvu
-    pub fn retransmit_peers(&self) -> Vec<NodeInfo> {
+    pub fn retransmit_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         self.gossip
             .crds
@@ -367,7 +364,7 @@ impl ClusterInfo {
     }
     /// all tvu peers with valid gossip addrs
-    pub fn repair_peers(&self) -> Vec<NodeInfo> {
+    pub fn repair_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         ClusterInfo::tvu_peers(self)
             .into_iter()
@@ -377,9 +374,9 @@ impl ClusterInfo {
     }
     fn sort_by_stake<S: std::hash::BuildHasher>(
-        peers: &[NodeInfo],
+        peers: &[ContactInfo],
         stakes: &HashMap<Pubkey, u64, S>,
-    ) -> Vec<(u64, NodeInfo)> {
+    ) -> Vec<(u64, ContactInfo)> {
         let mut peers_with_stakes: Vec<_> = peers
             .iter()
             .map(|c| (*stakes.get(&c.id).unwrap_or(&0), c.clone()))
@@ -398,7 +395,7 @@ impl ClusterInfo {
     pub fn sorted_retransmit_peers<S: std::hash::BuildHasher>(
         &self,
         stakes: &HashMap<Pubkey, u64, S>,
-    ) -> Vec<NodeInfo> {
+    ) -> Vec<ContactInfo> {
         let peers = self.retransmit_peers();
         let peers_with_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
         peers_with_stakes
@@ -407,7 +404,7 @@ impl ClusterInfo {
             .collect()
     }
-    pub fn sorted_tvu_peers(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<NodeInfo> {
+    pub fn sorted_tvu_peers(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<ContactInfo> {
         let peers = self.tvu_peers();
         let peers_with_stakes: Vec<_> = ClusterInfo::sort_by_stake(&peers, stakes);
         peers_with_stakes
@@ -417,7 +414,7 @@ impl ClusterInfo {
     }
     /// compute broadcast table
-    pub fn tpu_peers(&self) -> Vec<NodeInfo> {
+    pub fn tpu_peers(&self) -> Vec<ContactInfo> {
         let me = self.my_data().id;
         self.gossip
             .crds
@@ -566,7 +563,7 @@ impl ClusterInfo {
     pub fn broadcast(
         id: &Pubkey,
         contains_last_tick: bool,
-        broadcast_table: &[NodeInfo],
+        broadcast_table: &[ContactInfo],
         s: &UdpSocket,
         blobs: &[SharedBlob],
     ) -> Result<()> {
@@ -599,11 +596,11 @@ impl ClusterInfo {
     /// We need to avoid having obj locked while doing any io, such as the `send_to`
     pub fn retransmit_to(
         obj: &Arc<RwLock<Self>>,
-        peers: &[NodeInfo],
+        peers: &[ContactInfo],
         blob: &SharedBlob,
         s: &UdpSocket,
     ) -> Result<()> {
-        let (me, orders): (NodeInfo, &[NodeInfo]) = {
+        let (me, orders): (ContactInfo, &[ContactInfo]) = {
             // copy to avoid locking during IO
             let s = obj.read().unwrap();
             (s.my_data().clone(), peers)
@@ -646,7 +643,7 @@ impl ClusterInfo {
     fn send_orders(
         id: &Pubkey,
         s: &UdpSocket,
-        orders: Vec<(SharedBlob, Vec<&NodeInfo>)>,
+        orders: Vec<(SharedBlob, Vec<&ContactInfo>)>,
     ) -> Vec<io::Result<usize>> {
         orders
             .into_iter()
@@ -694,8 +691,8 @@ impl ClusterInfo {
     pub fn create_broadcast_orders<'a, T>(
         contains_last_tick: bool,
         blobs: &[T],
-        broadcast_table: &'a [NodeInfo],
-    ) -> Vec<(T, Vec<&'a NodeInfo>)>
+        broadcast_table: &'a [ContactInfo],
+    ) -> Vec<(T, Vec<&'a ContactInfo>)>
     where
         T: Clone,
     {
@@ -910,10 +907,10 @@ impl ClusterInfo {
     }
     fn run_window_request(
-        from: &NodeInfo,
+        from: &ContactInfo,
         from_addr: &SocketAddr,
         blocktree: Option<&Arc<Blocktree>>,
-        me: &NodeInfo,
+        me: &ContactInfo,
         slot: u64,
         blob_index: u64,
     ) -> Vec<SharedBlob> {
@@ -1271,11 +1268,11 @@ impl ClusterInfo {
             .unwrap()
     }
-    pub fn spy_node(id: &Pubkey) -> (NodeInfo, UdpSocket) {
+    pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket) {
         let (_, gossip_socket) = bind_in_range(FULLNODE_PORT_RANGE).unwrap();
         let daddr = socketaddr_any!();
-        let node = NodeInfo::new(*id, daddr, daddr, daddr, daddr, daddr, daddr, timestamp());
+        let node = ContactInfo::new(*id, daddr, daddr, daddr, daddr, daddr, daddr, timestamp());
         (node, gossip_socket)
     }
 }
@@ -1294,7 +1291,7 @@ pub fn compute_retransmit_peers<S: std::hash::BuildHasher>(
     fanout: usize,
    hood_size: usize,
     grow: bool,
-) -> (Vec<NodeInfo>, Vec<NodeInfo>) {
+) -> (Vec<ContactInfo>, Vec<ContactInfo>) {
     let peers = cluster_info.read().unwrap().sorted_retransmit_peers(stakes);
     let my_id = cluster_info.read().unwrap().id();
     //calc num_layers and num_neighborhoods using the total number of nodes
@@ -1341,7 +1338,7 @@ pub struct Sockets {
 #[derive(Debug)]
 pub struct Node {
-    pub info: NodeInfo,
+    pub info: ContactInfo,
     pub sockets: Sockets,
 }
@@ -1364,7 +1361,7 @@ impl Node {
         let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
         let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
         let storage = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let info = NodeInfo::new(
+        let info = ContactInfo::new(
             pubkey,
             gossip.local_addr().unwrap(),
             tvu.local_addr().unwrap(),
@@ -1413,7 +1410,7 @@ impl Node {
         let (_, retransmit) = bind();
         let (storage_port, _) = bind();
-        let info = NodeInfo::new(
+        let info = ContactInfo::new(
             pubkey,
             SocketAddr::new(gossip_addr.ip(), gossip_port),
             SocketAddr::new(gossip_addr.ip(), tvu_port),
@@ -1423,7 +1420,7 @@ impl Node {
             SocketAddr::new(gossip_addr.ip(), RPC_PORT + 1),
             0,
         );
-        trace!("new NodeInfo: {:?}", info);
+        trace!("new ContactInfo: {:?}", info);
         Node {
             info,
@@ -1488,42 +1485,42 @@ mod tests {
     #[test]
     fn test_cluster_info_new() {
-        let d = NodeInfo::new_localhost(Keypair::new().pubkey(), timestamp());
+        let d = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
         let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
         assert_eq!(d.id, cluster_info.my_data().id);
     }
     #[test]
     fn insert_info_test() {
-        let d = NodeInfo::new_localhost(Keypair::new().pubkey(), timestamp());
+        let d = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
         let mut cluster_info = ClusterInfo::new_with_invalid_keypair(d);
-        let d = NodeInfo::new_localhost(Keypair::new().pubkey(), timestamp());
+        let d = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
         let label = CrdsValueLabel::ContactInfo(d.id);
         cluster_info.insert_info(d);
         assert!(cluster_info.gossip.crds.lookup(&label).is_some());
     }
     #[test]
     fn test_insert_self() {
-        let d = NodeInfo::new_localhost(Keypair::new().pubkey(), timestamp());
+        let d = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
         let mut cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
         let entry_label = CrdsValueLabel::ContactInfo(cluster_info.id());
         assert!(cluster_info.gossip.crds.lookup(&entry_label).is_some());
         // inserting something else shouldn't work
-        let d = NodeInfo::new_localhost(Keypair::new().pubkey(), timestamp());
+        let d = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
         cluster_info.insert_self(d.clone());
         let label = CrdsValueLabel::ContactInfo(d.id);
         assert!(cluster_info.gossip.crds.lookup(&label).is_none());
     }
     #[test]
     fn window_index_request() {
-        let me = NodeInfo::new_localhost(Keypair::new().pubkey(), timestamp());
+        let me = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
         let mut cluster_info = ClusterInfo::new_with_invalid_keypair(me);
         let rv = cluster_info.window_index_request(0, 0, false);
         assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
         let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);
-        let nxt = NodeInfo::new(
+        let nxt = ContactInfo::new(
             Keypair::new().pubkey(),
             gossip_addr,
             socketaddr!([127, 0, 0, 1], 1235),
@@ -1539,7 +1536,7 @@ mod tests {
         assert_eq!(rv.0, nxt.gossip);
         let gossip_addr2 = socketaddr!([127, 0, 0, 2], 1234);
-        let nxt = NodeInfo::new(
+        let nxt = ContactInfo::new(
             Keypair::new().pubkey(),
             gossip_addr2,
             socketaddr!([127, 0, 0, 1], 1235),
@@ -1572,7 +1569,7 @@ mod tests {
         let ledger_path = get_tmp_ledger_path!();
         {
             let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
-            let me = NodeInfo::new(
+            let me = ContactInfo::new(
                 Keypair::new().pubkey(),
                 socketaddr!("127.0.0.1:1234"),
                 socketaddr!("127.0.0.1:1235"),
@@ -1674,9 +1671,10 @@ mod tests {
     #[test]
     fn test_default_leader() {
        solana_logger::setup();
-        let node_info = NodeInfo::new_localhost(Keypair::new().pubkey(), 0);
-        let mut cluster_info = ClusterInfo::new_with_invalid_keypair(node_info);
-        let network_entry_point = NodeInfo::new_gossip_entry_point(&socketaddr!("127.0.0.1:1239"));
+        let contact_info = ContactInfo::new_localhost(Keypair::new().pubkey(), 0);
+        let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
+        let network_entry_point =
+            ContactInfo::new_gossip_entry_point(&socketaddr!("127.0.0.1:1239"));
         cluster_info.insert_info(network_entry_point);
         assert!(cluster_info.leader_data().is_none());
     }
@@ -1754,10 +1752,10 @@ mod tests {
         let keypair = Keypair::new();
         let peer_keypair = Keypair::new();
         let leader_keypair = Keypair::new();
-        let node_info = NodeInfo::new_localhost(keypair.pubkey(), 0);
-        let leader = NodeInfo::new_localhost(leader_keypair.pubkey(), 0);
-        let peer = NodeInfo::new_localhost(peer_keypair.pubkey(), 0);
-        let mut cluster_info = ClusterInfo::new(node_info.clone(), Arc::new(keypair));
+        let contact_info = ContactInfo::new_localhost(keypair.pubkey(), 0);
+        let leader = ContactInfo::new_localhost(leader_keypair.pubkey(), 0);
+        let peer = ContactInfo::new_localhost(peer_keypair.pubkey(), 0);
+        let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
         cluster_info.set_leader(leader.id);
         cluster_info.insert_info(peer.clone());
         //check that all types of gossip messages are signed correctly
@@ -1928,8 +1926,8 @@ mod tests {
     fn test_push_vote() {
         let keys = Keypair::new();
         let now = timestamp();
-        let node_info = NodeInfo::new_localhost(keys.pubkey(), 0);
-        let mut cluster_info = ClusterInfo::new_with_invalid_keypair(node_info);
+        let contact_info = ContactInfo::new_localhost(keys.pubkey(), 0);
+        let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
         // make sure empty crds is handled correctly
         let (votes, max_ts) = cluster_info.get_votes(now);
@@ -1955,11 +1953,11 @@ mod tests {
     fn test_add_entrypoint() {
         let node_keypair = Arc::new(Keypair::new());
         let mut cluster_info = ClusterInfo::new(
-            NodeInfo::new_localhost(node_keypair.pubkey(), timestamp()),
+            ContactInfo::new_localhost(node_keypair.pubkey(), timestamp()),
             node_keypair,
         );
         let entrypoint_id = Keypair::new().pubkey();
-        let entrypoint = NodeInfo::new_localhost(entrypoint_id, timestamp());
+        let entrypoint = ContactInfo::new_localhost(entrypoint_id, timestamp());
         cluster_info.set_entrypoint(entrypoint.clone());
         let pulls = cluster_info.new_pull_requests(&HashMap::new());
         assert_eq!(1, pulls.len());
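
With the pub type NodeInfo = ContactInfo alias gone, callers name ContactInfo directly. A minimal sketch mirroring the tests above; make_cluster_info is a hypothetical helper, not part of this commit:

use solana::cluster_info::ClusterInfo;
use solana::contact_info::ContactInfo;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::timestamp;

// Build a ClusterInfo for the local node, as the tests in this file do.
fn make_cluster_info() -> ClusterInfo {
    let contact_info = ContactInfo::new_localhost(Keypair::new().pubkey(), timestamp());
    ClusterInfo::new_with_invalid_keypair(contact_info)
}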

View File

@@ -179,7 +179,7 @@ pub fn get_weight(max_weight: f32, time_since_last_selected: u32, stake: f32) ->
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::cluster_info::NodeInfo;
+    use crate::contact_info::ContactInfo;
     use solana_sdk::hash::hash;
     use solana_sdk::timing::timestamp;
@@ -188,7 +188,7 @@ mod test {
         let mut crds_gossip = CrdsGossip::default();
         crds_gossip.id = Pubkey::new(&[0; 32]);
         let id = crds_gossip.id;
-        let ci = NodeInfo::new_localhost(Pubkey::new(&[1; 32]), 0);
+        let ci = ContactInfo::new_localhost(Pubkey::new(&[1; 32]), 0);
         let prune_pubkey = Pubkey::new(&[2; 32]);
         crds_gossip
             .crds

View File

@@ -3,7 +3,8 @@
 use crate::bank_forks::BankForks;
 use crate::blocktree::Blocktree;
 use crate::blocktree_processor::{self, BankForksInfo};
-use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
+use crate::cluster_info::{ClusterInfo, Node};
+use crate::contact_info::ContactInfo;
 use crate::entry::create_ticks;
 use crate::entry::next_entry_mut;
 use crate::entry::Entry;
@@ -82,7 +83,7 @@ impl Fullnode {
         ledger_path: &str,
         vote_account: Pubkey,
         voting_keypair: T,
-        entrypoint_info_option: Option<&NodeInfo>,
+        entrypoint_info_option: Option<&ContactInfo>,
         config: &FullnodeConfig,
     ) -> Self
     where

View File

@@ -70,7 +70,7 @@ pub fn discover(gossip_addr: &SocketAddr, num_nodes: usize) -> std::io::Result<V
         trace!(
             "discover success in {}s...\n{}",
             now.elapsed().as_secs(),
-            spy_ref.read().unwrap().node_info_trace()
+            spy_ref.read().unwrap().contact_info_trace()
         );
         exit.store(true, Ordering::Relaxed);
@@ -80,7 +80,7 @@ pub fn discover(gossip_addr: &SocketAddr, num_nodes: usize) -> std::io::Result<V
         if i % 20 == 0 {
             info!(
                 "discovering...\n{}",
-                spy_ref.read().unwrap().node_info_trace()
+                spy_ref.read().unwrap().contact_info_trace()
             );
         }
         sleep(Duration::from_millis(
@@ -93,7 +93,7 @@ pub fn discover(gossip_addr: &SocketAddr, num_nodes: usize) -> std::io::Result<V
     gossip_service.join().unwrap();
     info!(
         "discover failed...\n{}",
-        spy_ref.read().unwrap().node_info_trace()
+        spy_ref.read().unwrap().contact_info_trace()
     );
     Err(std::io::Error::new(
         std::io::ErrorKind::Other,
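
A small usage sketch for discover(), which blocks until num_nodes are visible at the entrypoint and logs progress via the renamed contact_info_trace(). wait_for_two is a hypothetical helper, and the Ok value of discover is discarded here since its exact shape is not shown above:

use solana::gossip_service::discover;
use std::net::SocketAddr;

// Hypothetical helper: block until at least two nodes are visible at the
// given gossip entrypoint, propagating the io::Error on failure.
fn wait_for_two(gossip_addr: &SocketAddr) -> std::io::Result<()> {
    discover(gossip_addr, 2)?;
    Ok(())
}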

View File

@@ -1,6 +1,7 @@
 use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree};
 use crate::client::mk_client;
-use crate::cluster_info::{Node, NodeInfo};
+use crate::cluster_info::Node;
+use crate::contact_info::ContactInfo;
 use crate::fullnode::{Fullnode, FullnodeConfig};
 use crate::gossip_service::discover;
 use crate::service::Service;
@@ -20,7 +21,7 @@ pub struct LocalCluster {
     /// Keypair with funding to particpiate in the network
     pub funding_keypair: Keypair,
     /// Entry point from which the rest of the network can be discovered
-    pub entry_point_info: NodeInfo,
+    pub entry_point_info: ContactInfo,
     pub ledger_paths: Vec<String>,
     fullnodes: Vec<Fullnode>,
 }
@@ -47,7 +48,7 @@ impl LocalCluster {
         ledger_paths.push(genesis_ledger_path.clone());
         ledger_paths.push(leader_ledger_path.clone());
         let voting_keypair = Keypair::new();
-        let leader_node_info = leader_node.info.clone();
+        let leader_contact_info = leader_node.info.clone();
         let leader_server = Fullnode::new(
             leader_node,
             &leader_keypair,
@@ -58,7 +59,7 @@ impl LocalCluster {
             fullnode_config,
         );
         let mut fullnodes = vec![leader_server];
-        let mut client = mk_client(&leader_node_info);
+        let mut client = mk_client(&leader_contact_info);
         for stake in &node_stakes[1..] {
             // Must have enough tokens to fund vote account and set delegate
             assert!(*stake > 2);
@@ -90,15 +91,15 @@ impl LocalCluster {
                 &ledger_path,
                 voting_keypair.pubkey(),
                 voting_keypair,
-                Some(&leader_node_info),
+                Some(&leader_contact_info),
                 fullnode_config,
             );
             fullnodes.push(validator_server);
         }
-        discover(&leader_node_info.gossip, node_stakes.len()).unwrap();
+        discover(&leader_contact_info.gossip, node_stakes.len()).unwrap();
         Self {
             funding_keypair: mint_keypair,
-            entry_point_info: leader_node_info,
+            entry_point_info: leader_contact_info,
             fullnodes,
             ledger_paths,
        }
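
The renamed entry_point_info field is now a ContactInfo, so test code reaches the cluster the same way as before. A hedged sketch, assuming the module is exported as solana::local_cluster; ping_cluster is a hypothetical helper:

use solana::client::mk_client;
use solana::local_cluster::LocalCluster;

// Hypothetical helper: the entry point is a ContactInfo, usable directly
// with mk_client.
fn ping_cluster(cluster: &LocalCluster) {
    let mut client = mk_client(&cluster.entry_point_info);
    let _blockhash = client.get_recent_blockhash();
}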

View File

@@ -4,7 +4,8 @@ use crate::blocktree_processor;
 #[cfg(feature = "chacha")]
 use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
 use crate::client::mk_client;
-use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
+use crate::cluster_info::{ClusterInfo, Node};
+use crate::contact_info::ContactInfo;
 use crate::gossip_service::GossipService;
 use crate::result::Result;
 use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
@@ -101,7 +102,7 @@ impl Replicator {
     /// * `ledger_path` - path to where the ledger will be stored.
     /// Causes panic if none
     /// * `node` - The replicator node
-    /// * `leader_info` - NodeInfo representing the leader
+    /// * `leader_info` - ContactInfo representing the leader
     /// * `keypair` - Keypair for this replicator
     /// * `timeout` - (optional) timeout for polling for leader/downloading the ledger. Defaults to
     /// 30 seconds
@@ -109,7 +110,7 @@ impl Replicator {
     pub fn new(
         ledger_path: &str,
         node: Node,
-        leader_info: &NodeInfo,
+        leader_info: &ContactInfo,
         keypair: &Arc<Keypair>,
         _timeout: Option<Duration>,
     ) -> Result<Self> {
@@ -198,11 +199,11 @@ impl Replicator {
         info!("Done receiving entries from window_service");
-        let mut node_info = node.info.clone();
-        node_info.tvu = "0.0.0.0:0".parse().unwrap();
+        let mut contact_info = node.info.clone();
+        contact_info.tvu = "0.0.0.0:0".parse().unwrap();
         {
             let mut cluster_info_w = cluster_info.write().unwrap();
-            cluster_info_w.insert_self(node_info);
+            cluster_info_w.insert_self(contact_info);
         }
         let mut client = mk_client(leader_info);
@@ -328,7 +329,7 @@ impl Replicator {
         ))?
     }
-    fn get_airdrop_lamports(client: &mut ThinClient, keypair: &Keypair, leader_info: &NodeInfo) {
+    fn get_airdrop_lamports(client: &mut ThinClient, keypair: &Keypair, leader_info: &ContactInfo) {
         if retry_get_balance(client, &keypair.pubkey(), None).is_none() {
             let mut drone_addr = leader_info.tpu;
             drone_addr.set_port(DRONE_PORT);

View File

@@ -125,8 +125,8 @@ impl JsonRpcRequestProcessor {
 }
 fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
-    let node_info = cluster_info.read().unwrap().my_data();
-    Ok(node_info.tpu)
+    let contact_info = cluster_info.read().unwrap().my_data();
+    Ok(contact_info.tpu)
 }
 fn verify_pubkey(input: String) -> Result<Pubkey> {
@@ -406,7 +406,7 @@ impl RpcSol for RpcSolImpl {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::cluster_info::NodeInfo;
+    use crate::contact_info::ContactInfo;
     use jsonrpc_core::{MetaIoHandler, Response};
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::{hash, Hash};
@@ -431,9 +431,9 @@ mod tests {
         )));
         request_processor.write().unwrap().set_bank(&bank);
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
-            NodeInfo::default(),
+            ContactInfo::default(),
         )));
-        let leader = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
+        let leader = ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
         cluster_info.write().unwrap().insert_info(leader.clone());
         cluster_info.write().unwrap().set_leader(leader.id);
@@ -634,7 +634,7 @@ mod tests {
                 Arc::new(RwLock::new(request_processor))
             },
             cluster_info: Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
-                NodeInfo::default(),
+                ContactInfo::default(),
             ))),
         };
@@ -653,7 +653,7 @@ mod tests {
     #[test]
     fn test_rpc_get_tpu_addr() {
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
-            NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
+            ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
         )));
         assert_eq!(
             get_tpu_addr(&cluster_info),

View File

@@ -88,7 +88,7 @@ impl Service for JsonRpcService {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::cluster_info::NodeInfo;
+    use crate::contact_info::ContactInfo;
     use solana_runtime::bank::Bank;
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::signature::KeypairUtil;
@@ -100,7 +100,7 @@ mod tests {
         let exit = Arc::new(AtomicBool::new(false));
         let bank = Bank::new(&genesis_block);
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
-            NodeInfo::default(),
+            ContactInfo::default(),
         )));
         let rpc_addr = SocketAddr::new(
             IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),

View File

@@ -227,8 +227,8 @@ impl StorageStage {
         keypair: &Arc<Keypair>,
         account_to_create: Option<Pubkey>,
     ) -> io::Result<()> {
-        let node_info = cluster_info.read().unwrap().my_data();
-        let mut client = mk_client_with_timeout(&node_info, Duration::from_secs(5));
+        let contact_info = cluster_info.read().unwrap().my_data();
+        let mut client = mk_client_with_timeout(&contact_info, Duration::from_secs(5));
         if let Some(account) = account_to_create {
             if client.get_account_userdata(&account).is_ok() {
@@ -445,7 +445,8 @@ impl Service for StorageStage {
 #[cfg(test)]
 mod tests {
     use crate::blocktree::{create_new_tmp_ledger, Blocktree};
-    use crate::cluster_info::{ClusterInfo, NodeInfo};
+    use crate::cluster_info::ClusterInfo;
+    use crate::contact_info::ContactInfo;
     use crate::entry::{make_tiny_test_entries, Entry};
     use crate::service::Service;
     use crate::storage_stage::StorageState;
@@ -492,8 +493,8 @@ mod tests {
     }
     fn test_cluster_info(id: Pubkey) -> Arc<RwLock<ClusterInfo>> {
-        let node_info = NodeInfo::new_localhost(id, 0);
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info);
+        let contact_info = ContactInfo::new_localhost(id, 0);
+        let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
         Arc::new(RwLock::new(cluster_info))
     }

View File

@@ -3,7 +3,7 @@
 //! messages to the network directly. The binary encoding of its messages are
 //! unstable and may change in future releases.
-use crate::cluster_info::NodeInfo;
+use crate::contact_info::ContactInfo;
 use crate::fullnode::{Fullnode, FullnodeConfig};
 use crate::packet::PACKET_DATA_SIZE;
 use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler};
@@ -412,7 +412,7 @@ pub fn retry_get_balance(
     None
 }
-pub fn new_fullnode() -> (Fullnode, NodeInfo, Keypair, String) {
+pub fn new_fullnode() -> (Fullnode, ContactInfo, Keypair, String) {
     use crate::blocktree::create_new_tmp_ledger;
     use crate::cluster_info::Node;
     use crate::fullnode::Fullnode;
@@ -421,9 +421,9 @@ pub fn new_fullnode() -> (Fullnode, NodeInfo, Keypair, String) {
     let node_keypair = Arc::new(Keypair::new());
     let node = Node::new_localhost_with_pubkey(node_keypair.pubkey());
-    let node_info = node.info.clone();
+    let contact_info = node.info.clone();
-    let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10_000, node_info.id, 42);
+    let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10_000, contact_info.id, 42);
     let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
     let voting_keypair = Keypair::new();
@@ -437,7 +437,7 @@ pub fn new_fullnode() -> (Fullnode, NodeInfo, Keypair, String) {
         &FullnodeConfig::default(),
     );
-    (node, node_info, mint_keypair, ledger_path)
+    (node, contact_info, mint_keypair, ledger_path)
 }
 #[cfg(test)]
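
The second element of the new_fullnode() tuple is now a ContactInfo. A minimal sketch of a test-side caller, assuming new_fullnode is reachable at solana::thin_client; spin_up_and_connect is a hypothetical helper:

use solana::client::mk_client;
use solana::thin_client::new_fullnode;

// Hypothetical test helper: connect to the freshly started node via the
// ContactInfo returned from new_fullnode().
fn spin_up_and_connect() {
    let (_fullnode, contact_info, _mint_keypair, _ledger_path) = new_fullnode();
    let mut client = mk_client(&contact_info);
    let _blockhash = client.get_recent_blockhash();
}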

View File

@@ -1,6 +1,7 @@
 use clap::{crate_version, App, Arg};
 use log::*;
-use solana::cluster_info::{Node, NodeInfo, FULLNODE_PORT_RANGE};
+use solana::cluster_info::{Node, FULLNODE_PORT_RANGE};
+use solana::contact_info::ContactInfo;
 use solana::fullnode::{Fullnode, FullnodeConfig};
 use solana::local_vote_signer_service::LocalVoteSignerService;
 use solana::service::Service;
@@ -197,7 +198,7 @@ fn main() {
     }
     let cluster_entrypoint = matches.value_of("network").map(|network| {
         let gossip_addr = network.parse().expect("failed to parse network address");
-        NodeInfo::new_gossip_entry_point(&gossip_addr)
+        ContactInfo::new_gossip_entry_point(&gossip_addr)
     });
     let (_signer_service, _signer_addr) = if let Some(signer_addr) = matches.value_of("signer") {
         (

View File

@@ -1,5 +1,6 @@
 use clap::{crate_version, App, Arg};
-use solana::cluster_info::{Node, NodeInfo};
+use solana::cluster_info::Node;
+use solana::contact_info::ContactInfo;
 use solana::replicator::Replicator;
 use solana::socketaddr;
 use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
@@ -79,7 +80,7 @@ fn main() {
         .map(|network| network.parse().expect("failed to parse network address"))
         .unwrap();
-    let leader_info = NodeInfo::new_gossip_entry_point(&network_addr);
+    let leader_info = ContactInfo::new_gossip_entry_point(&network_addr);
     let replicator =
         Replicator::new(ledger_path, node, &leader_info, &Arc::new(keypair), None).unwrap();
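
As above, an entrypoint is now described by a ContactInfo built from just a gossip address. A small sketch of that step in isolation; entrypoint_for is a hypothetical helper:

use solana::contact_info::ContactInfo;
use std::net::SocketAddr;

// Hypothetical helper: parse a host:port string and wrap it as a gossip
// entry point, as main() does above.
fn entrypoint_for(network: &str) -> ContactInfo {
    let network_addr: SocketAddr = network.parse().expect("failed to parse network address");
    ContactInfo::new_gossip_entry_point(&network_addr)
}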

View File

@@ -2,7 +2,6 @@ use bincode::serialized_size;
 use hashbrown::HashMap;
 use log::*;
 use rayon::prelude::*;
-use solana::cluster_info::NodeInfo;
 use solana::contact_info::ContactInfo;
 use solana::crds_gossip::*;
 use solana::crds_gossip_error::CrdsGossipError;
@@ -375,7 +374,7 @@ fn test_prune_errors() {
     let mut crds_gossip = CrdsGossip::default();
     crds_gossip.id = Pubkey::new(&[0; 32]);
     let id = crds_gossip.id;
-    let ci = NodeInfo::new_localhost(Pubkey::new(&[1; 32]), 0);
+    let ci = ContactInfo::new_localhost(Pubkey::new(&[1; 32]), 0);
     let prune_pubkey = Pubkey::new(&[2; 32]);
     crds_gossip
         .crds

View File

@@ -34,8 +34,8 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, GossipService
 }
 /// Test that the network converges.
-/// Run until every node in the network has a full NodeInfo set.
-/// Check that nodes stop sending updates after all the NodeInfo has been shared.
+/// Run until every node in the network has a full ContactInfo set.
+/// Check that nodes stop sending updates after all the ContactInfo has been shared.
 /// tests that actually use this function are below
 fn run_gossip_topo<F>(num: usize, topo: F)
 where

View File

@@ -13,7 +13,8 @@ use solana::blocktree::{
     create_new_tmp_ledger, get_tmp_ledger_path, tmp_copy_blocktree, Blocktree,
 };
 use solana::client::mk_client;
-use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
+use solana::cluster_info::{ClusterInfo, Node};
+use solana::contact_info::ContactInfo;
 use solana::entry::Entry;
 use solana::fullnode::{Fullnode, FullnodeConfig};
 use solana::replicator::Replicator;
@@ -78,7 +79,7 @@ fn test_replicator_startup_basic() {
     let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());
     #[cfg(feature = "chacha")]
-    let validator_node_info = validator_node.info.clone();
+    let validator_contact_info = validator_node.info.clone();
     let validator = Fullnode::new(
         validator_node,
@@ -130,7 +131,7 @@ fn test_replicator_startup_basic() {
     let replicator_node = Node::new_localhost_with_pubkey(replicator_keypair.pubkey());
     let replicator_info = replicator_node.info.clone();
-    let leader_info = NodeInfo::new_gossip_entry_point(&leader_info.gossip);
+    let leader_info = ContactInfo::new_gossip_entry_point(&leader_info.gossip);
     let replicator = Replicator::new(
         replicator_ledger_path,
@@ -196,7 +197,7 @@ fn test_replicator_startup_basic() {
         "looking for pubkeys for entry: {}",
         replicator.entry_height()
     );
-    let rpc_client = RpcClient::new_from_socket(validator_node_info.rpc);
+    let rpc_client = RpcClient::new_from_socket(validator_contact_info.rpc);
     let mut non_zero_pubkeys = false;
     for _ in 0..60 {
         let params = json!([replicator.entry_height()]);
@@ -244,7 +245,7 @@ fn test_replicator_startup_leader_hang() {
     let replicator_node = Node::new_localhost_with_pubkey(replicator_keypair.pubkey());
     let fake_gossip = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
-    let leader_info = NodeInfo::new_gossip_entry_point(&fake_gossip);
+    let leader_info = ContactInfo::new_gossip_entry_point(&fake_gossip);
     let replicator_res = Replicator::new(
         &replicator_ledger_path,
@@ -316,7 +317,7 @@ fn test_replicator_startup_ledger_hang() {
     // Pass bad TVU sockets to prevent successful ledger download
     replicator_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()];
-    let leader_info = NodeInfo::new_gossip_entry_point(&leader_info.gossip);
+    let leader_info = ContactInfo::new_gossip_entry_point(&leader_info.gossip);
     let replicator_res = Replicator::new(
         &replicator_ledger_path,