adds validator flag to allow private ip addresses (#18850)

This commit is contained in:
behzad nouri 2021-07-23 15:25:03 +00:00 committed by GitHub
parent 63aec9728f
commit d2d5f36a3c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
69 changed files with 1263 additions and 391 deletions

8
Cargo.lock generated
View File

@ -4228,6 +4228,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"spl-token",
@ -4340,6 +4341,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
]
@ -4439,6 +4441,7 @@ dependencies = [
"solana-net-utils",
"solana-remote-wallet",
"solana-sdk",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"solana-vote-program",
@ -4658,6 +4661,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
]
@ -5027,6 +5031,7 @@ dependencies = [
"solana-runtime",
"solana-sdk",
"solana-stake-program",
"solana-streamer",
"solana-vote-program",
"tempfile",
]
@ -5391,6 +5396,7 @@ dependencies = [
"solana-sdk",
"solana-stake-program",
"solana-storage-bigtable",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"solana-vote-program",
@ -5693,6 +5699,7 @@ dependencies = [
"solana-remote-wallet",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"spl-associated-token-account",
@ -5772,6 +5779,7 @@ dependencies = [
"solana-rpc",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
"solana-vote-program",
"symlink",

View File

@ -24,6 +24,7 @@ solana-measure = { path = "../measure", version = "=1.8.0" }
solana-net-utils = { path = "../net-utils", version = "=1.8.0" }
solana-runtime = { path = "../runtime", version = "=1.8.0" }
solana-sdk = { path = "../sdk", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.8.0" }
solana-version = { path = "../version", version = "=1.8.0" }
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }

View File

@ -20,6 +20,7 @@ use solana_sdk::{
timing::timestamp,
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
use std::{
net::SocketAddr,
@ -670,6 +671,7 @@ fn main() {
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
@ -721,7 +723,7 @@ pub mod test {
};
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let iterations = 10;
let maybe_space = None;
let batch_size = 100;

View File

@ -25,6 +25,7 @@ use solana_sdk::{
timing::{duration_as_us, timestamp},
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
thread::sleep,
@ -216,7 +217,11 @@ fn main() {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,

View File

@ -27,6 +27,7 @@ solana-measure = { path = "../measure", version = "=1.8.0" }
solana-net-utils = { path = "../net-utils", version = "=1.8.0" }
solana-runtime = { path = "../runtime", version = "=1.8.0" }
solana-sdk = { path = "../sdk", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
solana-version = { path = "../version", version = "=1.8.0" }
[dev-dependencies]

View File

@ -7,6 +7,7 @@ use solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_clie
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use solana_streamer::socket::SocketAddrSpace;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
/// Number of signatures for all transactions in ~1 week at ~100K TPS
@ -68,13 +69,14 @@ fn main() {
}
info!("Connecting to the cluster");
let nodes = discover_cluster(entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let client = if *multi_client {
let (client, num_clients) = get_multi_client(&nodes);
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
if nodes.len() < num_clients {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
@ -88,7 +90,7 @@ fn main() {
let mut target_client = None;
for node in nodes {
if node.id == *target_node {
target_client = Some(Arc::new(get_client(&[node])));
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
break;
}
}
@ -97,7 +99,7 @@ fn main() {
exit(1);
})
} else {
Arc::new(get_client(&nodes))
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
};
let keypairs = if *read_from_client_file {

View File

@ -13,6 +13,7 @@ use solana_local_cluster::{
validator_configs::make_identical_validator_configs,
};
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use std::{
sync::{mpsc::channel, Arc},
time::Duration,
@ -23,13 +24,19 @@ fn test_bench_tps_local_cluster(config: Config) {
solana_logger::setup();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
native_instruction_processors,
..ClusterConfig::default()
});
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default(),
NUM_NODES,
),
native_instruction_processors,
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let faucet_keypair = Keypair::new();
cluster.transfer(

View File

@ -52,6 +52,7 @@ url = "2.2.2"
[dev-dependencies]
solana-core = { path = "../core", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
tempfile = "3.2.0"
[[bin]]

View File

@ -18,13 +18,15 @@ use solana_sdk::{
signature::{keypair_from_seed, Keypair, Signer},
system_program,
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_nonce() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
full_battery_tests(test_validator, None, false);
}
@ -34,7 +36,8 @@ fn test_nonce_with_seed() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
full_battery_tests(test_validator, Some(String::from("seed")), false);
}
@ -44,7 +47,8 @@ fn test_nonce_with_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
full_battery_tests(test_validator, None, true);
}
@ -216,7 +220,12 @@ fn test_create_account_with_seed() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();

View File

@ -15,6 +15,7 @@ use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use std::{env, fs::File, io::Read, path::PathBuf, str::FromStr};
#[test]
@ -30,7 +31,8 @@ fn test_cli_program_deploy_non_upgradeable() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -146,7 +148,8 @@ fn test_cli_program_deploy_no_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -229,7 +232,8 @@ fn test_cli_program_deploy_with_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -557,7 +561,8 @@ fn test_cli_program_write_buffer() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -839,7 +844,8 @@ fn test_cli_program_set_buffer_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -952,7 +958,8 @@ fn test_cli_program_mismatch_buffer_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -1041,7 +1048,8 @@ fn test_cli_program_show() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -1221,7 +1229,8 @@ fn test_cli_program_dump() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@ -6,13 +6,15 @@ use solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_cli_request_airdrop() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let mut bob_config = CliConfig::recent_for_tests();
bob_config.json_rpc_url = test_validator.rpc_url();

View File

@ -24,13 +24,15 @@ use solana_sdk::{
state::{Lockup, StakeAuthorize, StakeState},
},
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_stake_delegation_force() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -120,7 +122,8 @@ fn test_seed_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -206,7 +209,8 @@ fn test_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -288,7 +292,8 @@ fn test_offline_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -431,7 +436,8 @@ fn test_nonced_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -548,7 +554,8 @@ fn test_stake_authorize() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -862,7 +869,12 @@ fn test_stake_authorize_with_fee_payer() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, SIG_FEE, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
SIG_FEE,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -1010,7 +1022,12 @@ fn test_stake_split() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -1154,7 +1171,12 @@ fn test_stake_set_lockup() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -1420,7 +1442,8 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -1644,7 +1667,8 @@ fn test_stake_checked_instructions() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@ -18,6 +18,7 @@ use solana_sdk::{
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
stake,
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_transfer() {
@ -25,7 +26,12 @@ fn test_transfer() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -277,7 +283,12 @@ fn test_transfer_multisession_signing() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let to_pubkey = Pubkey::new(&[1u8; 32]);
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
@ -404,7 +415,12 @@ fn test_transfer_all() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -452,7 +468,12 @@ fn test_transfer_unfunded_recipient() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@ -500,7 +521,12 @@ fn test_transfer_with_seed() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@ -14,6 +14,7 @@ use solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
#[test]
@ -21,7 +22,8 @@ fn test_vote_authorize_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@ -31,6 +31,7 @@ use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use solana_streamer::socket::SocketAddrSpace;
use std::collections::VecDeque;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
@ -206,7 +207,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = Arc::new(cluster_info);
let (s, _r) = unbounded();
let _banking_stage = BankingStage::new(

View File

@ -16,8 +16,8 @@ use solana_ledger::{
shred::Shred,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::pubkey;
use solana_sdk::timing::timestamp;
use solana_sdk::{pubkey, signature::Keypair, timing::timestamp};
use solana_streamer::socket::SocketAddrSpace;
use std::{
collections::HashMap,
net::UdpSocket,
@ -30,7 +30,11 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup();
let leader_pubkey = pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
let cluster_info = ClusterInfo::new(
leader_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -61,6 +65,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
&mut TransmitShredsStats::default(),
cluster_info.id(),
&bank_forks,
&SocketAddrSpace::Unspecified,
)
.unwrap();
});

View File

@ -21,6 +21,7 @@ use solana_sdk::pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_transaction;
use solana_sdk::timing::timestamp;
use solana_streamer::socket::SocketAddrSpace;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
@ -35,7 +36,11 @@ use test::Bencher;
#[allow(clippy::same_item_push)]
fn bench_retransmitter(bencher: &mut Bencher) {
solana_logger::setup();
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
const NUM_PEERS: usize = 4;
let mut peer_sockets = Vec::new();
for _ in 0..NUM_PEERS {

View File

@ -223,13 +223,22 @@ mod tests {
hash::hash,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn test_should_halt() {
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = new_test_cluster_info(contact_info);
let cluster_info = Arc::new(cluster_info);
let mut trusted_validators = HashSet::new();
@ -265,7 +274,7 @@ mod tests {
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = new_test_cluster_info(contact_info);
let cluster_info = Arc::new(cluster_info);
let trusted_validators = HashSet::new();

View File

@ -1557,7 +1557,7 @@ mod tests {
use crossbeam_channel::unbounded;
use itertools::Itertools;
use solana_entry::entry::{next_entry, Entry, EntrySlice};
use solana_gossip::cluster_info::Node;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_ledger::{
blockstore::{entries_to_test_shreds, Blockstore},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
@ -1579,6 +1579,7 @@ mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionWithStatusMeta;
use std::{
convert::TryInto,
@ -1591,6 +1592,14 @@ mod tests {
thread::sleep,
};
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn test_banking_stage_shutdown1() {
let genesis_config = create_genesis_config(2).genesis_config;
@ -1606,7 +1615,7 @@ mod tests {
);
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
@ -1652,7 +1661,7 @@ mod tests {
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
@ -1724,7 +1733,7 @@ mod tests {
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
@ -1875,8 +1884,7 @@ mod tests {
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let _banking_stage = BankingStage::new_num_threads(
&cluster_info,

View File

@ -25,7 +25,7 @@ use solana_sdk::timing::timestamp;
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair};
use solana_streamer::{
sendmmsg::{batch_send, SendPktsError},
socket::is_global,
socket::SocketAddrSpace,
};
use std::sync::atomic::AtomicU64;
use std::{
@ -404,6 +404,7 @@ pub fn broadcast_shreds(
transmit_stats: &mut TransmitShredsStats,
self_pubkey: Pubkey,
bank_forks: &Arc<RwLock<BankForks>>,
socket_addr_space: &SocketAddrSpace,
) -> Result<()> {
let mut result = Ok(());
let broadcast_len = cluster_nodes.num_peers();
@ -418,7 +419,7 @@ pub fn broadcast_shreds(
.filter_map(|shred| {
let seed = shred.seed(Some(self_pubkey), &root_bank);
let node = cluster_nodes.get_broadcast_peer(seed)?;
if is_global(&node.tvu) {
if socket_addr_space.check(&node.tvu) {
Some((&shred.payload[..], &node.tvu))
} else {
None
@ -602,7 +603,11 @@ pub mod test {
let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey());
// Fill the cluster_info with the buddy's info
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let cluster_info = ClusterInfo::new(
leader_info.info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster_info.insert_info(broadcast_buddy.info);
let cluster_info = Arc::new(cluster_info);

View File

@ -295,6 +295,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
&mut TransmitShredsStats::default(),
cluster_info.id(),
bank_forks,
cluster_info.socket_addr_space(),
)?;
Ok(())

View File

@ -140,14 +140,16 @@ impl BroadcastRun for BroadcastFakeShredsRun {
mod tests {
use super::*;
use solana_gossip::contact_info::ContactInfo;
use solana_streamer::socket::SocketAddrSpace;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
#[test]
fn test_tvu_peers_ordering() {
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
0,
));
let cluster = ClusterInfo::new(
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
8080,

View File

@ -147,6 +147,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
&mut TransmitShredsStats::default(),
cluster_info.id(),
bank_forks,
cluster_info.socket_addr_space(),
)?;
Ok(())

View File

@ -367,6 +367,7 @@ impl StandardBroadcastRun {
&mut transmit_stats,
cluster_info.id(),
bank_forks,
cluster_info.socket_addr_space(),
)?;
drop(cluster_nodes);
transmit_time.stop();
@ -510,6 +511,7 @@ mod test {
genesis_config::GenesisConfig,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use std::ops::Deref;
use std::sync::Arc;
use std::time::Duration;
@ -534,7 +536,11 @@ mod test {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(leader_info.info));
let cluster_info = Arc::new(ClusterInfo::new(
leader_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;

View File

@ -246,8 +246,9 @@ mod tests {
sorted_stakes_with_index,
},
},
solana_sdk::timing::timestamp,
std::iter::repeat_with,
solana_sdk::{signature::Keypair, timing::timestamp},
solana_streamer::socket::SocketAddrSpace,
std::{iter::repeat_with, sync::Arc},
};
// Legacy methods copied for testing backward compatibility.
@ -293,7 +294,11 @@ mod tests {
.collect();
// Add some staked nodes with no contact-info.
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
let cluster_info = ClusterInfo::new_with_invalid_keypair(this_node);
let cluster_info = ClusterInfo::new(
this_node,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
{
let now = timestamp();
let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();

View File

@ -180,14 +180,19 @@ mod test {
use {
super::*,
solana_gossip::{cluster_info::Node, crds_value::LowestSlot},
solana_sdk::pubkey::Pubkey,
solana_sdk::{pubkey::Pubkey, signature::Keypair},
solana_streamer::socket::SocketAddrSpace,
};
#[test]
pub fn test_update_lowest_slot() {
let pubkey = Pubkey::new_unique();
let node_info = Node::new_localhost_with_pubkey(&pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
let cluster_info = ClusterInfo::new(
node_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
ClusterSlotsService::update_lowest_slot(5, &cluster_info);
cluster_info.flush_push_queue();
let lowest = {

View File

@ -561,14 +561,24 @@ impl RepairService {
#[cfg(test)]
mod test {
use super::*;
use solana_gossip::cluster_info::Node;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::collections::HashSet;
/// Test-only helper: builds a `ClusterInfo` for `contact_info` backed by a
/// throwaway keypair, with an unrestricted (`Unspecified`) socket address
/// space so localhost/private addresses are accepted in tests.
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
    let keypair = Arc::new(Keypair::new());
    ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified)
}
#[test]
pub fn test_repair_orphan() {
let blockstore_path = get_tmp_ledger_path!();
@ -863,7 +873,8 @@ mod test {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let cluster_slots = ClusterSlots::default();
let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info);
let serve_repair =
ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info)));
let mut ancestor_hashes_request_statuses = HashMap::new();
let dead_slot = 9;
let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap();
@ -946,9 +957,7 @@ mod test {
Pubkey::default(),
UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap(),
));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
Node::new_localhost().info,
));
let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info));
let serve_repair = ServeRepair::new(cluster_info.clone());
let valid_repair_peer = Node::new_localhost().info;

View File

@ -2753,6 +2753,7 @@ pub mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionWithStatusMeta;
use solana_vote_program::{
vote_state::{VoteState, VoteStateVersions},
@ -2829,6 +2830,7 @@ pub mod tests {
let cluster_info = ClusterInfo::new(
Node::new_localhost_with_pubkey(&my_pubkey).info,
Arc::new(Keypair::from_bytes(&my_keypairs.node_keypair.to_bytes()).unwrap()),
SocketAddrSpace::Unspecified,
);
assert_eq!(my_pubkey, cluster_info.id());

View File

@ -330,6 +330,7 @@ fn retransmit(
epoch_cache_update.stop();
let my_id = cluster_info.id();
let socket_addr_space = cluster_info.socket_addr_space();
let mut discard_total = 0;
let mut repair_total = 0;
let mut retransmit_total = 0;
@ -399,6 +400,7 @@ fn retransmit(
packet,
sock,
/*forward socket=*/ true,
socket_addr_space,
);
}
ClusterInfo::retransmit_to(
@ -406,6 +408,7 @@ fn retransmit(
packet,
sock,
!anchor_node, // send to forward socket!
socket_addr_space,
);
retransmit_time.stop();
retransmit_total += retransmit_time.as_us();
@ -629,6 +632,8 @@ mod tests {
use solana_ledger::shred::Shred;
use solana_net_utils::find_available_port_in_range;
use solana_perf::packet::{Packet, Packets};
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::net::{IpAddr, Ipv4Addr};
#[test]
@ -665,7 +670,11 @@ mod tests {
.find(|pk| me.id < *pk)
.unwrap();
let other = ContactInfo::new_localhost(&other, 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
let cluster_info = ClusterInfo::new(
other,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster_info.insert_info(me);
let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);

View File

@ -194,13 +194,6 @@ impl RepairPeers {
}
impl ServeRepair {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
Self::new(Arc::new(ClusterInfo::new_with_invalid_keypair(
contact_info,
)))
}
pub fn new(cluster_info: Arc<ClusterInfo>) -> Self {
Self { cluster_info }
}
@ -754,7 +747,8 @@ mod tests {
shred::{max_ticks_per_n_shreds, Shred},
};
use solana_perf::packet::Packet;
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, timing::timestamp};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_run_highest_window_request() {
@ -899,11 +893,19 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// Constructs a `ClusterInfo` for tests: wraps `contact_info` with a freshly
/// generated keypair and `SocketAddrSpace::Unspecified` (no address filtering).
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
    let node_keypair = Arc::new(Keypair::new());
    let addr_space = SocketAddrSpace::Unspecified;
    ClusterInfo::new(contact_info, node_keypair, addr_space)
}
#[test]
fn window_index_request() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me));
let cluster_info = Arc::new(new_test_cluster_info(me));
let serve_repair = ServeRepair::new(cluster_info.clone());
let mut outstanding_requests = OutstandingShredRepairs::default();
let rv = serve_repair.repair_request(
@ -1213,7 +1215,7 @@ mod tests {
fn test_repair_with_repair_validators() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me.clone()));
let cluster_info = Arc::new(new_test_cluster_info(me.clone()));
// Insert two peers on the network
let contact_info2 =

View File

@ -1,7 +1,7 @@
use crate::serve_repair::ServeRepair;
use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use solana_streamer::streamer;
use solana_streamer::{socket::SocketAddrSpace, streamer};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
@ -17,6 +17,7 @@ impl ServeRepairService {
serve_repair: &Arc<RwLock<ServeRepair>>,
blockstore: Option<Arc<Blockstore>>,
serve_repair_socket: UdpSocket,
socket_addr_space: SocketAddrSpace,
exit: &Arc<AtomicBool>,
) -> Self {
let (request_sender, request_receiver) = channel();
@ -36,8 +37,12 @@ impl ServeRepairService {
false,
);
let (response_sender, response_receiver) = channel();
let t_responder =
streamer::responder("serve-repairs", serve_repair_socket, response_receiver);
let t_responder = streamer::responder(
"serve-repairs",
serve_repair_socket,
response_receiver,
socket_addr_space,
);
let t_listen = ServeRepair::listen(
serve_repair.clone(),
blockstore,

View File

@ -30,6 +30,7 @@ use {
rent::Rent,
signature::{read_keypair_file, write_keypair_file, Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::HashMap,
fs::remove_dir_all,
@ -269,8 +270,9 @@ impl TestValidatorGenesis {
pub fn start_with_mint_address(
&self,
mint_address: Pubkey,
socket_addr_space: SocketAddrSpace,
) -> Result<TestValidator, Box<dyn std::error::Error>> {
TestValidator::start(mint_address, self)
TestValidator::start(mint_address, self, socket_addr_space)
}
/// Start a test validator
@ -279,9 +281,9 @@ impl TestValidatorGenesis {
/// created at genesis.
///
/// This function panics on initialization failure.
pub fn start(&self) -> (TestValidator, Keypair) {
pub fn start(&self, socket_addr_space: SocketAddrSpace) -> (TestValidator, Keypair) {
let mint_keypair = Keypair::new();
TestValidator::start(mint_keypair.pubkey(), self)
TestValidator::start(mint_keypair.pubkey(), self, socket_addr_space)
.map(|test_validator| (test_validator, mint_keypair))
.expect("Test validator failed to start")
}
@ -303,7 +305,11 @@ impl TestValidator {
/// Faucet optional.
///
/// This function panics on initialization failure.
pub fn with_no_fees(mint_address: Pubkey, faucet_addr: Option<SocketAddr>) -> Self {
pub fn with_no_fees(
mint_address: Pubkey,
faucet_addr: Option<SocketAddr>,
socket_addr_space: SocketAddrSpace,
) -> Self {
TestValidatorGenesis::default()
.fee_rate_governor(FeeRateGovernor::new(0, 0))
.rent(Rent {
@ -312,7 +318,7 @@ impl TestValidator {
..Rent::default()
})
.faucet_addr(faucet_addr)
.start_with_mint_address(mint_address)
.start_with_mint_address(mint_address, socket_addr_space)
.expect("validator start failed")
}
@ -324,6 +330,7 @@ impl TestValidator {
mint_address: Pubkey,
target_lamports_per_signature: u64,
faucet_addr: Option<SocketAddr>,
socket_addr_space: SocketAddrSpace,
) -> Self {
TestValidatorGenesis::default()
.fee_rate_governor(FeeRateGovernor::new(target_lamports_per_signature, 0))
@ -333,7 +340,7 @@ impl TestValidator {
..Rent::default()
})
.faucet_addr(faucet_addr)
.start_with_mint_address(mint_address)
.start_with_mint_address(mint_address, socket_addr_space)
.expect("validator start failed")
}
@ -436,6 +443,7 @@ impl TestValidator {
fn start(
mint_address: Pubkey,
config: &TestValidatorGenesis,
socket_addr_space: SocketAddrSpace,
) -> Result<Self, Box<dyn std::error::Error>> {
let preserve_ledger = config.ledger_path.is_some();
let ledger_path = TestValidator::initialize_ledger(mint_address, config)?;
@ -516,11 +524,12 @@ impl TestValidator {
&validator_config,
true, // should_check_duplicate_instance
config.start_progress.clone(),
socket_addr_space,
));
// Needed to avoid panics in `solana-responder-gossip` in tests that create a number of
// test validators concurrently...
discover_cluster(&gossip, 1)
discover_cluster(&gossip, 1, socket_addr_space)
.map_err(|err| format!("TestValidator startup failed: {:?}", err))?;
// This is a hack to delay until the fees are non-zero for test consistency

View File

@ -373,7 +373,8 @@ pub mod tests {
use solana_poh::poh_recorder::create_test_recorder;
use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
use solana_runtime::bank::Bank;
use solana_sdk::signature::Signer;
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use std::sync::atomic::Ordering;
#[ignore]
@ -391,7 +392,11 @@ pub mod tests {
let bank_forks = BankForks::new(Bank::new(&genesis_config));
//start cluster_info1
let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
let cluster_info1 = ClusterInfo::new(
target1.info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster_info1.insert_info(leader.info);
let cref1 = Arc::new(cluster_info1);

View File

@ -73,6 +73,7 @@ use solana_sdk::{
signature::{Keypair, Signer},
timing::timestamp,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::vote_state::VoteState;
use std::{
collections::HashSet,
@ -278,6 +279,7 @@ pub(crate) fn abort() -> ! {
}
impl Validator {
#[allow(clippy::too_many_arguments)]
pub fn new(
mut node: Node,
identity_keypair: Arc<Keypair>,
@ -288,6 +290,7 @@ impl Validator {
config: &ValidatorConfig,
should_check_duplicate_instance: bool,
start_progress: Arc<RwLock<ValidatorStartProgress>>,
socket_addr_space: SocketAddrSpace,
) -> Self {
let id = identity_keypair.pubkey();
assert_eq!(id, node.info.id);
@ -438,7 +441,8 @@ impl Validator {
}
}
let mut cluster_info = ClusterInfo::new(node.info.clone(), identity_keypair);
let mut cluster_info =
ClusterInfo::new(node.info.clone(), identity_keypair, socket_addr_space);
cluster_info.set_contact_debug_interval(config.contact_debug_interval);
cluster_info.set_entrypoints(cluster_entrypoints);
cluster_info.restore_contact_info(ledger_path, config.contact_save_interval);
@ -511,10 +515,16 @@ impl Validator {
optimistically_confirmed_bank_tracker,
bank_notification_sender,
) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
if ContactInfo::is_valid_address(&node.info.rpc, &socket_addr_space) {
assert!(ContactInfo::is_valid_address(
&node.info.rpc_pubsub,
&socket_addr_space
));
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert!(!ContactInfo::is_valid_address(
&node.info.rpc_pubsub,
&socket_addr_space
));
}
let (bank_notification_sender, bank_notification_receiver) = unbounded();
(
@ -595,6 +605,7 @@ impl Validator {
&serve_repair,
Some(blockstore.clone()),
node.sockets.serve_repair,
socket_addr_space,
&exit,
);
@ -1613,6 +1624,7 @@ mod tests {
&config,
true, // should_check_duplicate_instance
start_progress.clone(),
SocketAddrSpace::Unspecified,
);
assert_eq!(
*start_progress.read().unwrap(),
@ -1691,6 +1703,7 @@ mod tests {
&config,
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
SocketAddrSpace::Unspecified,
)
})
.collect();
@ -1716,6 +1729,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let (genesis_config, _mint_keypair) = create_genesis_config(1);

View File

@ -615,6 +615,7 @@ mod test {
signature::{Keypair, Signer},
timing::timestamp,
};
use solana_streamer::socket::SocketAddrSpace;
use std::sync::Arc;
fn local_entries_to_shred(
@ -765,7 +766,11 @@ mod test {
assert!(!blockstore.has_duplicate_shreds_in_slot(duplicate_shred_slot));
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(keypair),
SocketAddrSpace::Unspecified,
);
run_check_duplicate(
&cluster_info,
&blockstore,

View File

@ -18,6 +18,7 @@ use solana_sdk::{
signature::{Keypair, Signer},
system_transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
net::{IpAddr, SocketAddr},
sync::{
@ -34,7 +35,8 @@ fn test_rpc_client() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let bob_pubkey = solana_sdk::pubkey::new_rand();

View File

@ -21,6 +21,7 @@ use solana_sdk::{
system_transaction,
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionStatus;
use std::{
collections::HashSet,
@ -58,7 +59,8 @@ fn test_rpc_send_tx() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let rpc_url = test_validator.rpc_url();
let bob_pubkey = solana_sdk::pubkey::new_rand();
@ -122,7 +124,8 @@ fn test_rpc_invalid_requests() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let rpc_url = test_validator.rpc_url();
let bob_pubkey = solana_sdk::pubkey::new_rand();
@ -153,7 +156,8 @@ fn test_rpc_invalid_requests() {
fn test_rpc_slot_updates() {
solana_logger::setup();
let test_validator = TestValidator::with_no_fees(Pubkey::new_unique(), None);
let test_validator =
TestValidator::with_no_fees(Pubkey::new_unique(), None, SocketAddrSpace::Unspecified);
// Create the pub sub runtime
let rt = Runtime::new().unwrap();
@ -218,7 +222,8 @@ fn test_rpc_subscriptions() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
transactions_socket.connect(test_validator.tpu()).unwrap();
@ -385,7 +390,8 @@ fn test_rpc_subscriptions() {
fn test_tpu_send_transaction() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let test_validator = TestValidator::with_no_fees(mint_pubkey, None);
let test_validator =
TestValidator::with_no_fees(mint_pubkey, None, SocketAddrSpace::Unspecified);
let rpc_client = Arc::new(RpcClient::new_with_commitment(
test_validator.rpc_url(),
CommitmentConfig::processed(),

View File

@ -62,6 +62,7 @@ mod tests {
signature::{Keypair, Signer},
system_transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
collections::HashSet,
fs,
@ -452,7 +453,11 @@ mod tests {
// channel hold hard links to these deleted snapshots. We verify this is the case below.
let exit = Arc::new(AtomicBool::new(false));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let pending_snapshot_package = PendingSnapshotPackage::default();
let snapshot_packager_service = SnapshotPackagerService::new(

View File

@ -22,6 +22,7 @@ solana-logger = { path = "../logger", version = "=1.8.0" }
solana-net-utils = { path = "../net-utils", version = "=1.8.0" }
solana-runtime = { path = "../runtime", version = "=1.8.0" }
solana-sdk = { path = "../sdk", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
solana-version = { path = "../version", version = "=1.8.0" }
solana-client = { path = "../client", version = "=1.8.0" }

View File

@ -6,6 +6,7 @@ use solana_client::rpc_client::RpcClient;
use solana_core::serve_repair::RepairProtocol;
use solana_gossip::{contact_info::ContactInfo, gossip_service::discover};
use solana_sdk::pubkey::Pubkey;
use solana_streamer::socket::SocketAddrSpace;
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::str::FromStr;
@ -197,6 +198,13 @@ fn main() {
.long("skip-gossip")
.help("Just use entrypoint address directly"),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.get_matches();
let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], 8001));
@ -216,6 +224,7 @@ fn main() {
let mut nodes = vec![];
if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let (gossip_nodes, _validators) = discover(
None, // keypair
Some(&entrypoint_addr),
@ -225,6 +234,7 @@ fn main() {
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
socket_addr_space,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);

View File

@ -64,7 +64,7 @@ use {
solana_streamer::{
packet,
sendmmsg::{multi_target_send, SendPktsError},
socket::is_global,
socket::SocketAddrSpace,
streamer::{PacketReceiver, PacketSender},
},
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
@ -157,12 +157,7 @@ pub struct ClusterInfo {
contact_save_interval: u64, // milliseconds, 0 = disabled
instance: RwLock<NodeInstance>,
contact_info_path: PathBuf,
}
impl Default for ClusterInfo {
fn default() -> Self {
Self::new_with_invalid_keypair(ContactInfo::default())
}
socket_addr_space: SocketAddrSpace,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
@ -391,12 +386,11 @@ fn retain_staked(values: &mut Vec<CrdsValue>, stakes: &HashMap<Pubkey, u64>) {
}
impl ClusterInfo {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
Self::new(contact_info, Arc::new(Keypair::new()))
}
pub fn new(contact_info: ContactInfo, keypair: Arc<Keypair>) -> Self {
pub fn new(
contact_info: ContactInfo,
keypair: Arc<Keypair>,
socket_addr_space: SocketAddrSpace,
) -> Self {
let id = contact_info.id;
let me = Self {
gossip: CrdsGossip::default(),
@ -415,6 +409,7 @@ impl ClusterInfo {
instance: RwLock::new(NodeInstance::new(&mut thread_rng(), id, timestamp())),
contact_info_path: PathBuf::default(),
contact_save_interval: 0, // disabled
socket_addr_space,
};
me.insert_self();
me.push_self(&HashMap::new(), None);
@ -444,6 +439,7 @@ impl ClusterInfo {
instance: RwLock::new(NodeInstance::new(&mut thread_rng(), *new_id, timestamp())),
contact_info_path: PathBuf::default(),
contact_save_interval: 0, // disabled
..*self
}
}
@ -451,6 +447,10 @@ impl ClusterInfo {
self.contact_debug_interval = new;
}
/// Returns the `SocketAddrSpace` this node was constructed with (see
/// `ClusterInfo::new`). It is used throughout gossip to decide which peer
/// addresses are considered valid/contactable — e.g. whether private IP
/// addresses are allowed.
pub fn socket_addr_space(&self) -> &SocketAddrSpace {
&self.socket_addr_space
}
fn push_self(
&self,
stakes: &HashMap<Pubkey, u64>,
@ -474,8 +474,13 @@ impl ClusterInfo {
shred_version,
..
} = *self.my_contact_info.read().unwrap();
self.gossip
.refresh_push_active_set(&self_pubkey, shred_version, stakes, gossip_validators);
self.gossip.refresh_push_active_set(
&self_pubkey,
shred_version,
stakes,
gossip_validators,
&self.socket_addr_space,
);
}
// TODO kill insert_info, only used by tests
@ -667,7 +672,7 @@ impl ClusterInfo {
.all_peers()
.into_iter()
.filter_map(|(node, last_updated)| {
if !ContactInfo::is_valid_address(&node.rpc) {
if !ContactInfo::is_valid_address(&node.rpc, &self.socket_addr_space) {
return None;
}
@ -678,8 +683,8 @@ impl ClusterInfo {
return None;
}
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
if ContactInfo::is_valid_address(addr) {
let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String {
if ContactInfo::is_valid_address(addr, &self.socket_addr_space) {
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
@ -688,7 +693,7 @@ impl ClusterInfo {
} else {
"none".to_string()
}
}
};
let rpc_addr = node.rpc.ip();
Some(format!(
@ -732,7 +737,7 @@ impl ClusterInfo {
.all_peers()
.into_iter()
.filter_map(|(node, last_updated)| {
let is_spy_node = Self::is_spy_node(&node);
let is_spy_node = Self::is_spy_node(&node, &self.socket_addr_space);
if is_spy_node {
total_spy_nodes = total_spy_nodes.saturating_add(1);
}
@ -745,8 +750,8 @@ impl ClusterInfo {
if is_spy_node {
shred_spy_nodes = shred_spy_nodes.saturating_add(1);
}
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
if ContactInfo::is_valid_address(addr) {
let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String {
if ContactInfo::is_valid_address(addr, &self.socket_addr_space) {
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
@ -755,11 +760,11 @@ impl ClusterInfo {
} else {
"none".to_string()
}
}
};
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
if ContactInfo::is_valid_address(&node.gossip, &self.socket_addr_space) {
ip_addr.to_string()
} else {
"none".to_string()
@ -1131,7 +1136,10 @@ impl ClusterInfo {
let gossip_crds = self.gossip.crds.read().unwrap();
gossip_crds
.get_nodes_contact_info()
.filter(|x| x.id != self_pubkey && ContactInfo::is_valid_address(&x.rpc))
.filter(|x| {
x.id != self_pubkey
&& ContactInfo::is_valid_address(&x.rpc, &self.socket_addr_space)
})
.cloned()
.collect()
}
@ -1151,7 +1159,9 @@ impl ClusterInfo {
gossip_crds
.get_nodes_contact_info()
// shred_version not considered for gossip peers (ie, spy nodes do not set shred_version)
.filter(|x| x.id != me && ContactInfo::is_valid_address(&x.gossip))
.filter(|x| {
x.id != me && ContactInfo::is_valid_address(&x.gossip, &self.socket_addr_space)
})
.cloned()
.collect()
}
@ -1161,7 +1171,10 @@ impl ClusterInfo {
let self_pubkey = self.id();
self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
.get_nodes_contact_info()
.filter(|x| ContactInfo::is_valid_address(&x.tvu) && x.id != self_pubkey)
.filter(|x| {
ContactInfo::is_valid_address(&x.tvu, &self.socket_addr_space)
&& x.id != self_pubkey
})
.cloned()
.collect()
}
@ -1193,7 +1206,7 @@ impl ClusterInfo {
node.id != self_pubkey
&& node.shred_version == self_shred_version
&& ContactInfo::is_valid_tvu_address(&node.tvu)
&& ContactInfo::is_valid_address(&node.serve_repair)
&& ContactInfo::is_valid_address(&node.serve_repair, &self.socket_addr_space)
&& match gossip_crds.get::<&LowestSlot>(node.id) {
None => true, // fallback to legacy behavior
Some(lowest_slot) => lowest_slot.lowest <= slot,
@ -1203,10 +1216,10 @@ impl ClusterInfo {
.collect()
}
fn is_spy_node(contact_info: &ContactInfo) -> bool {
!ContactInfo::is_valid_address(&contact_info.tpu)
|| !ContactInfo::is_valid_address(&contact_info.gossip)
|| !ContactInfo::is_valid_address(&contact_info.tvu)
/// A node is a "spy" if it does not advertise a full set of contactable
/// sockets: it is a spy unless its tpu, gossip, AND tvu addresses are all
/// valid under the given address space.
fn is_spy_node(contact_info: &ContactInfo, socket_addr_space: &SocketAddrSpace) -> bool {
    !(ContactInfo::is_valid_address(&contact_info.tpu, socket_addr_space)
        && ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
        && ContactInfo::is_valid_address(&contact_info.tvu, socket_addr_space))
}
/// compute broadcast table
@ -1215,7 +1228,10 @@ impl ClusterInfo {
let gossip_crds = self.gossip.crds.read().unwrap();
gossip_crds
.get_nodes_contact_info()
.filter(|x| x.id != self_pubkey && ContactInfo::is_valid_address(&x.tpu))
.filter(|x| {
x.id != self_pubkey
&& ContactInfo::is_valid_address(&x.tpu, &self.socket_addr_space)
})
.cloned()
.collect()
}
@ -1223,20 +1239,25 @@ impl ClusterInfo {
/// retransmit messages to a list of nodes
/// # Remarks
/// We need to avoid having obj locked while doing a io, such as the `send_to`
pub fn retransmit_to(peers: &[&ContactInfo], packet: &Packet, s: &UdpSocket, forwarded: bool) {
pub fn retransmit_to(
peers: &[&ContactInfo],
packet: &Packet,
s: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.map(|peer| &peer.tvu_forwards)
.filter(|addr| ContactInfo::is_valid_address(addr))
.filter(|addr| is_global(addr))
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect()
} else {
peers
.iter()
.map(|peer| &peer.tvu)
.filter(|addr| is_global(addr))
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
let data = &packet.data[..packet.meta.size];
@ -1377,6 +1398,7 @@ impl ClusterInfo {
MAX_BLOOM_SIZE,
&self.ping_cache,
&mut pings,
&self.socket_addr_space,
) {
Err(_) => Vec::default(),
Ok((peer, filters)) => vec![(peer, filters)],
@ -1850,7 +1872,7 @@ impl ClusterInfo {
// incoming pull-requests, pings are also sent to request.from_addr (as
// opposed to caller.gossip address).
move |request| {
ContactInfo::is_valid_address(&request.from_addr) && {
ContactInfo::is_valid_address(&request.from_addr, &self.socket_addr_space) && {
let node = (request.caller.pubkey(), request.from_addr);
*cache.entry(node).or_insert_with(|| hard_check(node))
}
@ -2219,7 +2241,7 @@ impl ClusterInfo {
let new_push_requests = self.new_push_requests(stakes, require_stake_for_gossip);
inc_new_counter_debug!("cluster_info-push_message-pushes", new_push_requests.len());
for (address, request) in new_push_requests {
if ContactInfo::is_valid_address(&address) {
if ContactInfo::is_valid_address(&address, &self.socket_addr_space) {
match Packet::from_data(Some(&address), &request) {
Ok(packet) => packets.packets.push(packet),
Err(err) => error!("failed to write push-request packet: {:?}", err),
@ -2869,13 +2891,14 @@ pub fn push_messages_to_peer(
messages: Vec<CrdsValue>,
self_id: Pubkey,
peer_gossip: SocketAddr,
socket_addr_space: &SocketAddrSpace,
) -> Result<(), GossipError> {
let reqs: Vec<_> = ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, messages)
.map(move |payload| (peer_gossip, Protocol::PushMessage(self_id, payload)))
.collect();
let packets = to_packets_with_destination(PacketsRecycler::default(), &reqs);
let sock = UdpSocket::bind("0.0.0.0:0").unwrap();
packet::send_to(&packets, &sock)?;
packet::send_to(&packets, &sock, socket_addr_space)?;
Ok(())
}
@ -2969,20 +2992,30 @@ mod tests {
fn test_gossip_node() {
//check that a gossip nodes always show up as spies
let (node, _, _) = ClusterInfo::spy_node(solana_sdk::pubkey::new_rand(), 0);
assert!(ClusterInfo::is_spy_node(&node));
assert!(ClusterInfo::is_spy_node(
&node,
&SocketAddrSpace::Unspecified
));
let (node, _, _) = ClusterInfo::gossip_node(
solana_sdk::pubkey::new_rand(),
&"1.1.1.1:1111".parse().unwrap(),
0,
);
assert!(ClusterInfo::is_spy_node(&node));
assert!(ClusterInfo::is_spy_node(
&node,
&SocketAddrSpace::Unspecified
));
}
#[test]
fn test_handle_pull() {
solana_logger::setup();
let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let cluster_info = Arc::new(ClusterInfo::new(
node.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let data = test_crds_values(entrypoint_pubkey);
@ -3039,6 +3072,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&this_node.pubkey(), timestamp()),
this_node.clone(),
SocketAddrSpace::Unspecified,
);
let remote_nodes: Vec<(Keypair, SocketAddr)> =
repeat_with(|| new_rand_remote_node(&mut rng))
@ -3093,6 +3127,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&this_node.pubkey(), timestamp()),
this_node.clone(),
SocketAddrSpace::Unspecified,
);
let remote_nodes: Vec<(Keypair, SocketAddr)> =
repeat_with(|| new_rand_remote_node(&mut rng))
@ -3252,13 +3287,18 @@ mod tests {
//check that gossip doesn't try to push to invalid addresses
let node = Node::new_localhost();
let (spy, _, _) = ClusterInfo::spy_node(solana_sdk::pubkey::new_rand(), 0);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let cluster_info = Arc::new(ClusterInfo::new(
node.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
cluster_info.insert_info(spy);
cluster_info.gossip.refresh_push_active_set(
&cluster_info.id(),
cluster_info.my_shred_version(),
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
let reqs = cluster_info.generate_new_gossip_requests(
&thread_pool,
@ -3269,7 +3309,7 @@ mod tests {
);
//assert none of the addrs are invalid.
reqs.iter().all(|(addr, _)| {
let res = ContactInfo::is_valid_address(addr);
let res = ContactInfo::is_valid_address(addr, &SocketAddrSpace::Unspecified);
assert!(res);
res
});
@ -3278,14 +3318,19 @@ mod tests {
#[test]
fn test_cluster_info_new() {
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
let cluster_info = ClusterInfo::new(
d.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
assert_eq!(d.id, cluster_info.id());
}
#[test]
fn insert_info_test() {
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d);
let cluster_info =
ClusterInfo::new(d, Arc::new(Keypair::new()), SocketAddrSpace::Unspecified);
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let label = CrdsValueLabel::ContactInfo(d.id);
cluster_info.insert_info(d);
@ -3364,7 +3409,11 @@ mod tests {
let peer_keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer = ContactInfo::new_localhost(&peer_keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(keypair),
SocketAddrSpace::Unspecified,
);
cluster_info
.ping_cache
.lock()
@ -3376,6 +3425,7 @@ mod tests {
cluster_info.my_shred_version(),
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
//check that all types of gossip messages are signed correctly
let push_messages = cluster_info
@ -3400,6 +3450,7 @@ mod tests {
MAX_BLOOM_SIZE,
&cluster_info.ping_cache,
&mut pings,
&cluster_info.socket_addr_space,
)
.ok()
.unwrap();
@ -3409,7 +3460,11 @@ mod tests {
fn test_refresh_vote() {
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
// Construct and push a vote for some other slot
let unrefresh_slot = 5;
@ -3496,7 +3551,11 @@ mod tests {
let mut rng = rand::thread_rng();
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
// make sure empty crds is handled correctly
let mut cursor = Cursor::default();
@ -3567,7 +3626,11 @@ mod tests {
let mut rng = rand::thread_rng();
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let mut tower = Vec::new();
for k in 0..MAX_LOCKOUT_HISTORY {
let slot = k as Slot;
@ -3613,7 +3676,11 @@ mod tests {
fn test_push_epoch_slots() {
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let slots = cluster_info.get_epoch_slots(&mut Cursor::default());
assert!(slots.is_empty());
cluster_info.push_epoch_slots(&[0]);
@ -3670,6 +3737,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
@ -3820,7 +3888,11 @@ mod tests {
#[test]
fn test_tvu_peers_and_stakes() {
let d = ContactInfo::new_localhost(&Pubkey::new(&[0; 32]), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
let cluster_info = ClusterInfo::new(
d.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let mut stakes = HashMap::new();
// no stake
@ -3861,6 +3933,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let mut entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
@ -3916,6 +3989,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
for i in 0..10 {
// make these invalid for the upcoming repair request
@ -3987,6 +4061,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let mut range: Vec<Slot> = vec![];
//random should be hard to compress
@ -4034,6 +4109,7 @@ mod tests {
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
));
assert_eq!(cluster_info.my_shred_version(), 0);
@ -4117,6 +4193,7 @@ mod tests {
contact_info
},
node_keypair,
SocketAddrSpace::Unspecified,
));
assert_eq!(cluster_info.my_shred_version(), 2);
@ -4288,7 +4365,11 @@ mod tests {
#[ignore] // TODO: debug why this is flaky on buildkite!
fn test_pull_request_time_pruning() {
let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let cluster_info = Arc::new(ClusterInfo::new(
node.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
cluster_info.set_entrypoint(entrypoint);

View File

@ -7,6 +7,7 @@ use {
signature::{Keypair, Signer},
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::net::{IpAddr, SocketAddr},
};
@ -200,16 +201,22 @@ impl ContactInfo {
(addr.port() != 0) && Self::is_valid_ip(addr.ip())
}
pub fn is_valid_address(addr: &SocketAddr) -> bool {
Self::is_valid_tvu_address(addr) && solana_streamer::socket::is_global(addr)
// TODO: Replace this entirely with streamer SocketAddrSpace.
pub fn is_valid_address(addr: &SocketAddr, socket_addr_space: &SocketAddrSpace) -> bool {
Self::is_valid_tvu_address(addr) && socket_addr_space.check(addr)
}
pub fn client_facing_addr(&self) -> (SocketAddr, SocketAddr) {
(self.rpc, self.tpu)
}
pub fn valid_client_facing_addr(&self) -> Option<(SocketAddr, SocketAddr)> {
if ContactInfo::is_valid_address(&self.rpc) && ContactInfo::is_valid_address(&self.tpu) {
pub fn valid_client_facing_addr(
&self,
socket_addr_space: &SocketAddrSpace,
) -> Option<(SocketAddr, SocketAddr)> {
if ContactInfo::is_valid_address(&self.rpc, socket_addr_space)
&& ContactInfo::is_valid_address(&self.tpu, socket_addr_space)
{
Some((self.rpc, self.tpu))
} else {
None
@ -224,13 +231,25 @@ mod tests {
#[test]
fn test_is_valid_address() {
let bad_address_port = socketaddr!("127.0.0.1:0");
assert!(!ContactInfo::is_valid_address(&bad_address_port));
assert!(!ContactInfo::is_valid_address(
&bad_address_port,
&SocketAddrSpace::Unspecified
));
let bad_address_unspecified = socketaddr!(0, 1234);
assert!(!ContactInfo::is_valid_address(&bad_address_unspecified));
assert!(!ContactInfo::is_valid_address(
&bad_address_unspecified,
&SocketAddrSpace::Unspecified
));
let bad_address_multicast = socketaddr!([224, 254, 0, 0], 1234);
assert!(!ContactInfo::is_valid_address(&bad_address_multicast));
assert!(!ContactInfo::is_valid_address(
&bad_address_multicast,
&SocketAddrSpace::Unspecified
));
let loopback = socketaddr!("127.0.0.1:1234");
assert!(ContactInfo::is_valid_address(&loopback));
assert!(ContactInfo::is_valid_address(
&loopback,
&SocketAddrSpace::Unspecified
));
// assert!(!ContactInfo::is_valid_ip_internal(loopback.ip(), false));
}
@ -313,11 +332,19 @@ mod tests {
#[test]
fn test_valid_client_facing() {
let mut ci = ContactInfo::default();
assert_eq!(ci.valid_client_facing_addr(), None);
assert_eq!(
ci.valid_client_facing_addr(&SocketAddrSpace::Unspecified),
None
);
ci.tpu = socketaddr!("127.0.0.1:123");
assert_eq!(ci.valid_client_facing_addr(), None);
assert_eq!(
ci.valid_client_facing_addr(&SocketAddrSpace::Unspecified),
None
);
ci.rpc = socketaddr!("127.0.0.1:234");
assert!(ci.valid_client_facing_addr().is_some());
assert!(ci
.valid_client_facing_addr(&SocketAddrSpace::Unspecified)
.is_some());
}
#[test]

View File

@ -24,6 +24,7 @@ use {
signature::{Keypair, Signer},
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
net::SocketAddr,
@ -176,6 +177,7 @@ impl CrdsGossip {
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
socket_addr_space: &SocketAddrSpace,
) {
let network_size = self.crds.read().unwrap().num_nodes();
self.push.refresh_push_active_set(
@ -186,6 +188,7 @@ impl CrdsGossip {
self_shred_version,
network_size,
CRDS_GOSSIP_NUM_ACTIVE,
socket_addr_space,
)
}
@ -202,6 +205,7 @@ impl CrdsGossip {
bloom_size: usize,
ping_cache: &Mutex<PingCache>,
pings: &mut Vec<(SocketAddr, Ping)>,
socket_addr_space: &SocketAddrSpace,
) -> Result<(ContactInfo, Vec<CrdsFilter>), CrdsGossipError> {
self.pull.new_pull_request(
thread_pool,
@ -214,6 +218,7 @@ impl CrdsGossip {
bloom_size,
ping_cache,
pings,
socket_addr_space,
)
}
@ -370,6 +375,7 @@ mod test {
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
let now = timestamp();
//incorrect dest

View File

@ -32,6 +32,7 @@ use {
pubkey::Pubkey,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet, VecDeque},
convert::TryInto,
@ -226,6 +227,7 @@ impl CrdsGossipPull {
bloom_size: usize,
ping_cache: &Mutex<PingCache>,
pings: &mut Vec<(SocketAddr, Ping)>,
socket_addr_space: &SocketAddrSpace,
) -> Result<(ContactInfo, Vec<CrdsFilter>), CrdsGossipError> {
let (weights, peers): (Vec<_>, Vec<_>) = {
self.pull_options(
@ -235,6 +237,7 @@ impl CrdsGossipPull {
now,
gossip_validators,
stakes,
socket_addr_space,
)
.into_iter()
.map(|(weight, node, gossip_addr)| (weight, (node, gossip_addr)))
@ -281,6 +284,7 @@ impl CrdsGossipPull {
now: u64,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
socket_addr_space: &SocketAddrSpace,
) -> Vec<(
u64, // weight
Pubkey, // node
@ -307,7 +311,7 @@ impl CrdsGossipPull {
})
.filter(|v| {
v.id != *self_id
&& ContactInfo::is_valid_address(&v.gossip)
&& ContactInfo::is_valid_address(&v.gossip, socket_addr_space)
&& (self_shred_version == 0 || self_shred_version == v.shred_version)
&& gossip_validators
.map_or(true, |gossip_validators| gossip_validators.contains(&v.id))
@ -734,7 +738,15 @@ pub(crate) mod tests {
}
let now = 1024;
let crds = RwLock::new(crds);
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, None, &stakes);
let mut options = node.pull_options(
&crds,
&me.label().pubkey(),
0,
now,
None,
&stakes,
&SocketAddrSpace::Unspecified,
);
assert!(!options.is_empty());
options
.sort_by(|(weight_l, _, _), (weight_r, _, _)| weight_r.partial_cmp(weight_l).unwrap());
@ -783,7 +795,15 @@ pub(crate) mod tests {
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.pull_options(&crds, &me.label().pubkey(), 123, 0, None, &stakes)
.pull_options(
&crds,
&me.label().pubkey(),
123,
0,
None,
&stakes,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, pk, _)| *pk)
.collect::<Vec<_>>();
@ -793,7 +813,15 @@ pub(crate) mod tests {
// spy nodes will see all
let options = node
.pull_options(&crds, &spy.label().pubkey(), 0, 0, None, &stakes)
.pull_options(
&crds,
&spy.label().pubkey(),
0,
0,
None,
&stakes,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, pk, _)| *pk)
.collect::<Vec<_>>();
@ -834,6 +862,7 @@ pub(crate) mod tests {
0,
Some(&gossip_validators),
&stakes,
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@ -846,6 +875,7 @@ pub(crate) mod tests {
0,
Some(&gossip_validators),
&stakes,
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@ -858,6 +888,7 @@ pub(crate) mod tests {
0,
Some(&gossip_validators),
&stakes,
&SocketAddrSpace::Unspecified,
);
assert_eq!(options.len(), 1);
assert_eq!(options[0].1, node_123.pubkey());
@ -978,6 +1009,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
),
Err(CrdsGossipError::NoPeers)
);
@ -995,6 +1027,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
),
Err(CrdsGossipError::NoPeers)
);
@ -1017,6 +1050,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
);
let (peer, _) = req.unwrap();
assert_eq!(peer, *new.contact_info().unwrap());
@ -1036,6 +1070,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
);
// Even though the offline node should have higher weight, we shouldn't request from it
// until we receive a ping.
@ -1091,6 +1126,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE, // bloom_size
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
)
.unwrap();
peer
@ -1170,6 +1206,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&Mutex::new(ping_cache),
&mut pings,
&SocketAddrSpace::Unspecified,
);
let dest_crds = RwLock::<Crds>::default();
@ -1260,6 +1297,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&Mutex::new(ping_cache),
&mut pings,
&SocketAddrSpace::Unspecified,
);
let dest_crds = RwLock::<Crds>::default();
@ -1339,6 +1377,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
);
let (_, filters) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();

View File

@ -28,6 +28,7 @@ use {
rand::{seq::SliceRandom, Rng},
solana_runtime::bloom::{AtomicBloom, Bloom},
solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp},
solana_streamer::socket::SocketAddrSpace,
std::{
cmp,
collections::{HashMap, HashSet},
@ -342,6 +343,7 @@ impl CrdsGossipPush {
self_shred_version: u16,
network_size: usize,
ratio: usize,
socket_addr_space: &SocketAddrSpace,
) {
const BLOOM_FALSE_RATE: f64 = 0.1;
const BLOOM_MAX_BITS: usize = 1024 * 8 * 4;
@ -352,9 +354,16 @@ impl CrdsGossipPush {
let mut rng = rand::thread_rng();
let mut new_items = HashMap::new();
let (weights, peers): (Vec<_>, Vec<_>) = {
self.push_options(crds, self_id, self_shred_version, stakes, gossip_validators)
.into_iter()
.unzip()
self.push_options(
crds,
self_id,
self_shred_version,
stakes,
gossip_validators,
socket_addr_space,
)
.into_iter()
.unzip()
};
if peers.is_empty() {
return;
@ -396,6 +405,7 @@ impl CrdsGossipPush {
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
socket_addr_space: &SocketAddrSpace,
) -> Vec<(/*weight:*/ u64, /*node:*/ Pubkey)> {
let now = timestamp();
let mut rng = rand::thread_rng();
@ -420,7 +430,7 @@ impl CrdsGossipPush {
})
.filter(|info| {
info.id != *self_id
&& ContactInfo::is_valid_address(&info.gossip)
&& ContactInfo::is_valid_address(&info.gossip, socket_addr_space)
&& self_shred_version == info.shred_version
&& gossip_validators.map_or(true, |gossip_validators| {
gossip_validators.contains(&info.id)
@ -657,7 +667,16 @@ mod test {
assert_eq!(crds.insert(value1.clone(), now), Ok(()));
let crds = RwLock::new(crds);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let active_set = push.active_set.read().unwrap();
assert!(active_set.get(&value1.label().pubkey()).is_some());
@ -669,7 +688,16 @@ mod test {
drop(active_set);
assert_eq!(crds.write().unwrap().insert(value2.clone(), now), Ok(()));
for _ in 0..30 {
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let active_set = push.active_set.read().unwrap();
if active_set.get(&value2.label().pubkey()).is_some() {
break;
@ -685,7 +713,16 @@ mod test {
));
assert_eq!(crds.write().unwrap().insert(value2.clone(), now), Ok(()));
}
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
assert_eq!(push.active_set.read().unwrap().len(), push.num_active);
}
#[test]
@ -706,7 +743,14 @@ mod test {
push.last_pushed_to.write().unwrap().put(id, time);
}
let crds = RwLock::new(crds);
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes, None);
let mut options = push.push_options(
&crds,
&Pubkey::default(),
0,
&stakes,
None,
&SocketAddrSpace::Unspecified,
);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@ -755,7 +799,14 @@ mod test {
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.push_options(&crds, &me.label().pubkey(), 123, &stakes, None)
.push_options(
&crds,
&me.label().pubkey(),
123,
&stakes,
None,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, pk)| *pk)
.collect::<Vec<_>>();
@ -764,7 +815,14 @@ mod test {
assert!(options.contains(&node_123.pubkey()));
// spy nodes should not push to people on different shred versions
let options = node.push_options(&crds, &spy.label().pubkey(), 0, &stakes, None);
let options = node.push_options(
&crds,
&spy.label().pubkey(),
0,
&stakes,
None,
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
}
@ -799,6 +857,7 @@ mod test {
0,
&stakes,
Some(&gossip_validators),
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@ -811,6 +870,7 @@ mod test {
0,
&stakes,
Some(&gossip_validators),
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@ -822,6 +882,7 @@ mod test {
0,
&stakes,
Some(&gossip_validators),
&SocketAddrSpace::Unspecified,
);
assert_eq!(options.len(), 1);
@ -839,7 +900,16 @@ mod test {
)));
assert_eq!(crds.insert(peer.clone(), now), Ok(()));
let crds = RwLock::new(crds);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
@ -877,7 +947,16 @@ mod test {
push.process_push_message(&crds, &Pubkey::default(), vec![peers[2].clone()], now),
[Ok(origin[2])],
);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
// push 3's contact info to 1 and 2 and 3
let expected: HashMap<_, _> = vec![
@ -900,7 +979,16 @@ mod test {
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(()));
let crds = RwLock::new(crds);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
@ -929,7 +1017,16 @@ mod test {
)));
assert_eq!(crds.insert(peer, 0), Ok(()));
let crds = RwLock::new(crds);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1;

View File

@ -13,6 +13,7 @@ use {
pubkey::Pubkey,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_streamer::streamer,
std::{
collections::HashSet,
@ -47,6 +48,7 @@ impl GossipService {
&cluster_info.id(),
gossip_socket.local_addr().unwrap()
);
let socket_addr_space = *cluster_info.socket_addr_space();
let t_receiver = streamer::receiver(
gossip_socket.clone(),
exit,
@ -82,7 +84,12 @@ impl GossipService {
// https://github.com/rust-lang/rust/issues/54267
// responder thread should start after response_sender.clone(). see:
// https://github.com/rust-lang/rust/issues/39364#issuecomment-381446873
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_responder = streamer::responder(
"gossip",
gossip_socket,
response_receiver,
socket_addr_space,
);
let thread_hdls = vec![
t_receiver,
t_responder,
@ -105,6 +112,7 @@ impl GossipService {
pub fn discover_cluster(
entrypoint: &SocketAddr,
num_nodes: usize,
socket_addr_space: SocketAddrSpace,
) -> std::io::Result<Vec<ContactInfo>> {
const DISCOVER_CLUSTER_TIMEOUT: Duration = Duration::from_secs(120);
let (_all_peers, validators) = discover(
@ -116,6 +124,7 @@ pub fn discover_cluster(
None, // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
socket_addr_space,
)?;
Ok(validators)
}
@ -129,6 +138,7 @@ pub fn discover(
find_node_by_gossip_addr: Option<&SocketAddr>,
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
socket_addr_space: SocketAddrSpace,
) -> std::io::Result<(
Vec<ContactInfo>, // all gossip peers
Vec<ContactInfo>, // tvu peers (validators)
@ -145,6 +155,7 @@ pub fn discover(
my_gossip_addr,
my_shred_version,
true, // should_check_duplicate_instance,
socket_addr_space,
);
let id = spy_ref.id();
@ -191,28 +202,31 @@ pub fn discover(
}
/// Creates a ThinClient per valid node
pub fn get_clients(nodes: &[ContactInfo]) -> Vec<ThinClient> {
pub fn get_clients(nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace) -> Vec<ThinClient> {
nodes
.iter()
.filter_map(ContactInfo::valid_client_facing_addr)
.filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
.map(|addrs| create_client(addrs, VALIDATOR_PORT_RANGE))
.collect()
}
/// Creates a ThinClient by selecting a valid node at random
pub fn get_client(nodes: &[ContactInfo]) -> ThinClient {
pub fn get_client(nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace) -> ThinClient {
let nodes: Vec<_> = nodes
.iter()
.filter_map(ContactInfo::valid_client_facing_addr)
.filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
.collect();
let select = thread_rng().gen_range(0, nodes.len());
create_client(nodes[select], VALIDATOR_PORT_RANGE)
}
pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) {
pub fn get_multi_client(
nodes: &[ContactInfo],
socket_addr_space: &SocketAddrSpace,
) -> (ThinClient, usize) {
let addrs: Vec<_> = nodes
.iter()
.filter_map(ContactInfo::valid_client_facing_addr)
.filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
.collect();
let rpc_addrs: Vec<_> = addrs.iter().map(|addr| addr.0).collect();
let tpu_addrs: Vec<_> = addrs.iter().map(|addr| addr.1).collect();
@ -303,13 +317,14 @@ pub fn make_gossip_node(
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
should_check_duplicate_instance: bool,
socket_addr_space: SocketAddrSpace,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(keypair.pubkey(), gossip_addr, shred_version)
} else {
ClusterInfo::spy_node(keypair.pubkey(), shred_version)
};
let cluster_info = ClusterInfo::new(node, Arc::new(keypair));
let cluster_info = ClusterInfo::new(node, Arc::new(keypair), socket_addr_space);
if let Some(entrypoint) = entrypoint {
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
@ -339,7 +354,11 @@ mod tests {
fn test_exit() {
let exit = Arc::new(AtomicBool::new(false));
let tn = Node::new_localhost();
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
let cluster_info = ClusterInfo::new(
tn.info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let c = Arc::new(cluster_info);
let d = GossipService::new(
&c,
@ -362,7 +381,11 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(keypair),
SocketAddrSpace::Unspecified,
);
cluster_info.insert_info(peer0_info.clone());
cluster_info.insert_info(peer1_info);

View File

@ -11,6 +11,7 @@ use {
},
solana_gossip::{contact_info::ContactInfo, gossip_service::discover},
solana_sdk::pubkey::Pubkey,
solana_streamer::socket::SocketAddrSpace,
std::{
error,
net::{IpAddr, Ipv4Addr, SocketAddr},
@ -31,6 +32,13 @@ fn parse_matches() -> ArgMatches<'static> {
.about(crate_description!())
.version(solana_version::version!())
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.subcommand(
SubCommand::with_name("rpc-url")
.about("Get an RPC URL for the cluster")
@ -223,6 +231,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
let pubkey = matches
.value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let identity_keypair = keypair_of(matches, "identity");
@ -250,6 +259,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
None, // find_node_by_gossip_addr
Some(&gossip_addr), // my_gossip_addr
shred_version,
socket_addr_space,
)?;
process_spy_results(timeout, validators, num_nodes, num_nodes_exactly, pubkey);
@ -272,6 +282,7 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
let entrypoint_addr = parse_entrypoint(matches);
let timeout = value_t_or_exit!(matches, "timeout", u64);
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let (_all_peers, validators) = discover(
None, // keypair
entrypoint_addr.as_ref(),
@ -281,13 +292,14 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
entrypoint_addr.as_ref(), // find_node_by_gossip_addr
None, // my_gossip_addr
shred_version,
socket_addr_space,
)?;
let rpc_addrs: Vec<_> = validators
.iter()
.filter_map(|contact_info| {
if (any || all || Some(contact_info.gossip) == entrypoint_addr)
&& ContactInfo::is_valid_address(&contact_info.rpc)
&& ContactInfo::is_valid_address(&contact_info.rpc, &socket_addr_space)
{
return Some(contact_info.rpc);
}

View File

@ -7,7 +7,8 @@ use {
contact_info::ContactInfo,
deprecated::{shuffle_peers_and_index, sorted_retransmit_peers_and_stakes},
},
solana_sdk::pubkey::Pubkey,
solana_sdk::{pubkey::Pubkey, signer::keypair::Keypair},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
sync::{
@ -79,7 +80,11 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
// describe the leader
let leader_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.clone());
let cluster_info = ClusterInfo::new(
leader_info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
// setup staked nodes
let mut staked_nodes = HashMap::new();

View File

@ -21,6 +21,7 @@ use {
signature::{Keypair, Signer},
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
ops::Deref,
@ -262,6 +263,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
});
let mut total_bytes = bytes_tx;
@ -420,6 +422,7 @@ fn network_run_push(
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
});
}
@ -490,6 +493,7 @@ fn network_run_pull(
cluster_info::MAX_BLOOM_SIZE,
from.ping_cache.deref(),
&mut pings,
&SocketAddrSpace::Unspecified,
)
.ok()?;
let from_pubkey = from.keypair.pubkey();
@ -710,6 +714,7 @@ fn test_prune_errors() {
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
let now = timestamp();
//incorrect dest

View File

@ -18,6 +18,7 @@ use {
timing::timestamp,
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
@ -33,7 +34,11 @@ use {
fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), keypair));
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
@ -56,7 +61,11 @@ fn test_node_with_bank(
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), node_keypair));
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
@ -209,7 +218,13 @@ pub fn cluster_info_retransmit() {
p.meta.size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
ClusterInfo::retransmit_to(&retransmit_peers, &p, &tn1, false);
ClusterInfo::retransmit_to(
&retransmit_peers,
&p,
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {

View File

@ -30,6 +30,7 @@ solana-rpc = { path = "../rpc", version = "=1.8.0" }
solana-runtime = { path = "../runtime", version = "=1.8.0" }
solana-sdk = { path = "../sdk", version = "=1.8.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.8.0" }
tempfile = "3.2.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.8.0" }

View File

@ -4,6 +4,7 @@ use solana_core::validator::ValidatorConfig;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::path::PathBuf;
use std::sync::Arc;
@ -39,7 +40,12 @@ pub trait Cluster {
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>;
fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo;
fn restart_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
fn restart_node(
&mut self,
pubkey: &Pubkey,
cluster_validator_info: ClusterValidatorInfo,
socket_addr_space: SocketAddrSpace,
);
fn create_restart_context(
&mut self,
pubkey: &Pubkey,
@ -48,7 +54,13 @@ pub trait Cluster {
fn restart_node_with_context(
cluster_validator_info: ClusterValidatorInfo,
restart_context: (Node, Option<ContactInfo>),
socket_addr_space: SocketAddrSpace,
) -> ClusterValidatorInfo;
fn add_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
fn exit_restart_node(&mut self, pubkey: &Pubkey, config: ValidatorConfig);
fn exit_restart_node(
&mut self,
pubkey: &Pubkey,
config: ValidatorConfig,
socket_addr_space: SocketAddrSpace,
);
}

View File

@ -30,6 +30,7 @@ use solana_sdk::{
timing::{duration_as_ms, timestamp},
transport::TransportError,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::vote_transaction;
use std::{
collections::{HashMap, HashSet},
@ -46,8 +47,10 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
funding_keypair: &Keypair,
nodes: usize,
ignore_nodes: HashSet<Pubkey, S>,
socket_addr_space: SocketAddrSpace,
) {
let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes =
discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
assert!(cluster_nodes.len() >= nodes);
let ignore_nodes = Arc::new(ignore_nodes);
cluster_nodes.par_iter().for_each(|ingress_node| {
@ -186,9 +189,11 @@ pub fn kill_entry_and_spend_and_verify_rest(
funding_keypair: &Keypair,
nodes: usize,
slot_millis: u64,
socket_addr_space: SocketAddrSpace,
) {
info!("kill_entry_and_spend_and_verify_rest...");
let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes =
discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
assert!(cluster_nodes.len() >= nodes);
let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE);
// sleep long enough to make sure we are in epoch 3
@ -418,6 +423,7 @@ pub fn submit_vote_to_cluster_gossip(
vote_hash: Hash,
blockhash: Hash,
gossip_addr: SocketAddr,
socket_addr_space: &SocketAddrSpace,
) -> Result<(), GossipError> {
let vote_tx = vote_transaction::new_vote_transaction(
vec![vote_slot],
@ -439,5 +445,6 @@ pub fn submit_vote_to_cluster_gossip(
)],
node_keypair.pubkey(),
gossip_addr,
socket_addr_space,
)
}

View File

@ -37,6 +37,7 @@ use solana_sdk::{
transaction::Transaction,
};
use solana_stake_program::{config::create_account as create_stake_config_account, stake_state};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::{
vote_instruction,
vote_state::{VoteInit, VoteState},
@ -108,6 +109,7 @@ impl LocalCluster {
num_nodes: usize,
cluster_lamports: u64,
lamports_per_node: u64,
socket_addr_space: SocketAddrSpace,
) -> Self {
let stakes: Vec<_> = (0..num_nodes).map(|_| lamports_per_node).collect();
let mut config = ClusterConfig {
@ -119,10 +121,10 @@ impl LocalCluster {
),
..ClusterConfig::default()
};
Self::new(&mut config)
Self::new(&mut config, socket_addr_space)
}
pub fn new(config: &mut ClusterConfig) -> Self {
pub fn new(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self {
assert_eq!(config.validator_configs.len(), config.node_stakes.len());
let mut validator_keys = {
if let Some(ref keys) = config.validator_keys {
@ -219,6 +221,7 @@ impl LocalCluster {
&leader_config,
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
socket_addr_space,
);
let mut validators = HashMap::new();
@ -262,22 +265,35 @@ impl LocalCluster {
*stake,
key.clone(),
node_pubkey_to_vote_key.get(&key.pubkey()).cloned(),
socket_addr_space,
);
}
let mut listener_config = safe_clone_config(&config.validator_configs[0]);
listener_config.voting_disabled = true;
(0..config.num_listeners).for_each(|_| {
cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None);
cluster.add_validator(
&listener_config,
0,
Arc::new(Keypair::new()),
None,
socket_addr_space,
);
});
discover_cluster(
&cluster.entry_point_info.gossip,
config.node_stakes.len() + config.num_listeners as usize,
socket_addr_space,
)
.unwrap();
discover_cluster(&cluster.entry_point_info.gossip, config.node_stakes.len()).unwrap();
discover_cluster(
&cluster.entry_point_info.gossip,
config.node_stakes.len(),
socket_addr_space,
)
.unwrap();
cluster
}
@ -305,6 +321,7 @@ impl LocalCluster {
stake: u64,
validator_keypair: Arc<Keypair>,
mut voting_keypair: Option<Arc<Keypair>>,
socket_addr_space: SocketAddrSpace,
) -> Pubkey {
let client = create_client(
self.entry_point_info.client_facing_addr(),
@ -361,6 +378,7 @@ impl LocalCluster {
&config,
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
socket_addr_space,
);
let validator_pubkey = validator_keypair.pubkey();
@ -400,7 +418,12 @@ impl LocalCluster {
Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports)
}
pub fn check_for_new_roots(&self, num_new_roots: usize, test_name: &str) {
pub fn check_for_new_roots(
&self,
num_new_roots: usize,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
let alive_node_contact_infos: Vec<_> = self
.validators
.values()
@ -411,6 +434,7 @@ impl LocalCluster {
let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip,
alive_node_contact_infos.len(),
socket_addr_space,
)
.unwrap();
info!("{} discovered {} nodes", test_name, cluster_nodes.len());
@ -419,7 +443,12 @@ impl LocalCluster {
info!("{} done waiting for roots", test_name);
}
pub fn check_no_new_roots(&self, num_slots_to_wait: usize, test_name: &str) {
pub fn check_no_new_roots(
&self,
num_slots_to_wait: usize,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
let alive_node_contact_infos: Vec<_> = self
.validators
.values()
@ -430,6 +459,7 @@ impl LocalCluster {
let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip,
alive_node_contact_infos.len(),
socket_addr_space,
)
.unwrap();
info!("{} discovered {} nodes", test_name, cluster_nodes.len());
@ -646,10 +676,18 @@ impl Cluster for LocalCluster {
(node, entry_point_info)
}
fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
fn restart_node(
&mut self,
pubkey: &Pubkey,
mut cluster_validator_info: ClusterValidatorInfo,
socket_addr_space: SocketAddrSpace,
) {
let restart_context = self.create_restart_context(pubkey, &mut cluster_validator_info);
let cluster_validator_info =
Self::restart_node_with_context(cluster_validator_info, restart_context);
let cluster_validator_info = Self::restart_node_with_context(
cluster_validator_info,
restart_context,
socket_addr_space,
);
self.add_node(pubkey, cluster_validator_info);
}
@ -660,6 +698,7 @@ impl Cluster for LocalCluster {
fn restart_node_with_context(
mut cluster_validator_info: ClusterValidatorInfo,
(node, entry_point_info): (Node, Option<ContactInfo>),
socket_addr_space: SocketAddrSpace,
) -> ClusterValidatorInfo {
// Restart the node
let validator_info = &cluster_validator_info.info;
@ -677,15 +716,21 @@ impl Cluster for LocalCluster {
&safe_clone_config(&cluster_validator_info.config),
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
socket_addr_space,
);
cluster_validator_info.validator = Some(restarted_node);
cluster_validator_info
}
fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {
fn exit_restart_node(
&mut self,
pubkey: &Pubkey,
validator_config: ValidatorConfig,
socket_addr_space: SocketAddrSpace,
) {
let mut cluster_validator_info = self.exit_node(pubkey);
cluster_validator_info.config = validator_config;
self.restart_node(pubkey, cluster_validator_info);
self.restart_node(pubkey, cluster_validator_info, socket_addr_space);
}
fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
@ -708,7 +753,8 @@ mod test {
fn test_local_cluster_start_and_exit() {
solana_logger::setup();
let num_nodes = 1;
let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
let cluster =
LocalCluster::new_with_equal_stakes(num_nodes, 100, 3, SocketAddrSpace::Unspecified);
assert_eq!(cluster.validators.len(), num_nodes);
}
@ -728,7 +774,7 @@ mod test {
stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH as u64,
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
assert_eq!(cluster.validators.len(), NUM_NODES);
}
}

View File

@ -54,6 +54,7 @@ use solana_sdk::{
signature::{Keypair, Signer},
system_program, system_transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::{vote_state::MAX_LOCKOUT_HISTORY, vote_transaction};
use std::{
collections::{BTreeSet, HashMap, HashSet},
@ -88,7 +89,7 @@ fn test_ledger_cleanup_service() {
validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// 200ms/per * 100 = 20 seconds, so sleep a little longer than that.
sleep(Duration::from_secs(60));
@ -97,6 +98,7 @@ fn test_ledger_cleanup_service() {
&cluster.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
cluster.close_preserve_ledgers();
//check everyone's ledgers and make sure only ~100 slots are stored
@ -118,12 +120,14 @@ fn test_spend_and_verify_all_nodes_1() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
error!("test_spend_and_verify_all_nodes_1");
let num_nodes = 1;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@ -133,12 +137,14 @@ fn test_spend_and_verify_all_nodes_2() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
error!("test_spend_and_verify_all_nodes_2");
let num_nodes = 2;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@ -148,12 +154,14 @@ fn test_spend_and_verify_all_nodes_3() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
error!("test_spend_and_verify_all_nodes_3");
let num_nodes = 3;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@ -162,7 +170,8 @@ fn test_spend_and_verify_all_nodes_3() {
fn test_local_cluster_signature_subscribe() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
let num_nodes = 2;
let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let cluster =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
let nodes = cluster.get_node_pubkeys();
// Get non leader
@ -239,12 +248,14 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() {
.expect("please set environment variable NUM_NODES")
.parse()
.expect("could not parse NUM_NODES as a number");
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@ -262,7 +273,7 @@ fn test_leader_failure_4() {
validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
..ClusterConfig::default()
};
let local = LocalCluster::new(&mut config);
let local = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
cluster_tests::kill_entry_and_spend_and_verify_rest(
&local.entry_point_info,
@ -275,6 +286,7 @@ fn test_leader_failure_4() {
&local.funding_keypair,
num_nodes,
config.ticks_per_slot * config.poh_config.target_tick_duration.as_millis() as u64,
SocketAddrSpace::Unspecified,
);
}
@ -364,7 +376,7 @@ fn run_cluster_partition<C>(
"PARTITION_TEST starting cluster with {:?} partitions slots_per_epoch: {}",
partitions, config.slots_per_epoch,
);
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
info!("PARTITION_TEST spend_and_verify_all_nodes(), ensure all nodes are caught up");
cluster_tests::spend_and_verify_all_nodes(
@ -372,9 +384,15 @@ fn run_cluster_partition<C>(
&cluster.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
num_nodes,
SocketAddrSpace::Unspecified,
)
.unwrap();
// Check epochs have correct number of slots
info!("PARTITION_TEST sleeping until partition starting condition",);
@ -421,7 +439,7 @@ fn run_cluster_partition<C>(
fn test_cluster_partition_1_2() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&[vec![1], vec![1, 1]],
@ -441,7 +459,7 @@ fn test_cluster_partition_1_2() {
fn test_cluster_partition_1_1() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&[vec![1], vec![1]],
@ -461,7 +479,7 @@ fn test_cluster_partition_1_1() {
fn test_cluster_partition_1_1_1() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&[vec![1], vec![1], vec![1]],
@ -521,7 +539,7 @@ fn test_kill_heaviest_partition() {
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
info!("Killing validator with id: {}", validator_to_kill);
cluster.exit_node(&validator_to_kill);
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&partitions,
@ -853,6 +871,7 @@ fn test_switch_threshold_uses_gossip_votes() {
.get_contact_info(&context.heaviest_validator_key)
.unwrap()
.gossip,
&SocketAddrSpace::Unspecified,
)
.unwrap();
@ -935,7 +954,7 @@ fn test_kill_partition_switch_threshold_no_progress() {
|_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_no_new_roots(400, "PARTITION_TEST");
cluster.check_no_new_roots(400, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
// This kills `max_failures_stake`, so no progress should be made
@ -988,7 +1007,7 @@ fn test_kill_partition_switch_threshold_progress() {
|_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_kill_partition_switch_threshold(
&[&[(failures_stake as usize, 16)]],
@ -1176,6 +1195,7 @@ fn test_fork_choice_refresh_old_votes() {
cluster.restart_node(
&context.smallest_validator_key,
context.alive_stake3_info.take().unwrap(),
SocketAddrSpace::Unspecified,
);
loop {
@ -1219,7 +1239,7 @@ fn test_fork_choice_refresh_old_votes() {
// for lockouts built during partition to resolve and gives validators an opportunity
// to try and switch forks)
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_kill_partition_switch_threshold(
@ -1246,16 +1266,19 @@ fn test_two_unbalanced_stakes() {
let num_ticks_per_slot = 10;
let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let mut cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![999_990, 3],
cluster_lamports: 1_000_000,
validator_configs: make_identical_validator_configs(&validator_config, 2),
ticks_per_slot: num_ticks_per_slot,
slots_per_epoch: num_slots_per_epoch,
stakers_slot_offset: num_slots_per_epoch,
poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
..ClusterConfig::default()
});
let mut cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990, 3],
cluster_lamports: 1_000_000,
validator_configs: make_identical_validator_configs(&validator_config, 2),
ticks_per_slot: num_ticks_per_slot,
slots_per_epoch: num_slots_per_epoch,
stakers_slot_offset: num_slots_per_epoch,
poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
cluster_tests::sleep_n_epochs(
10.0,
@ -1280,9 +1303,14 @@ fn test_forwarding() {
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 2),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
2,
SocketAddrSpace::Unspecified,
)
.unwrap();
assert!(cluster_nodes.len() >= 2);
let leader_pubkey = cluster.entry_point_info.id;
@ -1304,15 +1332,18 @@ fn test_restart_node() {
let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2;
let ticks_per_slot = 16;
let validator_config = ValidatorConfig::default();
let mut cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![100; 1],
cluster_lamports: 100,
validator_configs: vec![safe_clone_config(&validator_config)],
ticks_per_slot,
slots_per_epoch,
stakers_slot_offset: slots_per_epoch,
..ClusterConfig::default()
});
let mut cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![100; 1],
cluster_lamports: 100,
validator_configs: vec![safe_clone_config(&validator_config)],
ticks_per_slot,
slots_per_epoch,
stakers_slot_offset: slots_per_epoch,
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let nodes = cluster.get_node_pubkeys();
cluster_tests::sleep_n_epochs(
1.0,
@ -1320,7 +1351,7 @@ fn test_restart_node() {
clock::DEFAULT_TICKS_PER_SLOT,
slots_per_epoch,
);
cluster.exit_restart_node(&nodes[0], validator_config);
cluster.exit_restart_node(&nodes[0], validator_config, SocketAddrSpace::Unspecified);
cluster_tests::sleep_n_epochs(
0.5,
&cluster.genesis_config.poh_config,
@ -1345,8 +1376,13 @@ fn test_listener_startup() {
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
4,
SocketAddrSpace::Unspecified,
)
.unwrap();
assert_eq!(cluster_nodes.len(), 4);
}
@ -1362,8 +1398,13 @@ fn test_mainnet_beta_cluster_type() {
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
1,
SocketAddrSpace::Unspecified,
)
.unwrap();
assert_eq!(cluster_nodes.len(), 1);
let client = create_client(
@ -1470,7 +1511,10 @@ fn test_frozen_account_from_genesis() {
}],
..ClusterConfig::default()
};
generate_frozen_account_panic(LocalCluster::new(&mut config), validator_identity);
generate_frozen_account_panic(
LocalCluster::new(&mut config, SocketAddrSpace::Unspecified),
validator_identity,
);
}
#[test]
@ -1494,7 +1538,7 @@ fn test_frozen_account_from_snapshot() {
),
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let snapshot_package_output_path = &snapshot_test_config
.validator_config
@ -1511,7 +1555,11 @@ fn test_frozen_account_from_snapshot() {
// Restart the validator from a snapshot
let validator_info = cluster.exit_node(&validator_identity.pubkey());
cluster.restart_node(&validator_identity.pubkey(), validator_info);
cluster.restart_node(
&validator_identity.pubkey(),
validator_info,
SocketAddrSpace::Unspecified,
);
generate_frozen_account_panic(cluster, validator_identity);
}
@ -1538,10 +1586,15 @@ fn test_consistency_halt() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
sleep(Duration::from_millis(5000));
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
1,
SocketAddrSpace::Unspecified,
)
.unwrap();
info!("num_nodes: {}", cluster_nodes.len());
// Add a validator with the leader as trusted, it should halt when it detects
@ -1565,19 +1618,28 @@ fn test_consistency_halt() {
validator_stake as u64,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,
);
let num_nodes = 2;
assert_eq!(
discover_cluster(&cluster.entry_point_info.gossip, num_nodes)
.unwrap()
.len(),
discover_cluster(
&cluster.entry_point_info.gossip,
num_nodes,
SocketAddrSpace::Unspecified
)
.unwrap()
.len(),
num_nodes
);
// Check for only 1 node on the network.
let mut encountered_error = false;
loop {
let discover = discover_cluster(&cluster.entry_point_info.gossip, 2);
let discover = discover_cluster(
&cluster.entry_point_info.gossip,
2,
SocketAddrSpace::Unspecified,
);
match discover {
Err(_) => {
encountered_error = true;
@ -1629,7 +1691,7 @@ fn test_snapshot_download() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// Get slot after which this was generated
let snapshot_package_output_path = &leader_snapshot_test_config
@ -1660,6 +1722,7 @@ fn test_snapshot_download() {
stake,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,
);
}
@ -1687,7 +1750,7 @@ fn test_snapshot_restart_tower() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// Let the nodes run for a while, then stop one of the validators
sleep(Duration::from_millis(5000));
@ -1723,7 +1786,7 @@ fn test_snapshot_restart_tower() {
// Restart validator from snapshot, the validator's tower state in this snapshot
// will contain slots < the root bank of the snapshot. Validator should not panic.
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
// Test cluster can still make progress and get confirmations in tower
// Use the restarted node as the discovery point so that we get updated
@ -1734,6 +1797,7 @@ fn test_snapshot_restart_tower() {
&cluster.funding_keypair,
1,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@ -1767,7 +1831,7 @@ fn test_snapshots_blockstore_floor() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
trace!("Waiting for snapshot tar to be generated with slot",);
@ -1797,7 +1861,12 @@ fn test_snapshots_blockstore_floor() {
// Start up a new node from a snapshot
let validator_stake = 5;
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
1,
SocketAddrSpace::Unspecified,
)
.unwrap();
let mut trusted_validators = HashSet::new();
trusted_validators.insert(cluster_nodes[0].id);
validator_snapshot_test_config
@ -1809,6 +1878,7 @@ fn test_snapshots_blockstore_floor() {
validator_stake,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,
);
let all_pubkeys = cluster.get_node_pubkeys();
let validator_id = all_pubkeys
@ -1877,7 +1947,7 @@ fn test_snapshots_restart_validity() {
// Create and reboot the node from snapshot `num_runs` times
let num_runs = 3;
let mut expected_balances = HashMap::new();
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
for i in 1..num_runs {
info!("run {}", i);
// Push transactions to one of the nodes and confirm that transactions were
@ -1907,6 +1977,7 @@ fn test_snapshots_restart_validity() {
cluster.exit_restart_node(
&nodes[0],
safe_clone_config(&snapshot_test_config.validator_config),
SocketAddrSpace::Unspecified,
);
// Verify account balances on validator
@ -1920,6 +1991,7 @@ fn test_snapshots_restart_validity() {
&cluster.funding_keypair,
1,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
}
@ -1931,7 +2003,11 @@ fn test_snapshots_restart_validity() {
fn test_fail_entry_verification_leader() {
let (cluster, _) =
test_faulty_node(BroadcastStageType::FailEntryVerification, vec![60, 50, 60]);
cluster.check_for_new_roots(16, "test_fail_entry_verification_leader");
cluster.check_for_new_roots(
16,
"test_fail_entry_verification_leader",
SocketAddrSpace::Unspecified,
);
}
#[test]
@ -1941,7 +2017,11 @@ fn test_fail_entry_verification_leader() {
fn test_fake_shreds_broadcast_leader() {
let node_stakes = vec![300, 100];
let (cluster, _) = test_faulty_node(BroadcastStageType::BroadcastFakeShreds, node_stakes);
cluster.check_for_new_roots(16, "test_fake_shreds_broadcast_leader");
cluster.check_for_new_roots(
16,
"test_fake_shreds_broadcast_leader",
SocketAddrSpace::Unspecified,
);
}
#[test]
@ -2029,6 +2109,7 @@ fn test_duplicate_shreds_broadcast_leader() {
None,
0,
false,
SocketAddrSpace::Unspecified,
);
let t_voter = {
@ -2127,7 +2208,11 @@ fn test_duplicate_shreds_broadcast_leader() {
};
// 4) Check that the cluster is making progress
cluster.check_for_new_roots(16, "test_duplicate_shreds_broadcast_leader");
cluster.check_for_new_roots(
16,
"test_duplicate_shreds_broadcast_leader",
SocketAddrSpace::Unspecified,
);
// Clean up threads
exit.store(true, Ordering::Relaxed);
@ -2167,7 +2252,7 @@ fn test_faulty_node(
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut cluster_config);
let cluster = LocalCluster::new(&mut cluster_config, SocketAddrSpace::Unspecified);
let validator_keys: Vec<Arc<Keypair>> = validator_keys
.into_iter()
.map(|(keypair, _)| keypair)
@ -2186,7 +2271,7 @@ fn test_wait_for_max_stake() {
validator_configs: make_identical_validator_configs(&validator_config, 4),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let client = RpcClient::new_socket(cluster.entry_point_info.rpc);
assert!(client
@ -2210,7 +2295,7 @@ fn test_no_voting() {
validator_configs: vec![validator_config],
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let client = cluster
.get_validator_client(&cluster.entry_point_info.id)
.unwrap();
@ -2264,7 +2349,7 @@ fn test_optimistic_confirmation_violation_detection() {
skip_warmup_slots: true,
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let entry_point_id = cluster.entry_point_info.id;
// Let the nodes run for a while. Wait for validators to vote on slot `S`
// so that the vote on `S-1` is definitely in gossip and optimistic confirmation is
@ -2315,7 +2400,11 @@ fn test_optimistic_confirmation_violation_detection() {
let buf = std::env::var("OPTIMISTIC_CONF_TEST_DUMP_LOG")
.err()
.map(|_| BufferRedirect::stderr().unwrap());
cluster.restart_node(&entry_point_id, exited_validator_info);
cluster.restart_node(
&entry_point_id,
exited_validator_info,
SocketAddrSpace::Unspecified,
);
// Wait for a root > prev_voted_slot to be set. Because the root is on a
// different fork than `prev_voted_slot`, then optimistic confirmation is
@ -2383,7 +2472,7 @@ fn test_validator_saves_tower() {
validator_keys: Some(vec![(validator_identity_keypair.clone(), true)]),
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
@ -2416,7 +2505,7 @@ fn test_validator_saves_tower() {
assert_eq!(tower1.root(), 0);
// Restart the validator and wait for a new root
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for the first root
@ -2448,7 +2537,7 @@ fn test_validator_saves_tower() {
// without having to wait for that snapshot to be generated in this test
tower1.save(&validator_identity_keypair).unwrap();
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for a new root, demonstrating the validator was able to make progress from the older `tower1`
@ -2480,7 +2569,7 @@ fn test_validator_saves_tower() {
remove_tower(&ledger_path, &validator_id);
validator_info.config.require_tower = false;
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for a couple more slots to pass so another vote occurs
@ -2623,7 +2712,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
skip_warmup_slots: true,
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let base_slot = 26; // S2
let next_slot_on_a = 27; // S3
@ -2704,7 +2793,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// Run validator C only to make it produce and vote on its own fork.
info!("Restart validator C again!!!");
let val_c_ledger_path = validator_c_info.info.ledger_path.clone();
cluster.restart_node(&validator_c_pubkey, validator_c_info);
cluster.restart_node(
&validator_c_pubkey,
validator_c_info,
SocketAddrSpace::Unspecified,
);
let mut votes_on_c_fork = std::collections::BTreeSet::new(); // S4 and S5
for _ in 0..100 {
@ -2726,7 +2819,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// Step 4:
// verify whether there was violation or not
info!("Restart validator A again!!!");
cluster.restart_node(&validator_a_pubkey, validator_a_info);
cluster.restart_node(
&validator_a_pubkey,
validator_a_info,
SocketAddrSpace::Unspecified,
);
// monitor for actual votes from validator A
let mut bad_vote_detected = false;
@ -2813,7 +2910,7 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
skip_warmup_slots: true,
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let val_a_ledger_path = cluster.ledger_path(&validator_a_pubkey);
@ -2838,7 +2935,11 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
purge_slots(&blockstore, purged_slot_before_restart, 100);
}
cluster.restart_node(&validator_a_pubkey, validator_a_info);
cluster.restart_node(
&validator_a_pubkey,
validator_a_info,
SocketAddrSpace::Unspecified,
);
let mut newly_rooted = false;
let some_root_after_restart = purged_slot_before_restart + 25; // 25 is arbitrary; just wait a bit
@ -2920,7 +3021,10 @@ fn test_hard_fork_invalidates_tower() {
skip_warmup_slots: true,
..ClusterConfig::default()
};
let cluster = std::sync::Arc::new(std::sync::Mutex::new(LocalCluster::new(&mut config)));
let cluster = std::sync::Arc::new(std::sync::Mutex::new(LocalCluster::new(
&mut config,
SocketAddrSpace::Unspecified,
)));
let val_a_ledger_path = cluster.lock().unwrap().ledger_path(&validator_a_pubkey);
@ -2965,8 +3069,11 @@ fn test_hard_fork_invalidates_tower() {
.lock()
.unwrap()
.create_restart_context(&validator_a_pubkey, &mut validator_a_info);
let restarted_validator_info =
LocalCluster::restart_node_with_context(validator_a_info, restart_context);
let restarted_validator_info = LocalCluster::restart_node_with_context(
validator_a_info,
restart_context,
SocketAddrSpace::Unspecified,
);
cluster_for_a
.lock()
.unwrap()
@ -2988,16 +3095,20 @@ fn test_hard_fork_invalidates_tower() {
}
// restart validator B normally
cluster
.lock()
.unwrap()
.restart_node(&validator_b_pubkey, validator_b_info);
cluster.lock().unwrap().restart_node(
&validator_b_pubkey,
validator_b_info,
SocketAddrSpace::Unspecified,
);
// validator A should now start so join its thread here
thread.join().unwrap();
// new slots should be rooted after hard-fork cluster relaunch
cluster.lock().unwrap().check_for_new_roots(16, "hard fork");
cluster
.lock()
.unwrap()
.check_for_new_roots(16, "hard fork", SocketAddrSpace::Unspecified);
}
#[test]
@ -3056,7 +3167,11 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(20, "run_test_load_program_accounts_partition");
cluster.check_for_new_roots(
20,
"run_test_load_program_accounts_partition",
SocketAddrSpace::Unspecified,
);
exit.store(true, Ordering::Relaxed);
t_update.join().unwrap();
t_scan.join().unwrap();
@ -3231,7 +3346,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
additional_accounts: starting_accounts,
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// Give the threads a client to use for querying the cluster
let all_pubkeys = cluster.get_node_pubkeys();
@ -3247,7 +3362,11 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
scan_client_sender.send(scan_client).unwrap();
// Wait for some roots to pass
cluster.check_for_new_roots(40, "run_test_load_program_accounts");
cluster.check_for_new_roots(
40,
"run_test_load_program_accounts",
SocketAddrSpace::Unspecified,
);
// Exit and ensure no violations of consistency were found
exit.store(true, Ordering::Relaxed);

View File

@ -39,6 +39,7 @@ solana-perf = { path = "../perf", version = "=1.8.0" }
solana-poh = { path = "../poh", version = "=1.8.0" }
solana-runtime = { path = "../runtime", version = "=1.8.0" }
solana-sdk = { path = "../sdk", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.8.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.8.0" }
solana-version = { path = "../version", version = "=1.8.0" }

View File

@ -69,6 +69,7 @@ use {
sysvar::stake_history,
transaction::{self, Transaction, TransactionError},
},
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::{
EncodedConfirmedTransaction, Reward, RewardType, TransactionConfirmationStatus,
TransactionStatus, UiConfirmedBlock, UiTransactionEncoding,
@ -276,7 +277,7 @@ impl JsonRpcRequestProcessor {
}
// Useful for unit testing
pub fn new_from_bank(bank: &Arc<Bank>) -> Self {
pub fn new_from_bank(bank: &Arc<Bank>, socket_addr_space: SocketAddrSpace) -> Self {
let genesis_hash = bank.hash();
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
&[bank.clone()],
@ -284,7 +285,11 @@ impl JsonRpcRequestProcessor {
)));
let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).unwrap());
let exit = Arc::new(AtomicBool::new(false));
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
socket_addr_space,
));
let tpu_address = cluster_info.my_contact_info().tpu;
let (sender, receiver) = channel();
SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1);
@ -3097,20 +3102,21 @@ pub mod rpc_full {
fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> {
debug!("get_cluster_nodes rpc request received");
let cluster_info = &meta.cluster_info;
fn valid_address_or_none(addr: &SocketAddr) -> Option<SocketAddr> {
if ContactInfo::is_valid_address(addr) {
let socket_addr_space = cluster_info.socket_addr_space();
let valid_address_or_none = |addr: &SocketAddr| -> Option<SocketAddr> {
if ContactInfo::is_valid_address(addr, socket_addr_space) {
Some(*addr)
} else {
None
}
}
};
let my_shred_version = cluster_info.my_shred_version();
Ok(cluster_info
.all_peers()
.iter()
.filter_map(|(contact_info, _)| {
if my_shred_version == contact_info.shred_version
&& ContactInfo::is_valid_address(&contact_info.gossip)
&& ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
{
let (version, feature_set) = if let Some(version) =
cluster_info.get_node_version(&contact_info.id)
@ -4147,10 +4153,14 @@ pub mod tests {
let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash);
let _ = bank.process_transaction(&tx);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo {
id: alice.pubkey(),
..ContactInfo::default()
}));
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo {
id: alice.pubkey(),
..ContactInfo::default()
},
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let tpu_address = cluster_info.my_contact_info().tpu;
cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr(
@ -4226,7 +4236,8 @@ pub mod tests {
let bank = Arc::new(Bank::new(&genesis.genesis_config));
bank.transfer(20, &genesis.mint_keypair, &bob_pubkey)
.unwrap();
let request_processor = JsonRpcRequestProcessor::new_from_bank(&bank);
let request_processor =
JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
assert_eq!(request_processor.get_transaction_count(None), 1);
}
@ -4235,7 +4246,7 @@ pub mod tests {
let genesis = create_genesis_config(20);
let mint_pubkey = genesis.mint_keypair.pubkey();
let bank = Arc::new(Bank::new(&genesis.genesis_config));
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_minimal::MinimalImpl.to_delegate());
@ -4263,7 +4274,7 @@ pub mod tests {
let genesis = create_genesis_config(20);
let mint_pubkey = genesis.mint_keypair.pubkey();
let bank = Arc::new(Bank::new(&genesis.genesis_config));
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_minimal::MinimalImpl.to_delegate());
@ -4404,7 +4415,7 @@ pub mod tests {
bank.transfer(4, &genesis.mint_keypair, &bob_pubkey)
.unwrap();
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_minimal::MinimalImpl.to_delegate());
@ -5708,7 +5719,7 @@ pub mod tests {
fn test_rpc_send_bad_tx() {
let genesis = create_genesis_config(100);
let bank = Arc::new(Bank::new(&genesis.genesis_config));
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_full::FullImpl.to_delegate());
@ -5735,8 +5746,10 @@ pub mod tests {
let mut io = MetaIoHandler::default();
io.extend_with(rpc_full::FullImpl.to_delegate());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let tpu_address = cluster_info.my_contact_info().tpu;
let (meta, receiver) = JsonRpcRequestProcessor::new(
@ -6016,7 +6029,11 @@ pub mod tests {
CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()),
)));
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let tpu_address = cluster_info.my_contact_info().tpu;
let (request_processor, receiver) = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
@ -7414,8 +7431,11 @@ pub mod tests {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100);
let bank = Bank::new(&genesis_config);

View File

@ -117,8 +117,16 @@ impl RpcHealth {
#[cfg(test)]
pub(crate) fn stub() -> Arc<Self> {
use {
solana_gossip::contact_info::ContactInfo, solana_sdk::signer::keypair::Keypair,
solana_streamer::socket::SocketAddrSpace,
};
Arc::new(Self::new(
Arc::new(ClusterInfo::default()),
Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)),
None,
42,
Arc::new(AtomicBool::new(false)),

View File

@ -493,7 +493,10 @@ mod tests {
use {
super::*,
crate::rpc::create_validator_exit,
solana_gossip::crds_value::{CrdsData, CrdsValue, SnapshotHash},
solana_gossip::{
contact_info::ContactInfo,
crds_value::{CrdsData, CrdsValue, SnapshotHash},
},
solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
@ -507,7 +510,9 @@ mod tests {
solana_sdk::{
genesis_config::{ClusterType, DEFAULT_GENESIS_ARCHIVE},
signature::Signer,
signer::keypair::Keypair,
},
solana_streamer::socket::SocketAddrSpace,
std::{
io::Write,
net::{IpAddr, Ipv4Addr},
@ -524,7 +529,11 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let bank = Bank::new(&genesis_config);
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let rpc_addr = SocketAddr::new(
ip_addr,
@ -718,7 +727,11 @@ mod tests {
#[test]
fn test_health_check_with_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let health_check_slot_distance = 123;
let override_health_check = Arc::new(AtomicBool::new(false));
let trusted_validators = vec![

View File

@ -333,6 +333,7 @@ mod test {
system_program, system_transaction,
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::sync::{atomic::AtomicBool, mpsc::channel},
};
@ -821,6 +822,7 @@ mod test {
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
));
let validator0_socket = SocketAddr::from(([127, 0, 0, 1], 1111));

View File

@ -1,7 +1,7 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use crate::{
recvmmsg::{recv_mmsg, NUM_RCVMMSGS},
socket::is_global,
socket::SocketAddrSpace,
};
pub use solana_perf::packet::{
limited_deserialize, to_packets_chunked, Packets, PacketsRecycler, NUM_PACKETS,
@ -57,10 +57,14 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res
Ok(i)
}
pub fn send_to(obj: &Packets, socket: &UdpSocket) -> Result<()> {
pub fn send_to(
obj: &Packets,
socket: &UdpSocket,
socket_addr_space: &SocketAddrSpace,
) -> Result<()> {
for p in &obj.packets {
let addr = p.meta.addr();
if is_global(&addr) {
if socket_addr_space.check(&addr) {
socket.send_to(&p.data[..p.meta.size], &addr)?;
}
}
@ -99,7 +103,7 @@ mod tests {
m.meta.set_addr(&addr);
m.meta.size = PACKET_DATA_SIZE;
}
send_to(&p, &send_socket).unwrap();
send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap();
let recvd = recv_from(&mut p, &recv_socket, 1).unwrap();
@ -152,7 +156,7 @@ mod tests {
m.meta.set_addr(&addr);
m.meta.size = 1;
}
send_to(&p, &send_socket).unwrap();
send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap();
}
let recvd = recv_from(&mut p, &recv_socket, 100).unwrap();

View File

@ -1,28 +1,39 @@
use std::net::SocketAddr;
use std::net::{IpAddr, SocketAddr};
// TODO: remove these once IpAddr::is_global is stable.
#[cfg(test)]
pub fn is_global(_: &SocketAddr) -> bool {
true
#[derive(Clone, Copy, PartialEq)]
pub enum SocketAddrSpace {
Unspecified,
Global,
}
#[cfg(not(test))]
pub fn is_global(addr: &SocketAddr) -> bool {
use std::net::IpAddr;
match addr.ip() {
IpAddr::V4(addr) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_link_local()
// || addr.is_broadcast() || addr.is_documentation()
// || addr.is_unspecified()
!addr.is_private()
impl SocketAddrSpace {
pub fn new(allow_private_addr: bool) -> Self {
if allow_private_addr {
SocketAddrSpace::Unspecified
} else {
SocketAddrSpace::Global
}
IpAddr::V6(_) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_unspecified(),
true
}
/// Returns true if the IP address is valid.
pub fn check(&self, addr: &SocketAddr) -> bool {
if self == &SocketAddrSpace::Unspecified {
return true;
}
// TODO: remove these once IpAddr::is_global is stable.
match addr.ip() {
IpAddr::V4(addr) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_link_local()
// || addr.is_broadcast() || addr.is_documentation()
// || addr.is_unspecified()
!addr.is_private()
}
IpAddr::V6(_) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_unspecified(),
true
}
}
}
}

View File

@ -1,8 +1,11 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crate::packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH};
use crate::recvmmsg::NUM_RCVMMSGS;
use crate::{
packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH},
recvmmsg::NUM_RCVMMSGS,
socket::SocketAddrSpace,
};
use solana_sdk::timing::{duration_as_ms, timestamp};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@ -112,10 +115,14 @@ pub fn receiver(
.unwrap()
}
fn recv_send(sock: &UdpSocket, r: &PacketReceiver) -> Result<()> {
fn recv_send(
sock: &UdpSocket,
r: &PacketReceiver,
socket_addr_space: &SocketAddrSpace,
) -> Result<()> {
let timer = Duration::new(1, 0);
let msgs = r.recv_timeout(timer)?;
send_to(&msgs, sock)?;
send_to(&msgs, sock, socket_addr_space)?;
Ok(())
}
@ -138,7 +145,12 @@ pub fn recv_batch(recvr: &PacketReceiver, max_batch: usize) -> Result<(Vec<Packe
Ok((batch, len, duration_as_ms(&recv_start.elapsed())))
}
pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: PacketReceiver) -> JoinHandle<()> {
pub fn responder(
name: &'static str,
sock: Arc<UdpSocket>,
r: PacketReceiver,
socket_addr_space: SocketAddrSpace,
) -> JoinHandle<()> {
Builder::new()
.name(format!("solana-responder-{}", name))
.spawn(move || {
@ -146,7 +158,7 @@ pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: PacketReceiver) ->
let mut last_error = None;
let mut last_print = 0;
loop {
if let Err(e) = recv_send(&sock, &r) {
if let Err(e) = recv_send(&sock, &r, &socket_addr_space) {
match e {
StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break,
StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (),
@ -222,7 +234,12 @@ mod test {
);
let t_responder = {
let (s_responder, r_responder) = channel();
let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
let t_responder = responder(
"streamer_send_test",
Arc::new(send),
r_responder,
SocketAddrSpace::Unspecified,
);
let mut msgs = Packets::default();
for i in 0..5 {
let mut b = Packet::default();

View File

@ -39,3 +39,4 @@ bincode = "1.3.3"
solana-core = { path = "../core", version = "=1.8.0" }
solana-logger = { path = "../logger", version = "=1.8.0" }
solana-program-test = { path = "../program-test", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }

View File

@ -1208,12 +1208,14 @@ mod tests {
signature::{read_keypair_file, write_keypair_file, Signer},
stake::instruction::StakeInstruction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionConfirmationStatus;
#[test]
fn test_process_token_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1223,7 +1225,8 @@ mod tests {
#[test]
fn test_process_transfer_amount_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1233,7 +1236,8 @@ mod tests {
#[test]
fn test_create_stake_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1243,7 +1247,8 @@ mod tests {
#[test]
fn test_process_stake_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1563,7 +1568,12 @@ mod tests {
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1646,7 +1656,12 @@ mod tests {
let fees = 10_000;
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1761,7 +1776,12 @@ mod tests {
let fees = 10_000;
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -1870,7 +1890,12 @@ mod tests {
let fees = 10_000;
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@ -2185,7 +2210,11 @@ mod tests {
#[test]
fn test_distribute_allocations_dump_db() {
let sender_keypair = Keypair::new();
let test_validator = TestValidator::with_no_fees(sender_keypair.pubkey(), None);
let test_validator = TestValidator::with_no_fees(
sender_keypair.pubkey(),
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());

View File

@ -1,6 +1,7 @@
use solana_client::rpc_client::RpcClient;
use solana_core::test_validator::TestValidator;
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use solana_tokens::commands::test_process_distribute_tokens_with_client;
#[test]
@ -8,7 +9,8 @@ fn test_process_distribute_with_rpc_client() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(mint_keypair.pubkey(), None, SocketAddrSpace::Unspecified);
let client = RpcClient::new(test_validator.rpc_url());
test_process_distribute_tokens_with_client(&client, mint_keypair, None);

View File

@ -46,6 +46,7 @@ solana-poh = { path = "../poh", version = "=1.8.0" }
solana-rpc = { path = "../rpc", version = "=1.8.0" }
solana-runtime = { path = "../runtime", version = "=1.8.0" }
solana-sdk = { path = "../sdk", version = "=1.8.0" }
solana-streamer = { path = "../streamer", version = "=1.8.0" }
solana-version = { path = "../version", version = "=1.8.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.8.0" }
symlink = "0.1.0"

View File

@ -21,6 +21,7 @@ use {
signature::{read_keypair_file, write_keypair_file, Keypair, Signer},
system_program,
},
solana_streamer::socket::SocketAddrSpace,
solana_validator::{
admin_rpc_service, dashboard::Dashboard, println_name_value, redirect_stderr_to_file,
test_validator::*,
@ -278,8 +279,16 @@ fn main() {
If the ledger already exists then this parameter is silently ignored",
),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.get_matches();
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let cli_config = if let Some(config_file) = matches.value_of("config_file") {
solana_cli_config::Config::load(config_file).unwrap_or_default()
} else {
@ -584,7 +593,7 @@ fn main() {
genesis.bind_ip_addr(bind_address);
}
match genesis.start_with_mint_address(mint_address) {
match genesis.start_with_mint_address(mint_address, socket_addr_space) {
Ok(test_validator) => {
*admin_service_cluster_info.write().unwrap() = Some(test_validator.cluster_info());
if let Some(dashboard) = dashboard {

View File

@ -58,6 +58,7 @@ use {
pubkey::Pubkey,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_validator::{
admin_rpc_service, dashboard::Dashboard, new_spinner_progress_bar, println_name_value,
redirect_stderr_to_file,
@ -368,13 +369,14 @@ fn start_gossip_node(
expected_shred_version: Option<u16>,
gossip_validators: Option<HashSet<Pubkey>>,
should_check_duplicate_instance: bool,
socket_addr_space: SocketAddrSpace,
) -> (Arc<ClusterInfo>, Arc<AtomicBool>, GossipService) {
let contact_info = ClusterInfo::gossip_contact_info(
identity_keypair.pubkey(),
*gossip_addr,
expected_shred_version.unwrap_or(0),
);
let mut cluster_info = ClusterInfo::new(contact_info, identity_keypair);
let mut cluster_info = ClusterInfo::new(contact_info, identity_keypair, socket_addr_space);
cluster_info.set_entrypoints(cluster_entrypoints.to_vec());
cluster_info.restore_contact_info(ledger_path, 0);
let cluster_info = Arc::new(cluster_info);
@ -671,24 +673,25 @@ fn verify_reachable_ports(
node: &Node,
cluster_entrypoint: &ContactInfo,
validator_config: &ValidatorConfig,
socket_addr_space: &SocketAddrSpace,
) -> bool {
let mut udp_sockets = vec![&node.sockets.gossip, &node.sockets.repair];
if ContactInfo::is_valid_address(&node.info.serve_repair) {
if ContactInfo::is_valid_address(&node.info.serve_repair, socket_addr_space) {
udp_sockets.push(&node.sockets.serve_repair);
}
if ContactInfo::is_valid_address(&node.info.tpu) {
if ContactInfo::is_valid_address(&node.info.tpu, socket_addr_space) {
udp_sockets.extend(node.sockets.tpu.iter());
}
if ContactInfo::is_valid_address(&node.info.tpu_forwards) {
if ContactInfo::is_valid_address(&node.info.tpu_forwards, socket_addr_space) {
udp_sockets.extend(node.sockets.tpu_forwards.iter());
}
if ContactInfo::is_valid_address(&node.info.tvu) {
if ContactInfo::is_valid_address(&node.info.tvu, socket_addr_space) {
udp_sockets.extend(node.sockets.tvu.iter());
udp_sockets.extend(node.sockets.broadcast.iter());
udp_sockets.extend(node.sockets.retransmit_sockets.iter());
}
if ContactInfo::is_valid_address(&node.info.tvu_forwards) {
if ContactInfo::is_valid_address(&node.info.tvu_forwards, socket_addr_space) {
udp_sockets.extend(node.sockets.tvu_forwards.iter());
}
@ -698,7 +701,7 @@ fn verify_reachable_ports(
("RPC", rpc_addr, &node.info.rpc),
("RPC pubsub", rpc_pubsub_addr, &node.info.rpc_pubsub),
] {
if ContactInfo::is_valid_address(public_addr) {
if ContactInfo::is_valid_address(public_addr, socket_addr_space) {
tcp_listeners.push((
bind_addr.port(),
TcpListener::bind(bind_addr).unwrap_or_else(|err| {
@ -763,14 +766,19 @@ fn rpc_bootstrap(
start_progress: &Arc<RwLock<ValidatorStartProgress>>,
minimal_snapshot_download_speed: f32,
maximum_snapshot_download_abort: u64,
socket_addr_space: SocketAddrSpace,
) {
if !no_port_check {
let mut order: Vec<_> = (0..cluster_entrypoints.len()).collect();
order.shuffle(&mut thread_rng());
if order
.into_iter()
.all(|i| !verify_reachable_ports(node, &cluster_entrypoints[i], validator_config))
{
if order.into_iter().all(|i| {
!verify_reachable_ports(
node,
&cluster_entrypoints[i],
validator_config,
&socket_addr_space,
)
}) {
exit(1);
}
}
@ -795,6 +803,7 @@ fn rpc_bootstrap(
validator_config.expected_shred_version,
validator_config.gossip_validators.clone(),
should_check_duplicate_instance,
socket_addr_space,
));
}
@ -1859,6 +1868,13 @@ pub fn main() {
.help("Disables duplicate instance check")
.hidden(true),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.after_help("The default subcommand is run")
.subcommand(
SubCommand::with_name("exit")
@ -1975,6 +1991,7 @@ pub fn main() {
)
.get_matches();
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap());
let operation = match matches.subcommand() {
@ -2669,6 +2686,7 @@ pub fn main() {
&start_progress,
minimal_snapshot_download_speed,
maximum_snapshot_download_abort,
socket_addr_space,
);
*start_progress.write().unwrap() = ValidatorStartProgress::Initializing;
}
@ -2688,6 +2706,7 @@ pub fn main() {
&validator_config,
should_check_duplicate_instance,
start_progress,
socket_addr_space,
);
*admin_service_cluster_info.write().unwrap() = Some(validator.cluster_info.clone());