Clippy

This commit is contained in:
parent 0d139d7ef3
commit 7143aaa89b
@@ -81,7 +81,7 @@ pub fn parse_account_data(
 ) -> Result<ParsedAccount, ParseAccountError> {
     let program_name = PARSABLE_PROGRAM_IDS
         .get(program_id)
-        .ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
+        .ok_or(ParseAccountError::ProgramNotParsable)?;
     let additional_data = additional_data.unwrap_or_default();
     let parsed_json = match program_name {
         ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
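The `ok_or_else` → `ok_or` change above is clippy's unnecessary-lazy-evaluation pattern: when the error value is a cheap, ready-made enum variant, the closure adds nothing. A minimal sketch of the trade-off (the names here are illustrative, not from the commit):

    use std::collections::HashMap;

    // Cheap error value: eager `ok_or` is enough and reads better.
    fn lookup(map: &HashMap<u32, &'static str>, key: u32) -> Result<&'static str, &'static str> {
        map.get(&key).copied().ok_or("missing key")
    }

    // `ok_or_else` still pays off when building the error does real work,
    // because the closure only runs on the error path.
    fn lookup_verbose(map: &HashMap<u32, &'static str>, key: u32) -> Result<&'static str, String> {
        map.get(&key).copied().ok_or_else(|| format!("key {} not found", key))
    }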
@@ -217,7 +217,6 @@ mod test {
         account::create_account, fee_calculator::FeeCalculator, hash::Hash,
         sysvar::recent_blockhashes::IterItem,
     };
-    use std::iter::FromIterator;

     #[test]
     fn test_parse_sysvars() {

@@ -250,8 +249,9 @@ mod test {
         let fee_calculator = FeeCalculator {
             lamports_per_signature: 10,
         };
-        let recent_blockhashes =
-            RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
+        let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
+            .into_iter()
+            .collect();
         let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
         assert_eq!(
             parse_sysvar(

@@ -130,9 +130,11 @@ mod test {
         let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
         let versioned = VoteStateVersions::Current(Box::new(vote_state));
         VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
-        let mut expected_vote_state = UiVoteState::default();
-        expected_vote_state.node_pubkey = Pubkey::default().to_string();
-        expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
+        let expected_vote_state = UiVoteState {
+            node_pubkey: Pubkey::default().to_string(),
+            authorized_withdrawer: Pubkey::default().to_string(),
+            ..UiVoteState::default()
+        };
         assert_eq!(
             parse_vote(&vote_account_data).unwrap(),
             VoteAccountType::Vote(expected_vote_state)

@@ -163,7 +163,8 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
     )
 }

-pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
+#[allow(clippy::field_reassign_with_default)]
+pub fn extract_args(matches: &ArgMatches) -> Config {
     let mut args = Config::default();

     args.entrypoint_addr = solana_net_utils::parse_host_port(
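Most of this commit fixes clippy::field_reassign_with_default: instead of taking a Default value and mutating it field by field, build the struct once with update syntax. A minimal sketch with an illustrative Config type (not the crate's own):

    #[derive(Default)]
    struct Config {
        tx_count: usize,
        threads: usize,
        fund_amount: u64,
    }

    fn main() {
        // Flagged by clippy::field_reassign_with_default:
        // let mut config = Config::default();
        // config.tx_count = 100;

        // Preferred: struct update syntax fills the rest from Default.
        let config = Config {
            tx_count: 100,
            threads: 1,
            ..Config::default()
        };
        assert_eq!(config.fund_amount, 0);
    }

Where the lint cannot be satisfied cleanly (for example when the field values are computed step by step), the commit instead keeps the mutation and adds `#[allow(clippy::field_reassign_with_default)]`, as in the hunk above.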
@@ -22,15 +22,17 @@ fn test_exchange_local_cluster() {

     const NUM_NODES: usize = 1;

-    let mut config = Config::default();
-    config.identity = Keypair::new();
-    config.duration = Duration::from_secs(1);
-    config.fund_amount = 100_000;
-    config.threads = 1;
-    config.transfer_delay = 20; // 15
-    config.batch_size = 100; // 1000;
-    config.chunk_size = 10; // 200;
-    config.account_groups = 1; // 10;
+    let config = Config {
+        identity: Keypair::new(),
+        duration: Duration::from_secs(1),
+        fund_amount: 100_000,
+        threads: 1,
+        transfer_delay: 20, // 15
+        batch_size: 100, // 1000
+        chunk_size: 10, // 200
+        account_groups: 1, // 10
+        ..Config::default()
+    };
     let Config {
         fund_amount,
         batch_size,

@@ -89,15 +91,18 @@ fn test_exchange_bank_client() {
     bank.add_builtin("exchange_program", id(), process_instruction);
     let clients = vec![BankClient::new(bank)];

-    let mut config = Config::default();
-    config.identity = identity;
-    config.duration = Duration::from_secs(1);
-    config.fund_amount = 100_000;
-    config.threads = 1;
-    config.transfer_delay = 20; // 0;
-    config.batch_size = 100; // 1500;
-    config.chunk_size = 10; // 1500;
-    config.account_groups = 1; // 50;
-
-    do_bench_exchange(clients, config);
+    do_bench_exchange(
+        clients,
+        Config {
+            identity,
+            duration: Duration::from_secs(1),
+            fund_amount: 100_000,
+            threads: 1,
+            transfer_delay: 20, // 0;
+            batch_size: 100, // 1500;
+            chunk_size: 10, // 1500;
+            account_groups: 1, // 50;
+            ..Config::default()
+        },
+    );
 }

@@ -938,10 +938,12 @@ mod tests {
         let bank = Bank::new(&genesis_config);
         let client = Arc::new(BankClient::new(bank));

-        let mut config = Config::default();
-        config.id = id;
-        config.tx_count = 10;
-        config.duration = Duration::from_secs(5);
+        let config = Config {
+            id,
+            tx_count: 10,
+            duration: Duration::from_secs(5),
+            ..Config::default()
+        };

         let keypair_count = config.tx_count * config.keypair_multiplier;
         let keypairs =

@@ -196,7 +196,7 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
 /// * `matches` - command line arguments parsed by clap
 /// # Panics
 /// Panics if there is trouble parsing any of the arguments
-pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
+pub fn extract_args(matches: &ArgMatches) -> Config {
     let mut args = Config::default();

     if let Some(addr) = matches.value_of("entrypoint") {

@@ -60,9 +60,9 @@ fn test_bench_tps_local_cluster(config: Config) {
 #[test]
 #[serial]
 fn test_bench_tps_local_cluster_solana() {
-    let mut config = Config::default();
-    config.tx_count = 100;
-    config.duration = Duration::from_secs(10);
-
-    test_bench_tps_local_cluster(config);
+    test_bench_tps_local_cluster(Config {
+        tx_count: 100,
+        duration: Duration::from_secs(10),
+        ..Config::default()
+    });
 }

@@ -56,9 +56,7 @@ _ "$cargo" stable fmt --all -- --check

 # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
-_ "$cargo" nightly clippy \
-  -Zunstable-options --workspace --all-targets \
-  -- --deny=warnings --allow=clippy::stable_sort_primitive
+_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings

 cargo_audit_ignores=(
   # failure is officially deprecated/unmaintained

@@ -97,9 +95,7 @@ _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignor
     cd "$project"
     _ "$cargo" stable fmt -- --check
     _ "$cargo" nightly test
-    _ "$cargo" nightly clippy -- --deny=warnings \
-      --allow=clippy::missing_safety_doc \
-      --allow=clippy::stable_sort_primitive
+    _ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
   )
 done
 }
@@ -27,7 +27,7 @@ pub fn build_balance_message(lamports: u64, use_lamports_unit: bool, show_unit:

 // Pretty print a "name value"
 pub fn println_name_value(name: &str, value: &str) {
-    let styled_value = if value == "" {
+    let styled_value = if value.is_empty() {
         style("(not set)").italic()
     } else {
         style(value)

@@ -36,7 +36,7 @@ pub fn println_name_value(name: &str, value: &str) {
 }

 pub fn writeln_name_value(f: &mut fmt::Formatter, name: &str, value: &str) -> fmt::Result {
-    let styled_value = if value == "" {
+    let styled_value = if value.is_empty() {
         style("(not set)").italic()
     } else {
         style(value)

@@ -444,7 +444,7 @@ impl CliConfig<'_> {
     ) -> (SettingType, String) {
         settings
             .into_iter()
-            .find(|(_, value)| value != "")
+            .find(|(_, value)| !value.is_empty())
             .expect("no nonempty setting")
     }

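The `value == ""` rewrites above are clippy's comparison-to-empty pattern; `is_empty()` states the intent directly and works for any container type. A small illustrative check:

    fn label_for(value: &str) -> &str {
        // Flagged: `if value == "" { ... }`
        // Preferred: ask the string whether it has content.
        if value.is_empty() {
            "(not set)"
        } else {
            value
        }
    }

    fn main() {
        assert_eq!(label_for(""), "(not set)");
        assert_eq!(label_for("devnet"), "devnet");
    }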
@@ -502,13 +502,14 @@ impl CliConfig<'_> {
     }

     pub fn recent_for_tests() -> Self {
-        let mut config = Self::default();
-        config.commitment = CommitmentConfig::recent();
-        config.send_transaction_config = RpcSendTransactionConfig {
-            skip_preflight: true,
-            ..RpcSendTransactionConfig::default()
-        };
-        config
+        Self {
+            commitment: CommitmentConfig::recent(),
+            send_transaction_config: RpcSendTransactionConfig {
+                skip_preflight: true,
+                ..RpcSendTransactionConfig::default()
+            },
+            ..Self::default()
+        }
     }
 }

@@ -995,6 +996,7 @@ fn process_confirm(
     }
 }

+#[allow(clippy::unnecessary_wraps)]
 fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
     println_transaction(transaction, &None, "");
     Ok("".to_string())
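clippy::unnecessary_wraps fires when a function always returns Ok (or Some). The commit takes both available routes: keep the wrapper and `#[allow]` the lint where a uniform signature matters, or drop the wrapper entirely (see the ReplayStage and faucet hunks later on). A hedged sketch of the two options, with ProcessResult standing in for the real alias:

    type ProcessResult = Result<String, Box<dyn std::error::Error>>;

    // Option 1: keep the dispatcher-friendly signature, silence the lint.
    #[allow(clippy::unnecessary_wraps)]
    fn process_decode(_input: &str) -> ProcessResult {
        Ok(String::new())
    }

    // Option 2: the function cannot fail, so say so in the signature.
    fn decode(_input: &str) -> String {
        String::new()
    }

    fn main() {
        let _ = process_decode("tx");
        let _ = decode("tx");
    }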
@@ -2763,9 +2765,11 @@ mod tests {
     #[allow(clippy::cognitive_complexity)]
     fn test_cli_process_command() {
         // Success cases
-        let mut config = CliConfig::default();
-        config.rpc_client = Some(RpcClient::new_mock("succeeds".to_string()));
-        config.json_rpc_url = "http://127.0.0.1:8899".to_string();
+        let mut config = CliConfig {
+            rpc_client: Some(RpcClient::new_mock("succeeds".to_string())),
+            json_rpc_url: "http://127.0.0.1:8899".to_string(),
+            ..CliConfig::default()
+        };

         let keypair = Keypair::new();
         let pubkey = keypair.pubkey().to_string();

@@ -1097,8 +1097,10 @@ fn test_stake_set_lockup() {
     let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
     let stake_account_pubkey = stake_keypair.pubkey();

-    let mut lockup = Lockup::default();
-    lockup.custodian = config.signers[0].pubkey();
+    let lockup = Lockup {
+        custodian: config.signers[0].pubkey(),
+        ..Lockup::default()
+    };

     config.signers.push(&stake_keypair);
     config.command = CliCommand::CreateStakeAccount {

@@ -1171,8 +1171,10 @@ mod tests {
             Blockstore::open(&ledger_path)
                 .expect("Expected to be able to open database ledger"),
         );
-        let mut poh_config = PohConfig::default();
-        poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
+        let poh_config = PohConfig {
+            target_tick_count: Some(bank.max_tick_height() + num_extra_ticks),
+            ..PohConfig::default()
+        };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);

@@ -1236,9 +1238,12 @@ mod tests {
             Blockstore::open(&ledger_path)
                 .expect("Expected to be able to open database ledger"),
         );
-        let mut poh_config = PohConfig::default();
-        // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
-        poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
+        let poh_config = PohConfig {
+            // limit tick count to avoid clearing working_bank at PohRecord then
+            // PohRecorderError(MaxHeightReached) at BankingStage
+            target_tick_count: Some(bank.max_tick_height() - 1),
+            ..PohConfig::default()
+        };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);

@@ -1381,9 +1386,12 @@ mod tests {
             Blockstore::open(&ledger_path)
                 .expect("Expected to be able to open database ledger"),
         );
-        let mut poh_config = PohConfig::default();
-        // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
-        poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
+        let poh_config = PohConfig {
+            // limit tick count to avoid clearing working_bank at
+            // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
+            target_tick_count: Some(bank.max_tick_height() - 1),
+            ..PohConfig::default()
+        };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
         let cluster_info =

@@ -1973,7 +1981,7 @@ mod tests {

         assert_eq!(processed_transactions_count, 0,);

-        retryable_txs.sort();
+        retryable_txs.sort_unstable();
         let expected: Vec<usize> = (0..transactions.len()).collect();
         assert_eq!(retryable_txs, expected);
     }
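The `sort()` → `sort_unstable()` swaps are clippy::stable_sort_primitive: for primitive keys such as slot numbers, equal elements are indistinguishable, so a stable order buys nothing while the stable sort allocates scratch space. A small sketch:

    fn main() {
        let mut slots: Vec<u64> = vec![7, 3, 3, 9, 1];
        // For primitives a stable order of equal items is unobservable,
        // so the in-place unstable sort is the better default.
        slots.sort_unstable();
        slots.dedup();
        assert_eq!(slots, vec![1, 3, 7, 9]);
    }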
@@ -1,4 +1,5 @@
 //! A stage to broadcast data from a leader node to validators
+#![allow(clippy::rc_buffer)]
 use self::{
     broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
     fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
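The crate-level `#![allow(clippy::rc_buffer)]` keeps fields like `Arc<Vec<Shred>>` as they are; the lint would rather see `Arc<[Shred]>`, which drops one level of indirection but freezes the buffer. A sketch of what the lint is asking for (Shred is a stand-in type, not the real one):

    use std::sync::Arc;

    #[derive(Clone)]
    struct Shred(u32); // stand-in for the real shred type

    // What clippy::rc_buffer flags: a shared handle to a growable buffer.
    fn share_growable(shreds: Vec<Shred>) -> Arc<Vec<Shred>> {
        Arc::new(shreds)
    }

    // What the lint suggests: a shared, fixed-size slice with one less indirection.
    fn share_frozen(shreds: Vec<Shred>) -> Arc<[Shred]> {
        shreds.into()
    }

    fn main() {
        let a = share_growable(vec![Shred(1), Shred(2)]);
        let b = share_frozen(vec![Shred(1), Shred(2)]);
        assert_eq!(a.len(), b.len());
    }

Here the allow is the pragmatic choice: the broadcast code passes the same `Arc<Vec<Shred>>` through several stages, and converting to a slice would ripple through every signature.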
@@ -518,8 +519,10 @@ pub mod test {

     #[test]
     fn test_num_live_peers() {
-        let mut ci = ContactInfo::default();
-        ci.wallclock = std::u64::MAX;
+        let mut ci = ContactInfo {
+            wallclock: std::u64::MAX,
+            ..ContactInfo::default()
+        };
         assert_eq!(num_live_peers(&[ci.clone()]), 1);
         ci.wallclock = timestamp() - 1;
         assert_eq!(num_live_peers(&[ci.clone()]), 2);

@@ -270,10 +270,9 @@ mod test {
             }

             assert!(slot_broadcast_stats.lock().unwrap().0.get(&slot).is_none());
-            let (returned_count, returned_slot, returned_instant) = receiver.recv().unwrap();
+            let (returned_count, returned_slot, _returned_instant) = receiver.recv().unwrap();
             assert_eq!(returned_count, num_threads);
             assert_eq!(returned_slot, slot);
-            assert_eq!(returned_instant, returned_instant);
         }
     }
 }

@@ -1,3 +1,5 @@
+#![allow(clippy::rc_buffer)]
+
 use super::{
     broadcast_utils::{self, ReceiveResults},
     *,

@@ -284,7 +286,7 @@ impl StandardBroadcastRun {
         blockstore: &Arc<Blockstore>,
         shreds: Arc<Vec<Shred>>,
         broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
-    ) -> Result<()> {
+    ) {
         // Insert shreds into blockstore
         let insert_shreds_start = Instant::now();
         // The first shred is inserted synchronously

@@ -302,7 +304,6 @@ impl StandardBroadcastRun {
             num_shreds: shreds.len(),
         };
         self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info);
-        Ok(())
     }

     fn update_insertion_metrics(

@@ -438,7 +439,8 @@ impl BroadcastRun for StandardBroadcastRun {
         blockstore: &Arc<Blockstore>,
     ) -> Result<()> {
         let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
-        self.insert(blockstore, shreds, slot_start_ts)
+        self.insert(blockstore, shreds, slot_start_ts);
+        Ok(())
     }
 }

@@ -884,7 +884,7 @@ impl ClusterInfo {
                 ))
             })
             .collect();
-        current_slots.sort();
+        current_slots.sort_unstable();
         let min_slot: Slot = current_slots
             .iter()
             .map(|((_, s), _)| *s)

@@ -4139,8 +4139,10 @@ mod tests {

     #[test]
     fn test_protocol_sanitize() {
-        let mut pd = PruneData::default();
-        pd.wallclock = MAX_WALLCLOCK;
+        let pd = PruneData {
+            wallclock: MAX_WALLCLOCK,
+            ..PruneData::default()
+        };
         let msg = Protocol::PruneMessage(Pubkey::default(), pd);
         assert_eq!(msg.sanitize(), Err(SanitizeError::ValueOutOfBounds));
     }

@@ -125,6 +125,7 @@ impl ClusterSlotsService {
         while let Ok(mut more) = completed_slots_receiver.try_recv() {
             slots.append(&mut more);
         }
+        #[allow(clippy::stable_sort_primitive)]
         slots.sort();
         if !slots.is_empty() {
             cluster_info.push_epoch_slots(&slots);

@@ -163,7 +164,7 @@ impl ClusterSlotsService {
         while let Ok(mut more) = completed_slots_receiver.try_recv() {
             slots.append(&mut more);
         }
-        slots.sort();
+        slots.sort_unstable();
         slots.dedup();
         if !slots.is_empty() {
             cluster_info.push_epoch_slots(&slots);

@@ -1574,9 +1574,11 @@ pub mod test {
     fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
         let mut stakes = vec![];
         for (lamports, votes) in stake_votes {
-            let mut account = Account::default();
-            account.data = vec![0; VoteState::size_of()];
-            account.lamports = *lamports;
+            let mut account = Account {
+                data: vec![0; VoteState::size_of()],
+                lamports: *lamports,
+                ..Account::default()
+            };
             let mut vote_state = VoteState::default();
             for slot in *votes {
                 vote_state.process_slot_vote_unchecked(*slot);

@@ -2059,7 +2061,7 @@ pub mod test {
     #[test]
     fn test_check_vote_threshold_without_votes() {
         let tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1)].into_iter().collect();
         assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
     }

@@ -2069,7 +2071,7 @@ pub mod test {
         let mut tower = Tower::new_for_tests(4, 0.67);
         let mut stakes = HashMap::new();
         for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
-            stakes.insert(i, 1 as Stake);
+            stakes.insert(i, 1);
             tower.record_vote(i, Hash::default());
         }
         assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,));

@@ -2078,7 +2080,7 @@ pub mod test {
     #[test]
     fn test_is_slot_confirmed_not_enough_stake_failure() {
         let tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1)].into_iter().collect();
         assert!(!tower.is_slot_confirmed(0, &stakes, 2));
     }

@@ -2092,7 +2094,7 @@ pub mod test {
     #[test]
     fn test_is_slot_confirmed_pass() {
         let tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 2)].into_iter().collect();
         assert!(tower.is_slot_confirmed(0, &stakes, 2));
     }

@@ -2205,14 +2207,14 @@ pub mod test {
     #[test]
     fn test_check_vote_threshold_below_threshold() {
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
     }
     #[test]
     fn test_check_vote_threshold_above_threshold() {
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 2)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         assert!(tower.check_vote_stake_threshold(1, &stakes, 2));
     }

@@ -2220,7 +2222,7 @@ pub mod test {
     #[test]
     fn test_check_vote_threshold_above_threshold_after_pop() {
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 2)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         tower.record_vote(1, Hash::default());
         tower.record_vote(2, Hash::default());

@@ -2239,7 +2241,7 @@ pub mod test {
     fn test_check_vote_threshold_lockouts_not_updated() {
         solana_logger::setup();
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake), (1, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         tower.record_vote(1, Hash::default());
         tower.record_vote(2, Hash::default());
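The `1 as Stake` → `1` edits remove clippy::unnecessary_cast warnings: when the target type is already pinned by the collection or the function signature, the literal coerces on its own. A tiny illustration, with Stake written as a plain u64 alias for the sketch:

    use std::collections::HashMap;

    type Stake = u64;

    fn main() {
        // Flagged: `1 as Stake` repeats what the annotation already says.
        // let stakes: HashMap<u64, Stake> = vec![(0, 1 as Stake)].into_iter().collect();

        // The literal takes its type from the annotated binding.
        let stakes: HashMap<u64, Stake> = vec![(0, 1)].into_iter().collect();
        assert_eq!(stakes[&0], 1);
    }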
@@ -2249,8 +2251,10 @@ pub mod test {
     #[test]
     fn test_stake_is_updated_for_entire_branch() {
         let mut voted_stakes = HashMap::new();
-        let mut account = Account::default();
-        account.lamports = 1;
+        let account = Account {
+            lamports: 1,
+            ..Account::default()
+        };
         let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
         let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
         Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports, &ancestors);

@@ -304,8 +304,10 @@ mod test {

     #[test]
     fn test_prune_errors() {
-        let mut crds_gossip = CrdsGossip::default();
-        crds_gossip.id = Pubkey::new(&[0; 32]);
+        let mut crds_gossip = CrdsGossip {
+            id: Pubkey::new(&[0; 32]),
+            ..CrdsGossip::default()
+        };
         let id = crds_gossip.id;
         let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0);
         let prune_pubkey = Pubkey::new(&[2; 32]);

@@ -337,10 +337,7 @@ impl CrdsGossipPull {
         for r in responses {
             let owner = r.label().pubkey();
             // Check if the crds value is older than the msg_timeout
-            if now
-                > r.wallclock()
-                    .checked_add(self.msg_timeout)
-                    .unwrap_or_else(|| 0)
+            if now > r.wallclock().checked_add(self.msg_timeout).unwrap_or(0)
                 || now + self.msg_timeout < r.wallclock()
             {
                 match &r.label() {

@@ -350,7 +347,7 @@ impl CrdsGossipPull {
                         let timeout = *timeouts
                             .get(&owner)
                             .unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
-                        if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
+                        if now > r.wallclock().checked_add(timeout).unwrap_or(0)
                             || now + timeout < r.wallclock()
                         {
                             stats.timeout_count += 1;

@@ -175,12 +175,7 @@ impl CrdsGossipPush {
         now: u64,
     ) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
         self.num_total += 1;
-        if now
-            > value
-                .wallclock()
-                .checked_add(self.msg_timeout)
-                .unwrap_or_else(|| 0)
-        {
+        if now > value.wallclock().checked_add(self.msg_timeout).unwrap_or(0) {
             return Err(CrdsGossipError::PushMessageTimeout);
         }
         if now + self.msg_timeout < value.wallclock() {

@@ -208,7 +203,7 @@ impl CrdsGossipPush {
     /// push pull responses
     pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
         for (label, value_hash, wc) in values {
-            if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
+            if now > wc.checked_add(self.msg_timeout).unwrap_or(0) {
                 continue;
             }
             self.push_messages.insert(label, value_hash);
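The `unwrap_or_else(|| 0)` → `unwrap_or(0)` changes are the same lazy-evaluation cleanup as the earlier `ok_or` one: a constant default needs no closure. A sketch of the timeout test as clippy prefers it (function name is illustrative):

    fn is_expired(now: u64, wallclock: u64, timeout: u64) -> bool {
        // `checked_add` guards against overflow; the fallback of 0 is a
        // constant, so `unwrap_or` is enough — no closure required.
        now > wallclock.checked_add(timeout).unwrap_or(0)
    }

    fn main() {
        assert!(is_expired(100, 10, 20));
        assert!(!is_expired(100, 90, 20));
        assert!(is_expired(1, u64::MAX, 1)); // overflow path falls back to 0
    }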
@@ -116,7 +116,7 @@ impl Uncompressed {
     pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
         let mut rv = vec![];
         let start = if min_slot < self.first_slot {
-            0 as usize
+            0
         } else {
             (min_slot - self.first_slot) as usize
         };

@@ -1228,8 +1228,10 @@ mod tests {
                 init_ticks + bank.ticks_per_slot()
             );

-            let mut parent_meta = SlotMeta::default();
-            parent_meta.received = 1;
+            let parent_meta = SlotMeta {
+                received: 1,
+                ..SlotMeta::default()
+            };
             poh_recorder
                 .blockstore
                 .put_meta_bytes(0, &serialize(&parent_meta).unwrap())

@@ -736,7 +736,7 @@ mod test {
             let num_slots = 2;

             // Create some shreds
-            let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 150 as u64);
+            let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 150);
             let num_shreds = shreds.len() as u64;
             let num_shreds_per_slot = num_shreds / num_slots;

@@ -852,9 +852,10 @@ mod test {
             // sides of the range)
             for start in 0..slots.len() {
                 for end in start..slots.len() {
-                    let mut repair_slot_range = RepairSlotRange::default();
-                    repair_slot_range.start = slots[start];
-                    repair_slot_range.end = slots[end];
+                    let repair_slot_range = RepairSlotRange {
+                        start: slots[start],
+                        end: slots[end],
+                    };
                     let expected: Vec<RepairType> = (repair_slot_range.start
                         ..=repair_slot_range.end)
                         .map(|slot_index| {

@@ -907,9 +908,7 @@ mod test {
                 RepairType::HighestShred(end, 0),
             ];

-            let mut repair_slot_range = RepairSlotRange::default();
-            repair_slot_range.start = 2;
-            repair_slot_range.end = end;
+            let repair_slot_range = RepairSlotRange { start: 2, end };

             assert_eq!(
                 RepairService::generate_repairs_in_range(

@@ -455,7 +455,7 @@ impl ReplayStage {
                         &mut heaviest_subtree_fork_choice,
                         &cache_block_time_sender,
                         &bank_notification_sender,
-                    )?;
+                    );
                 };
                 voting_time.stop();

@@ -1047,7 +1047,7 @@ impl ReplayStage {
         heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
         cache_block_time_sender: &Option<CacheBlockTimeSender>,
         bank_notification_sender: &Option<BankNotificationSender>,
-    ) -> Result<()> {
+    ) {
         if bank.is_empty() {
             inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
         }

@@ -1130,7 +1130,6 @@ impl ReplayStage {
             tower_index,
             switch_fork_decision,
         );
-        Ok(())
     }

     fn push_vote(
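Several of the signature changes above (`) -> Result<()> {` becoming `) {`, with the trailing `Ok(())` and the caller's `?` removed) are the other answer to clippy::unnecessary_wraps: if nothing in the body can fail, drop the Result so callers stop threading error plumbing. A hedged sketch of the call-site effect, with illustrative names:

    // Before: an infallible body hidden behind Result forces `?` on callers.
    // fn handle_votable_bank(slot: u64) -> Result<(), String> { ...; Ok(()) }

    // After: the signature says what the body actually does.
    fn handle_votable_bank(slot: u64) {
        let _ = slot; // record the vote; nothing here can fail
    }

    fn replay_loop() {
        handle_votable_bank(42); // no `?`, no error type to invent
    }

    fn main() {
        replay_loop();
    }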
@@ -1,4 +1,5 @@
 //! The `retransmit_stage` retransmits shreds between validators
+#![allow(clippy::rc_buffer)]

 use crate::shred_fetch_stage::ShredFetchStage;
 use crate::shred_fetch_stage::ShredFetchStats;

@@ -4511,8 +4511,10 @@ pub mod tests {
         let ledger_path = get_tmp_ledger_path!();
         let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
         let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
-        let mut config = JsonRpcConfig::default();
-        config.enable_validator_exit = true;
+        let config = JsonRpcConfig {
+            enable_validator_exit: true,
+            ..JsonRpcConfig::default()
+        };
         let bank_forks = new_bank_forks().0;
         let cluster_info = Arc::new(ClusterInfo::default());
         let tpu_address = cluster_info.my_contact_info().tpu;

@@ -4601,8 +4603,10 @@ pub mod tests {
             CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()),
         )));

-        let mut config = JsonRpcConfig::default();
-        config.enable_validator_exit = true;
+        let config = JsonRpcConfig {
+            enable_validator_exit: true,
+            ..JsonRpcConfig::default()
+        };
         let cluster_info = Arc::new(ClusterInfo::default());
         let tpu_address = cluster_info.my_contact_info().tpu;
         let (request_processor, receiver) = JsonRpcRequestProcessor::new(

@@ -534,8 +534,10 @@ mod tests {
             .get(current_slot)
             .unwrap()
             .process_transaction(tx)?;
-        let mut commitment_slots = CommitmentSlots::default();
-        commitment_slots.slot = current_slot;
+        let commitment_slots = CommitmentSlots {
+            slot: current_slot,
+            ..CommitmentSlots::default()
+        };
         subscriptions.notify_subscribers(commitment_slots);
         Ok(())
     }

@@ -1018,8 +1020,10 @@ mod tests {
             .unwrap()
             .process_transaction(&tx)
             .unwrap();
-        let mut commitment_slots = CommitmentSlots::default();
-        commitment_slots.slot = 1;
+        let commitment_slots = CommitmentSlots {
+            slot: 1,
+            ..CommitmentSlots::default()
+        };
         rpc.subscriptions.notify_subscribers(commitment_slots);

         let commitment_slots = CommitmentSlots {

@@ -973,7 +973,7 @@ impl RpcSubscriptions {
     }

     pub fn notify_roots(&self, mut rooted_slots: Vec<Slot>) {
-        rooted_slots.sort();
+        rooted_slots.sort_unstable();
         rooted_slots.into_iter().for_each(|root| {
             self.enqueue_notification(NotificationEntry::Root(root));
         });

@@ -1359,8 +1359,8 @@ pub(crate) mod tests {
         let (create_sub, _id_receiver, create_recv) = Subscriber::new_test("accountNotification");
         let (close_sub, _id_receiver, close_recv) = Subscriber::new_test("accountNotification");

-        let create_sub_id = SubscriptionId::Number(0 as u64);
-        let close_sub_id = SubscriptionId::Number(1 as u64);
+        let create_sub_id = SubscriptionId::Number(0);
+        let close_sub_id = SubscriptionId::Number(1);

         let exit = Arc::new(AtomicBool::new(false));
         let subscriptions = RpcSubscriptions::new(

@@ -1404,8 +1404,10 @@ pub(crate) mod tests {
             .unwrap()
             .process_transaction(&tx)
             .unwrap();
-        let mut commitment_slots = CommitmentSlots::default();
-        commitment_slots.slot = 1;
+        let commitment_slots = CommitmentSlots {
+            slot: 1,
+            ..CommitmentSlots::default()
+        };
         subscriptions.notify_subscribers(commitment_slots);
         let (response, _) = robust_poll_or_panic(create_recv);
         let expected = json!({

@@ -1513,7 +1515,7 @@ pub(crate) mod tests {

         let (subscriber, _id_receiver, transport_receiver) =
             Subscriber::new_test("programNotification");
-        let sub_id = SubscriptionId::Number(0 as u64);
+        let sub_id = SubscriptionId::Number(0);
         let exit = Arc::new(AtomicBool::new(false));
         let optimistically_confirmed_bank =
             OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);

@@ -1659,7 +1661,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(1 as u64),
+            SubscriptionId::Number(1),
             past_bank_sub1,
         );
         subscriptions.add_signature_subscription(

@@ -1668,7 +1670,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::root()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(2 as u64),
+            SubscriptionId::Number(2),
             past_bank_sub2,
         );
         subscriptions.add_signature_subscription(

@@ -1677,7 +1679,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(3 as u64),
+            SubscriptionId::Number(3),
             processed_sub,
         );
         subscriptions.add_signature_subscription(

@@ -1686,7 +1688,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(4 as u64),
+            SubscriptionId::Number(4),
             Subscriber::new_test("signatureNotification").0,
         );
         // Add a subscription that gets `received` notifications

@@ -1696,7 +1698,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(true),
             }),
-            SubscriptionId::Number(5 as u64),
+            SubscriptionId::Number(5),
             processed_sub3,
         );

@@ -1789,7 +1791,7 @@ pub(crate) mod tests {
     fn test_check_slot_subscribe() {
         let (subscriber, _id_receiver, transport_receiver) =
             Subscriber::new_test("slotNotification");
-        let sub_id = SubscriptionId::Number(0 as u64);
+        let sub_id = SubscriptionId::Number(0);
         let exit = Arc::new(AtomicBool::new(false));
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);

@@ -1840,7 +1842,7 @@ pub(crate) mod tests {
     fn test_check_root_subscribe() {
         let (subscriber, _id_receiver, mut transport_receiver) =
             Subscriber::new_test("rootNotification");
-        let sub_id = SubscriptionId::Number(0 as u64);
+        let sub_id = SubscriptionId::Number(0);
         let exit = Arc::new(AtomicBool::new(false));
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);

@@ -1976,7 +1978,7 @@ pub(crate) mod tests {
             ))),
             optimistically_confirmed_bank.clone(),
         ));
-        let sub_id0 = SubscriptionId::Number(0 as u64);
+        let sub_id0 = SubscriptionId::Number(0);
         subscriptions.add_account_subscription(
             alice.pubkey(),
             Some(RpcAccountInfoConfig {

@@ -2057,7 +2059,7 @@ pub(crate) mod tests {
         assert_eq!(serde_json::to_string(&expected).unwrap(), response);
         subscriptions.remove_account_subscription(&sub_id0);

-        let sub_id1 = SubscriptionId::Number(1 as u64);
+        let sub_id1 = SubscriptionId::Number(1);
         subscriptions.add_account_subscription(
             alice.pubkey(),
             Some(RpcAccountInfoConfig {

@@ -708,11 +708,15 @@ mod tests {
             nonce,
         );
         assert!(rv.is_none());
-        let mut common_header = ShredCommonHeader::default();
-        common_header.slot = slot;
-        common_header.index = 1;
-        let mut data_header = DataShredHeader::default();
-        data_header.parent_offset = 1;
+        let common_header = ShredCommonHeader {
+            slot,
+            index: 1,
+            ..ShredCommonHeader::default()
+        };
+        let data_header = DataShredHeader {
+            parent_offset: 1,
+            ..DataShredHeader::default()
+        };
         let shred_info = Shred::new_empty_from_header(
             common_header,
             data_header,

@@ -63,8 +63,8 @@ mod tests {
     #[test]
     fn test_get_latest_votes() {
         let pubkey = solana_sdk::pubkey::new_rand();
-        let label1 = CrdsValueLabel::Vote(0 as u8, pubkey);
-        let label2 = CrdsValueLabel::Vote(1 as u8, pubkey);
+        let label1 = CrdsValueLabel::Vote(0, pubkey);
+        let label2 = CrdsValueLabel::Vote(1, pubkey);
         let mut verified_vote_packets = VerifiedVotePackets(HashMap::new());

         let data = Packet {

@@ -107,8 +107,8 @@ mod tests {
     fn test_get_and_process_vote_packets() {
         let (s, r) = unbounded();
         let pubkey = solana_sdk::pubkey::new_rand();
-        let label1 = CrdsValueLabel::Vote(0 as u8, pubkey);
-        let label2 = CrdsValueLabel::Vote(1 as u8, pubkey);
+        let label1 = CrdsValueLabel::Vote(0, pubkey);
+        let label2 = CrdsValueLabel::Vote(1, pubkey);
         let mut update_version = 0;
         s.send(vec![(label1.clone(), Packets::default())]).unwrap();
         s.send(vec![(label2.clone(), Packets::default())]).unwrap();

@@ -624,8 +624,10 @@ fn test_star_network_large_push() {
 }
 #[test]
 fn test_prune_errors() {
-    let mut crds_gossip = CrdsGossip::default();
-    crds_gossip.id = Pubkey::new(&[0; 32]);
+    let mut crds_gossip = CrdsGossip {
+        id: Pubkey::new(&[0; 32]),
+        ..CrdsGossip::default()
+    };
     let id = crds_gossip.id;
     let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0);
     let prune_pubkey = Pubkey::new(&[2; 32]);

@@ -190,7 +190,7 @@ impl Tower {
             .map(|(i, v)| (*scores.get(&v).unwrap_or(&0), v.time, i))
             .collect();
         // highest score, latest vote first
-        best.sort();
+        best.sort_unstable();
         if self.parasite {
             best.reverse();
         }

@@ -284,8 +284,7 @@ fn test_rpc_subscriptions() {
                     }
                 }
                 Err(_err) => {
-                    assert!(
-                        false,
+                    panic!(
                         "recv_timeout, {}/{} signatures remaining",
                         signature_set.len(),
                         transactions.len()

@@ -304,8 +303,7 @@ fn test_rpc_subscriptions() {
                     account_notifications -= 1;
                 }
                 Err(_err) => {
-                    assert!(
-                        false,
+                    panic!(
                         "recv_timeout, {}/{} accounts remaining",
                         account_notifications,
                         transactions.len()
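The `assert!(false, ...)` → `panic!(...)` edits address clippy's assertions-on-constants lint: an assertion that can never pass is just a panic with extra ceremony. A small sketch:

    fn handle_timeout(remaining: usize, total: usize) {
        // Flagged: assert!(false, "recv_timeout, {}/{} remaining", remaining, total);
        // Clearer: state what actually happens.
        panic!("recv_timeout, {}/{} remaining", remaining, total);
    }

    fn main() {
        let result = std::panic::catch_unwind(|| handle_timeout(3, 10));
        assert!(result.is_err());
    }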
@ -6,13 +6,12 @@ use solana_faucet::{
|
||||||
};
|
};
|
||||||
use solana_sdk::signature::read_keypair_file;
|
use solana_sdk::signature::read_keypair_file;
|
||||||
use std::{
|
use std::{
|
||||||
error,
|
|
||||||
net::{Ipv4Addr, SocketAddr},
|
net::{Ipv4Addr, SocketAddr},
|
||||||
sync::{Arc, Mutex},
|
sync::{Arc, Mutex},
|
||||||
thread,
|
thread,
|
||||||
};
|
};
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn error::Error>> {
|
fn main() {
|
||||||
let default_keypair = solana_cli_config::Config::default().keypair_path;
|
let default_keypair = solana_cli_config::Config::default().keypair_path;
|
||||||
|
|
||||||
solana_logger::setup_with_default("solana=info");
|
solana_logger::setup_with_default("solana=info");
|
||||||
|
@ -78,5 +77,4 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||||
});
|
});
|
||||||
|
|
||||||
run_faucet(faucet, faucet_addr, None);
|
run_faucet(faucet, faucet_addr, None);
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -137,6 +137,7 @@ impl AbiDigester {
|
||||||
self.update(&[&label]);
|
self.update(&[&label]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::unnecessary_wraps)]
|
||||||
fn digest_primitive<T: Serialize>(mut self) -> Result<AbiDigester, DigestError> {
|
fn digest_primitive<T: Serialize>(mut self) -> Result<AbiDigester, DigestError> {
|
||||||
self.update_with_type::<T>("primitive");
|
self.update_with_type::<T>("primitive");
|
||||||
Ok(self)
|
Ok(self)
|
||||||
|
@ -164,6 +165,7 @@ impl AbiDigester {
|
||||||
self.create_child().digest_data(v).map(|_| ())
|
self.create_child().digest_data(v).map(|_| ())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::unnecessary_wraps)]
|
||||||
fn check_for_enum(
|
fn check_for_enum(
|
||||||
&mut self,
|
&mut self,
|
||||||
label: &'static str,
|
label: &'static str,
|
||||||
|
|
|
@ -443,11 +443,13 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||||
);
|
);
|
||||||
fee_rate_governor.burn_percent = value_t_or_exit!(matches, "fee_burn_percentage", u8);
|
fee_rate_governor.burn_percent = value_t_or_exit!(matches, "fee_burn_percentage", u8);
|
||||||
|
|
||||||
let mut poh_config = PohConfig::default();
|
let mut poh_config = PohConfig {
|
||||||
poh_config.target_tick_duration = if matches.is_present("target_tick_duration") {
|
target_tick_duration: if matches.is_present("target_tick_duration") {
|
||||||
Duration::from_micros(value_t_or_exit!(matches, "target_tick_duration", u64))
|
Duration::from_micros(value_t_or_exit!(matches, "target_tick_duration", u64))
|
||||||
} else {
|
} else {
|
||||||
Duration::from_micros(default_target_tick_duration)
|
Duration::from_micros(default_target_tick_duration)
|
||||||
|
},
|
||||||
|
..PohConfig::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let cluster_type = cluster_type_of(&matches, "cluster_type").unwrap();
|
let cluster_type = cluster_type_of(&matches, "cluster_type").unwrap();
|
||||||
|
@@ -663,7 +665,7 @@ mod tests {
            solana_sdk::pubkey::new_rand().to_string(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 2 as u64,
+               balance: 2,
                executable: false,
                data: String::from("aGVsbG8="),
            },
@@ -672,7 +674,7 @@ mod tests {
            solana_sdk::pubkey::new_rand().to_string(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 1 as u64,
+               balance: 1,
                executable: true,
                data: String::from("aGVsbG8gd29ybGQ="),
            },
@@ -681,7 +683,7 @@ mod tests {
            solana_sdk::pubkey::new_rand().to_string(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 3 as u64,
+               balance: 3,
                executable: true,
                data: String::from("bWUgaGVsbG8gdG8gd29ybGQ="),
            },
@@ -736,7 +738,7 @@ mod tests {
            solana_sdk::pubkey::new_rand().to_string(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 6 as u64,
+               balance: 6,
                executable: true,
                data: String::from("eW91IGFyZQ=="),
            },
@@ -745,7 +747,7 @@ mod tests {
            solana_sdk::pubkey::new_rand().to_string(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 5 as u64,
+               balance: 5,
                executable: false,
                data: String::from("bWV0YSBzdHJpbmc="),
            },
@@ -754,7 +756,7 @@ mod tests {
            solana_sdk::pubkey::new_rand().to_string(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 10 as u64,
+               balance: 10,
                executable: false,
                data: String::from("YmFzZTY0IHN0cmluZw=="),
            },
@@ -819,7 +821,7 @@ mod tests {
            serde_json::to_string(&account_keypairs[0].to_bytes().to_vec()).unwrap(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 20 as u64,
+               balance: 20,
                executable: true,
                data: String::from("Y2F0IGRvZw=="),
            },
@@ -828,7 +830,7 @@ mod tests {
            serde_json::to_string(&account_keypairs[1].to_bytes().to_vec()).unwrap(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 15 as u64,
+               balance: 15,
                executable: false,
                data: String::from("bW9ua2V5IGVsZXBoYW50"),
            },
@@ -837,7 +839,7 @@ mod tests {
            serde_json::to_string(&account_keypairs[2].to_bytes().to_vec()).unwrap(),
            Base64Account {
                owner: solana_sdk::pubkey::new_rand().to_string(),
-               balance: 30 as u64,
+               balance: 30,
                executable: true,
                data: String::from("Y29tYSBtb2Nh"),
            },
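Each of these test hunks drops an `N as u64` cast where the struct field already fixes the literal's type; that is the shape `clippy::unnecessary_cast` complains about. A tiny sketch with a hypothetical fixture type:

struct Fixture {
    balance: u64,
}

fn fixture() -> Fixture {
    // `balance: 2 as u64` is redundant: the field type already makes the
    // literal a u64. A suffix (2_u64) is the idiomatic spelling when no
    // field or annotation pins the type.
    Fixture { balance: 2 }
}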
@@ -360,7 +360,7 @@ fn get_windows_path_var() -> Result<Option<String>, String> {
 }

 #[cfg(windows)]
-fn add_to_path(new_path: &str) -> Result<bool, String> {
+fn add_to_path(new_path: &str) -> bool {
     use std::ptr;
     use winapi::shared::minwindef::*;
     use winapi::um::winuser::{
@@ -372,7 +372,7 @@ fn add_to_path(new_path: &str) -> Result<bool, String> {
     let old_path = if let Some(s) = get_windows_path_var()? {
         s
     } else {
-        return Ok(false);
+        return false;
     };

     if !old_path.contains(&new_path) {
@@ -416,11 +416,11 @@ fn add_to_path(new_path: &str) -> Result<bool, String> {
         new_path,
         style("Future applications will automatically have the correct environment, but you may need to restart your current shell.").bold()
     );
-    Ok(true)
+    true
 }

 #[cfg(unix)]
-fn add_to_path(new_path: &str) -> Result<bool, String> {
+fn add_to_path(new_path: &str) -> bool {
     let shell_export_string = format!(r#"export PATH="{}:$PATH""#, new_path);
     let mut modified_rcfiles = false;

@@ -502,7 +502,7 @@ fn add_to_path(new_path: &str) -> Result<bool, String> {
         );
     }

-    Ok(modified_rcfiles)
+    modified_rcfiles
 }

 pub fn init(
@@ -533,7 +533,7 @@ pub fn init(
     update(config_file)?;

     let path_modified = if !no_modify_path {
-        add_to_path(&config.active_release_bin_dir().to_str().unwrap())?
+        add_to_path(&config.active_release_bin_dir().to_str().unwrap())
     } else {
         false
     };
@@ -55,7 +55,7 @@ fn get_keypair_from_matches(
     let mut path = dirs_next::home_dir().expect("home directory");
     let path = if matches.is_present("keypair") {
         matches.value_of("keypair").unwrap()
-    } else if config.keypair_path != "" {
+    } else if !config.keypair_path.is_empty() {
         &config.keypair_path
     } else {
         path.extend(&[".config", "solana", "id.json"]);
@@ -70,30 +70,23 @@ enum LedgerOutputMethod {
     Json,
 }

-fn output_slot_rewards(
-    blockstore: &Blockstore,
-    slot: Slot,
-    method: &LedgerOutputMethod,
-) -> Result<(), String> {
+fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &LedgerOutputMethod) {
     // Note: rewards are not output in JSON yet
     if *method == LedgerOutputMethod::Print {
-        if let Ok(rewards) = blockstore.read_rewards(slot) {
-            if let Some(rewards) = rewards {
-                if !rewards.is_empty() {
-                    println!("  Rewards:");
-                    for reward in rewards {
-                        println!(
-                            "  Account {}: {}{} SOL",
-                            reward.pubkey,
-                            if reward.lamports < 0 { '-' } else { ' ' },
-                            lamports_to_sol(reward.lamports.abs().try_into().unwrap())
-                        );
-                    }
+        if let Ok(Some(rewards)) = blockstore.read_rewards(slot) {
+            if !rewards.is_empty() {
+                println!("  Rewards:");
+                for reward in rewards {
+                    println!(
+                        "  Account {}: {}{} SOL",
+                        reward.pubkey,
+                        if reward.lamports < 0 { '-' } else { ' ' },
+                        lamports_to_sol(reward.lamports.abs().try_into().unwrap())
+                    );
                 }
             }
         }
     }
-    Ok(())
 }

 fn output_entry(
@@ -181,7 +174,7 @@ fn output_slot(
             output_entry(blockstore, method, slot, entry_index, entry);
         }

-        output_slot_rewards(blockstore, slot, method)?;
+        output_slot_rewards(blockstore, slot, method);
     } else if verbose_level >= 1 {
         let mut transactions = 0;
         let mut hashes = 0;
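The `output_slot_rewards` rewrite above folds `if let Ok(..)` followed by `if let Some(..)` into a single `if let Ok(Some(..))` pattern and drops the function's never-failing `Result`. A minimal sketch of the collapsed match, using a hypothetical lookup that returns `Result<Option<T>, E>`:

fn lookup(key: u64) -> Result<Option<String>, std::io::Error> {
    Ok(if key == 0 { Some("hit".to_string()) } else { None })
}

fn print_hit(key: u64) {
    // Nested form the lint dislikes:
    //     if let Ok(value) = lookup(key) {
    //         if let Some(value) = value { println!("{}", value); }
    //     }
    // Collapsed form: match both layers in one pattern.
    if let Ok(Some(value)) = lookup(key) {
        println!("{}", value);
    }
}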
@@ -526,7 +519,7 @@ fn analyze_column<
     db: &Database,
     name: &str,
     key_size: usize,
-) -> Result<(), String> {
+) {
     let mut key_tot: u64 = 0;
     let mut val_hist = histogram::Histogram::new();
     let mut val_tot: u64 = 0;
@@ -587,38 +580,34 @@ fn analyze_column<
     };

     println!("{}", serde_json::to_string_pretty(&json_result).unwrap());
-
-    Ok(())
 }

-fn analyze_storage(database: &Database) -> Result<(), String> {
+fn analyze_storage(database: &Database) {
     use blockstore_db::columns::*;
-    analyze_column::<SlotMeta>(database, "SlotMeta", SlotMeta::key_size())?;
-    analyze_column::<Orphans>(database, "Orphans", Orphans::key_size())?;
-    analyze_column::<DeadSlots>(database, "DeadSlots", DeadSlots::key_size())?;
-    analyze_column::<ErasureMeta>(database, "ErasureMeta", ErasureMeta::key_size())?;
-    analyze_column::<Root>(database, "Root", Root::key_size())?;
-    analyze_column::<Index>(database, "Index", Index::key_size())?;
-    analyze_column::<ShredData>(database, "ShredData", ShredData::key_size())?;
-    analyze_column::<ShredCode>(database, "ShredCode", ShredCode::key_size())?;
+    analyze_column::<SlotMeta>(database, "SlotMeta", SlotMeta::key_size());
+    analyze_column::<Orphans>(database, "Orphans", Orphans::key_size());
+    analyze_column::<DeadSlots>(database, "DeadSlots", DeadSlots::key_size());
+    analyze_column::<ErasureMeta>(database, "ErasureMeta", ErasureMeta::key_size());
+    analyze_column::<Root>(database, "Root", Root::key_size());
+    analyze_column::<Index>(database, "Index", Index::key_size());
+    analyze_column::<ShredData>(database, "ShredData", ShredData::key_size());
+    analyze_column::<ShredCode>(database, "ShredCode", ShredCode::key_size());
     analyze_column::<TransactionStatus>(
         database,
         "TransactionStatus",
         TransactionStatus::key_size(),
-    )?;
+    );
     analyze_column::<TransactionStatus>(
         database,
         "TransactionStatusIndex",
         TransactionStatusIndex::key_size(),
-    )?;
+    );
     analyze_column::<AddressSignatures>(
         database,
         "AddressSignatures",
         AddressSignatures::key_size(),
-    )?;
-    analyze_column::<Rewards>(database, "Rewards", Rewards::key_size())?;
-
-    Ok(())
+    );
+    analyze_column::<Rewards>(database, "Rewards", Rewards::key_size());
 }

 fn open_blockstore(
@@ -2757,7 +2746,7 @@ fn main() {
                 println!("Ledger is empty");
             } else {
                 let first = slots.first().unwrap();
-                let last = slots.last().unwrap_or_else(|| first);
+                let last = slots.last().unwrap_or(first);
                 if first != last {
                     println!("Ledger has data for slots {:?} to {:?}", first, last);
                     if all {
@@ -2775,18 +2764,11 @@ fn main() {
             }
         }
         ("analyze-storage", _) => {
-            match analyze_storage(&open_database(
+            analyze_storage(&open_database(
                 &ledger_path,
                 AccessType::TryPrimaryThenSecondary,
-            )) {
-                Ok(()) => {
-                    println!("Ok.");
-                }
-                Err(err) => {
-                    eprintln!("Unable to read the Ledger: {:?}", err);
-                    exit(1);
-                }
-            }
+            ));
+            println!("Ok.");
         }
         ("", _) => {
             eprintln!("{}", matches.usage());
@@ -107,7 +107,7 @@ pub async fn upload_confirmed_blocks(
             .difference(&bigtable_slots)
             .cloned()
             .collect::<Vec<_>>();
-        blocks_to_upload.sort();
+        blocks_to_upload.sort_unstable();
         blocks_to_upload
     };

@@ -441,10 +441,8 @@ impl Blockstore {
     }

     pub fn is_full(&self, slot: Slot) -> bool {
-        if let Ok(meta) = self.meta_cf.get(slot) {
-            if let Some(meta) = meta {
-                return meta.is_full();
-            }
+        if let Ok(Some(meta)) = self.meta_cf.get(slot) {
+            return meta.is_full();
         }
         false
     }
@@ -467,10 +465,10 @@ impl Blockstore {
         .unwrap_or(0)
     }

-    pub fn slot_meta_iterator<'a>(
-        &'a self,
+    pub fn slot_meta_iterator(
+        &self,
         slot: Slot,
-    ) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + 'a> {
+    ) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + '_> {
         let meta_iter = self
             .db
             .iter::<cf::SlotMeta>(IteratorMode::From(slot, IteratorDirection::Forward))?;
@@ -484,21 +482,18 @@ impl Blockstore {
     }

     #[allow(dead_code)]
-    pub fn live_slots_iterator<'a>(
-        &'a self,
-        root: Slot,
-    ) -> impl Iterator<Item = (Slot, SlotMeta)> + 'a {
+    pub fn live_slots_iterator(&self, root: Slot) -> impl Iterator<Item = (Slot, SlotMeta)> + '_ {
         let root_forks = NextSlotsIterator::new(root, self);

         let orphans_iter = self.orphans_iterator(root + 1).unwrap();
         root_forks.chain(orphans_iter.flat_map(move |orphan| NextSlotsIterator::new(orphan, self)))
     }

-    pub fn slot_data_iterator<'a>(
-        &'a self,
+    pub fn slot_data_iterator(
+        &self,
         slot: Slot,
         index: u64,
-    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + 'a> {
+    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
         let slot_iterator = self.db.iter::<cf::ShredData>(IteratorMode::From(
             (slot, index),
             IteratorDirection::Forward,
@@ -506,11 +501,11 @@ impl Blockstore {
         Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
     }

-    pub fn slot_coding_iterator<'a>(
-        &'a self,
+    pub fn slot_coding_iterator(
+        &self,
         slot: Slot,
         index: u64,
-    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + 'a> {
+    ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
         let slot_iterator = self.db.iter::<cf::ShredCode>(IteratorMode::From(
             (slot, index),
             IteratorDirection::Forward,
@@ -518,10 +513,7 @@ impl Blockstore {
         Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
     }

-    pub fn rooted_slot_iterator<'a>(
-        &'a self,
-        slot: Slot,
-    ) -> Result<impl Iterator<Item = u64> + 'a> {
+    pub fn rooted_slot_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
         let slot_iterator = self
             .db
             .iter::<cf::Root>(IteratorMode::From(slot, IteratorDirection::Forward))?;
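These accessors drop their explicit `'a` lifetimes and return `impl Iterator<...> + '_`, which is what `clippy::needless_lifetimes` suggests when the only lifetime in play is the borrow of `&self`. A toy sketch with an invented `Store` type, not from this codebase:

struct Store {
    items: Vec<u64>,
}

impl Store {
    // Before: fn iter_from<'a>(&'a self, start: usize) -> impl Iterator<Item = u64> + 'a
    // After: the anonymous '_ ties the iterator's borrow to &self without naming it.
    fn iter_from(&self, start: usize) -> impl Iterator<Item = u64> + '_ {
        self.items.iter().skip(start).copied()
    }
}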
@@ -929,7 +921,7 @@ impl Blockstore {
             &self.completed_slots_senders,
             should_signal,
             newly_completed_slots,
-        )?;
+        );

         total_start.stop();

@@ -1690,7 +1682,7 @@ impl Blockstore {
             .map(|(iter_slot, _)| iter_slot)
             .take(timestamp_sample_range)
             .collect();
-        timestamp_slots.sort();
+        timestamp_slots.sort_unstable();
         get_slots.stop();
         datapoint_info!(
             "blockstore-get-timestamp-slots",
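`Vec::sort` on plain integers is flagged by `clippy::stable_sort_primitive`: stability only matters when distinct elements compare equal, which cannot happen for primitive keys, and `sort_unstable` skips the temporary allocation. A minimal sketch:

fn sorted_slots(mut slots: Vec<u64>) -> Vec<u64> {
    // slots.sort() would preserve the relative order of equal elements at the
    // cost of an allocation; for u64 values that guarantee buys nothing.
    slots.sort_unstable();
    slots
}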
@@ -2746,17 +2738,14 @@ impl Blockstore {
             .is_some()
     }

-    pub fn orphans_iterator<'a>(&'a self, slot: Slot) -> Result<impl Iterator<Item = u64> + 'a> {
+    pub fn orphans_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
         let orphans_iter = self
             .db
             .iter::<cf::Orphans>(IteratorMode::From(slot, IteratorDirection::Forward))?;
         Ok(orphans_iter.map(|(slot, _)| slot))
     }

-    pub fn dead_slots_iterator<'a>(
-        &'a self,
-        slot: Slot,
-    ) -> Result<impl Iterator<Item = Slot> + 'a> {
+    pub fn dead_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
         let dead_slots_iterator = self
             .db
             .iter::<cf::DeadSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?;
@@ -2981,7 +2970,7 @@ fn send_signals(
     completed_slots_senders: &[SyncSender<Vec<u64>>],
     should_signal: bool,
     newly_completed_slots: Vec<u64>,
-) -> Result<()> {
+) {
     if should_signal {
         for signal in new_shreds_signals {
             let _ = signal.try_send(true);
@@ -3009,8 +2998,6 @@ fn send_signals(
             }
         }
     }
-
-    Ok(())
 }

 fn commit_slot_meta_working_set(
|
@ -3051,7 +3038,7 @@ fn find_slot_meta_else_create<'a>(
|
||||||
chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
||||||
slot_index: u64,
|
slot_index: u64,
|
||||||
) -> Result<Rc<RefCell<SlotMeta>>> {
|
) -> Result<Rc<RefCell<SlotMeta>>> {
|
||||||
let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index)?;
|
let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index);
|
||||||
if let Some(slot) = result {
|
if let Some(slot) = result {
|
||||||
Ok(slot)
|
Ok(slot)
|
||||||
} else {
|
} else {
|
||||||
|
@ -3061,10 +3048,10 @@ fn find_slot_meta_else_create<'a>(
|
||||||
|
|
||||||
// Search the database for that slot metadata. If still no luck, then
|
// Search the database for that slot metadata. If still no luck, then
|
||||||
// create a dummy orphan slot in the database
|
// create a dummy orphan slot in the database
|
||||||
fn find_slot_meta_in_db_else_create<'a>(
|
fn find_slot_meta_in_db_else_create(
|
||||||
db: &Database,
|
db: &Database,
|
||||||
slot: Slot,
|
slot: Slot,
|
||||||
insert_map: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
insert_map: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
||||||
) -> Result<Rc<RefCell<SlotMeta>>> {
|
) -> Result<Rc<RefCell<SlotMeta>>> {
|
||||||
if let Some(slot_meta) = db.column::<cf::SlotMeta>().get(slot)? {
|
if let Some(slot_meta) = db.column::<cf::SlotMeta>().get(slot)? {
|
||||||
insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
|
insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
|
||||||
|
@ -3083,13 +3070,13 @@ fn find_slot_meta_in_cached_state<'a>(
|
||||||
working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
|
working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
|
||||||
chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
||||||
slot: Slot,
|
slot: Slot,
|
||||||
) -> Result<Option<Rc<RefCell<SlotMeta>>>> {
|
) -> Option<Rc<RefCell<SlotMeta>>> {
|
||||||
if let Some(entry) = working_set.get(&slot) {
|
if let Some(entry) = working_set.get(&slot) {
|
||||||
Ok(Some(entry.new_slot_meta.clone()))
|
Some(entry.new_slot_meta.clone())
|
||||||
} else if let Some(entry) = chained_slots.get(&slot) {
|
} else if let Some(entry) = chained_slots.get(&slot) {
|
||||||
Ok(Some(entry.clone()))
|
Some(entry.clone())
|
||||||
} else {
|
} else {
|
||||||
Ok(None)
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -3596,7 +3583,7 @@ pub mod tests {
     use solana_storage_proto::convert::generated;
     use solana_transaction_status::{InnerInstructions, Reward, Rewards};
     use solana_vote_program::{vote_instruction, vote_state::Vote};
-    use std::{iter::FromIterator, time::Duration};
+    use std::time::Duration;

     // used for tests only
     pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
@@ -4062,7 +4049,7 @@ pub mod tests {
         let blockstore = Blockstore::open(&blockstore_path).unwrap();

         // Write entries
-        let num_slots = 5 as u64;
+        let num_slots = 5_u64;
         let mut index = 0;
         for slot in 0..num_slots {
             let entries = create_ticks(slot + 1, 0, Hash::default());
@@ -4094,8 +4081,8 @@ pub mod tests {
         let blockstore_path = get_tmp_ledger_path!();
         {
             let blockstore = Blockstore::open(&blockstore_path).unwrap();
-            let num_slots = 5 as u64;
-            let shreds_per_slot = 5 as u64;
+            let num_slots = 5_u64;
+            let shreds_per_slot = 5_u64;
             let entry_serialized_size =
                 bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
             let entries_per_slot =
@@ -4437,9 +4424,9 @@ pub mod tests {
         all_shreds.shuffle(&mut thread_rng());
         ledger.insert_shreds(all_shreds, None, false).unwrap();
         let mut result = recvr.try_recv().unwrap();
-        result.sort();
+        result.sort_unstable();
         slots.push(disconnected_slot);
-        slots.sort();
+        slots.sort_unstable();
         assert_eq!(result, slots);
     }

@@ -4799,23 +4786,22 @@ pub mod tests {
         blockstore.meta_cf.put(0, &meta0).unwrap();

         // Slot exists, chains to nothing
-        let expected: HashMap<u64, Vec<u64>> =
-            HashMap::from_iter(vec![(0, vec![])].into_iter());
+        let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect();
         assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
         meta0.next_slots = vec![1, 2];
         blockstore.meta_cf.put(0, &meta0).unwrap();

         // Slot exists, chains to some other slots
-        let expected: HashMap<u64, Vec<u64>> =
-            HashMap::from_iter(vec![(0, vec![1, 2])].into_iter());
+        let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect();
         assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
         assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);

         let mut meta3 = SlotMeta::new(3, 1);
         meta3.next_slots = vec![10, 5];
         blockstore.meta_cf.put(3, &meta3).unwrap();
-        let expected: HashMap<u64, Vec<u64>> =
-            HashMap::from_iter(vec![(0, vec![1, 2]), (3, vec![10, 5])].into_iter());
+        let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
+            .into_iter()
+            .collect();
         assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected);
     }

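Replacing `HashMap::from_iter(...)` with `.into_iter().collect()` is what lets the earlier `use std::{iter::FromIterator, time::Duration};` shrink to `use std::time::Duration;`: `collect` needs no extra trait import. A small sketch:

use std::collections::HashMap;

fn expected_children() -> HashMap<u64, Vec<u64>> {
    // Equivalent to HashMap::from_iter(..) but without importing FromIterator.
    vec![(0, vec![1, 2]), (3, vec![10, 5])].into_iter().collect()
}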
@@ -4902,7 +4888,7 @@ pub mod tests {
         let blockstore = Blockstore::open(&blockstore_path).unwrap();

         // Create shreds and entries
-        let num_entries = 20 as u64;
+        let num_entries = 20_u64;
         let mut entries = vec![];
         let mut shreds = vec![];
         let mut num_shreds_per_slot = 0;
@@ -5807,8 +5793,10 @@ pub mod tests {
         ledger.insert_shreds(more_shreds, None, false).unwrap();
         ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap();

-        let mut parent_meta = SlotMeta::default();
-        parent_meta.parent_slot = std::u64::MAX;
+        let parent_meta = SlotMeta {
+            parent_slot: std::u64::MAX,
+            ..SlotMeta::default()
+        };
         ledger
             .put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
             .unwrap();
@@ -95,7 +95,7 @@ impl Blockstore {
             .batch()
             .expect("Database Error: Failed to get write batch");
         // delete range cf is not inclusive
-        let to_slot = to_slot.checked_add(1).unwrap_or_else(|| std::u64::MAX);
+        let to_slot = to_slot.checked_add(1).unwrap_or(std::u64::MAX);

         let mut delete_range_timer = Measure::start("delete_range");
         let mut columns_purged = self
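`unwrap_or_else(|| std::u64::MAX)` builds a closure just to return a constant; `clippy::unnecessary_lazy_evaluations` prefers the eager `unwrap_or` when the fallback is already cheap to produce. Sketch:

fn next_slot_saturating(slot: u64) -> u64 {
    // The closure form defers nothing useful for a constant:
    //     slot.checked_add(1).unwrap_or_else(|| u64::MAX)
    slot.checked_add(1).unwrap_or(u64::MAX)
}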
@@ -360,11 +360,7 @@ impl Rocks {
         Ok(())
     }

-    fn iterator_cf<C>(
-        &self,
-        cf: &ColumnFamily,
-        iterator_mode: IteratorMode<C::Index>,
-    ) -> Result<DBIterator>
+    fn iterator_cf<C>(&self, cf: &ColumnFamily, iterator_mode: IteratorMode<C::Index>) -> DBIterator
     where
         C: Column,
     {
@@ -377,18 +373,15 @@ impl Rocks {
             IteratorMode::Start => RocksIteratorMode::Start,
             IteratorMode::End => RocksIteratorMode::End,
         };
-        let iter = self.0.iterator_cf(cf, iterator_mode);
-        Ok(iter)
+        self.0.iterator_cf(cf, iterator_mode)
     }

-    fn raw_iterator_cf(&self, cf: &ColumnFamily) -> Result<DBRawIterator> {
-        let raw_iter = self.0.raw_iterator_cf(cf);
-
-        Ok(raw_iter)
+    fn raw_iterator_cf(&self, cf: &ColumnFamily) -> DBRawIterator {
+        self.0.raw_iterator_cf(cf)
     }

-    fn batch(&self) -> Result<RWriteBatch> {
-        Ok(RWriteBatch::default())
+    fn batch(&self) -> RWriteBatch {
+        RWriteBatch::default()
     }

     fn write(&self, batch: RWriteBatch) -> Result<()> {
@@ -766,15 +759,15 @@ impl Database {
         }
     }

-    pub fn iter<'a, C>(
-        &'a self,
+    pub fn iter<C>(
+        &self,
         iterator_mode: IteratorMode<C::Index>,
-    ) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + 'a>
+    ) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + '_>
     where
         C: Column + ColumnName,
     {
         let cf = self.cf_handle::<C>();
-        let iter = self.backend.iterator_cf::<C>(cf, iterator_mode)?;
+        let iter = self.backend.iterator_cf::<C>(cf, iterator_mode);
         Ok(iter.map(|(key, value)| (C::index(&key), value)))
     }

@@ -798,11 +791,11 @@ impl Database {

     #[inline]
     pub fn raw_iterator_cf(&self, cf: &ColumnFamily) -> Result<DBRawIterator> {
-        self.backend.raw_iterator_cf(cf)
+        Ok(self.backend.raw_iterator_cf(cf))
     }

     pub fn batch(&self) -> Result<WriteBatch> {
-        let write_batch = self.backend.batch()?;
+        let write_batch = self.backend.batch();
         let map = self
             .backend
             .columns()
@@ -845,12 +838,12 @@ where
         self.backend.get_cf(self.handle(), &C::key(key))
     }

-    pub fn iter<'a>(
-        &'a self,
+    pub fn iter(
+        &self,
         iterator_mode: IteratorMode<C::Index>,
-    ) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + 'a> {
+    ) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + '_> {
         let cf = self.handle();
-        let iter = self.backend.iterator_cf::<C>(cf, iterator_mode)?;
+        let iter = self.backend.iterator_cf::<C>(cf, iterator_mode);
         Ok(iter.map(|(key, value)| (C::index(&key), value)))
     }

@@ -906,7 +899,7 @@ where

     #[cfg(test)]
     pub fn is_empty(&self) -> Result<bool> {
-        let mut iter = self.backend.raw_iterator_cf(self.handle())?;
+        let mut iter = self.backend.raw_iterator_cf(self.handle());
         iter.seek_to_first();
         Ok(!iter.valid())
     }
@@ -375,7 +375,7 @@ pub fn process_blockstore(
     let bank0 = Arc::new(bank0);
     info!("processing ledger for slot 0...");
     let recyclers = VerifyRecyclers::default();
-    process_bank_0(&bank0, blockstore, &opts, &recyclers)?;
+    process_bank_0(&bank0, blockstore, &opts, &recyclers);
     do_process_blockstore_from_root(blockstore, bank0, &opts, &recyclers, None)
 }

@@ -738,7 +738,7 @@ fn process_bank_0(
     blockstore: &Blockstore,
     opts: &ProcessOptions,
     recyclers: &VerifyRecyclers,
-) -> result::Result<(), BlockstoreProcessorError> {
+) {
     assert_eq!(bank0.slot(), 0);
     let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
     confirm_full_slot(
@@ -752,7 +752,6 @@ fn process_bank_0(
     )
     .expect("processing for bank 0 must succeed");
     bank0.freeze();
-    Ok(())
 }

 // Given a bank, add its children to the pending slots queue if those children slots are
@@ -2715,7 +2714,7 @@ pub mod tests {
             ..ProcessOptions::default()
         };
         let recyclers = VerifyRecyclers::default();
-        process_bank_0(&bank0, &blockstore, &opts, &recyclers).unwrap();
+        process_bank_0(&bank0, &blockstore, &opts, &recyclers);
         let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
         confirm_full_slot(
             &blockstore,
@@ -2901,7 +2900,7 @@ pub mod tests {

     fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> {
         let mut slots: Vec<_> = bank_forks.frozen_banks().keys().cloned().collect();
-        slots.sort();
+        slots.sort_unstable();
         slots
     }

@@ -3210,6 +3209,7 @@ pub mod tests {
     }

     #[test]
+    #[allow(clippy::field_reassign_with_default)]
     fn test_supermajority_root_from_vote_accounts() {
         let convert_to_vote_accounts =
             |roots_stakes: Vec<(Slot, u64)>| -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
@@ -322,7 +322,7 @@ mod tests {
         LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order, MAX_SCHEDULES);
         assert_eq!(cached_schedules.len(), MAX_SCHEDULES);
         let mut keys: Vec<_> = cached_schedules.keys().cloned().collect();
-        keys.sort();
+        keys.sort_unstable();
         let expected: Vec<_> = (1..=MAX_SCHEDULES as u64).collect();
         let expected_order: VecDeque<_> = (1..=MAX_SCHEDULES as u64).collect();
         assert_eq!(expected, keys);
@@ -500,6 +500,7 @@ impl Shredder {
         reference_tick: u8,
         version: u16,
     ) -> Result<Self> {
+        #[allow(clippy::manual_range_contains)]
         if fec_rate > 1.0 || fec_rate < 0.0 {
             Err(ShredError::InvalidFecRate(fec_rate))
         } else if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) {
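`clippy::manual_range_contains` asks for `Range::contains` instead of a pair of comparisons; the hunk above keeps the spelled-out float check behind an allow, arguably because the out-of-range condition reads more directly. Both spellings of the bounds check, as a sketch (NaN aside, where the two differ):

fn fec_rate_invalid(fec_rate: f32) -> bool {
    // Spelled-out form kept behind the allow in the hunk above:
    fec_rate > 1.0 || fec_rate < 0.0
    // Lint-preferred form for the same idea:
    //     !(0.0..=1.0).contains(&fec_rate)
}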
@@ -37,7 +37,7 @@ fn test_multiple_threads_insert_shred() {

     // Check slot 0 has the correct children
     let mut meta0 = blockstore.meta(0).unwrap().unwrap();
-    meta0.next_slots.sort();
+    meta0.next_slots.sort_unstable();
     let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
     assert_eq!(meta0.next_slots, expected_next_slots);

@@ -68,8 +68,10 @@ fn test_ledger_cleanup_service() {
     solana_logger::setup();
     error!("test_ledger_cleanup_service");
     let num_nodes = 3;
-    let mut validator_config = ValidatorConfig::default();
-    validator_config.max_ledger_shreds = Some(100);
+    let validator_config = ValidatorConfig {
+        max_ledger_shreds: Some(100),
+        ..ValidatorConfig::default()
+    };
     let mut config = ClusterConfig {
         cluster_lamports: 10_000,
         poh_config: PohConfig::new_sleep(Duration::from_millis(50)),
@@ -322,8 +324,10 @@ fn run_cluster_partition<E, F>(
     assert_eq!(node_stakes.len(), num_nodes);
     let cluster_lamports = node_stakes.iter().sum::<u64>() * 2;
     let enable_partition = Arc::new(AtomicBool::new(true));
-    let mut validator_config = ValidatorConfig::default();
-    validator_config.enable_partition = Some(enable_partition.clone());
+    let mut validator_config = ValidatorConfig {
+        enable_partition: Some(enable_partition.clone()),
+        ..ValidatorConfig::default()
+    };

     // Returns:
     // 1) The keys for the validators
@@ -702,7 +706,7 @@ fn test_forwarding() {
 fn test_restart_node() {
     solana_logger::setup();
     error!("test_restart_node");
-    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2 as u64;
+    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2;
     let ticks_per_slot = 16;
     let validator_config = ValidatorConfig::default();
     let mut cluster = LocalCluster::new(&mut ClusterConfig {
@@ -1326,8 +1330,10 @@ fn test_fake_shreds_broadcast_leader() {
 fn test_faulty_node(faulty_node_type: BroadcastStageType) {
     solana_logger::setup();
     let num_nodes = 2;
-    let mut error_validator_config = ValidatorConfig::default();
-    error_validator_config.broadcast_stage_type = faulty_node_type;
+    let error_validator_config = ValidatorConfig {
+        broadcast_stage_type: faulty_node_type,
+        ..ValidatorConfig::default()
+    };
     let mut validator_configs = Vec::with_capacity(num_nodes - 1);
     validator_configs.resize_with(num_nodes - 1, ValidatorConfig::default);

@@ -1339,8 +1345,8 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
         cluster_lamports: 10_000,
         node_stakes,
         validator_configs,
-        slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH * 2 as u64,
-        stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH * 2 as u64,
+        slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH * 2,
+        stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH * 2,
         ..ClusterConfig::default()
     };

@@ -2156,7 +2162,7 @@ fn test_run_test_load_program_accounts_partition_root() {

 fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
     let num_slots_per_validator = 8;
-    let partitions: [&[usize]; 2] = [&[(1 as usize)], &[(1 as usize)]];
+    let partitions: [&[usize]; 2] = [&[(1)], &[(1)]];
     let (leader_schedule, validator_keys) =
         create_custom_leader_schedule(partitions.len(), num_slots_per_validator);

@@ -581,7 +581,7 @@ mod tests {
             3000
         );
         let port = find_available_port_in_range(ip_addr, (3000, 3050)).unwrap();
-        assert!(3000 <= port && port < 3050);
+        assert!((3000..3050).contains(&port));

         let _socket = bind_to(ip_addr, port, false).unwrap();
         find_available_port_in_range(ip_addr, (port, port + 1)).unwrap_err();
@@ -591,7 +591,7 @@ mod tests {
     fn test_bind_common_in_range() {
         let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
         let (port, _sockets) = bind_common_in_range(ip_addr, (3100, 3150)).unwrap();
-        assert!(3100 <= port && port < 3150);
+        assert!((3100..3150).contains(&port));

         bind_common_in_range(ip_addr, (port, port + 1)).unwrap_err();
     }
@@ -33,8 +33,6 @@ fn bench_get_offsets(bencher: &mut Bencher) {
     let recycler = Recycler::default();
     // verify packets
     bencher.iter(|| {
-        let ans = sigverify::generate_offsets(&batches, &recycler);
-        assert!(ans.is_ok());
-        let _ans = ans.unwrap();
+        let _ans = sigverify::generate_offsets(&batches, &recycler);
     })
 }

@@ -192,10 +192,7 @@ fn get_packet_offsets(packet: &Packet, current_offset: u32) -> PacketOffsets {
     }
 }

-pub fn generate_offsets(
-    batches: &[Packets],
-    recycler: &Recycler<TxOffset>,
-) -> Result<TxOffsets, ()> {
+pub fn generate_offsets(batches: &[Packets], recycler: &Recycler<TxOffset>) -> TxOffsets {
     debug!("allocating..");
     let mut signature_offsets: PinnedVec<_> = recycler.allocate("sig_offsets");
     signature_offsets.set_pinnable();
@@ -236,13 +233,13 @@ pub fn generate_offsets(
         });
         v_sig_lens.push(sig_lens);
     });
-    Ok((
+    (
         signature_offsets,
         pubkey_offsets,
         msg_start_offsets,
         msg_sizes,
         v_sig_lens,
-    ))
+    )
 }

 pub fn ed25519_verify_cpu(batches: &[Packets]) -> Vec<Vec<u8>> {
@@ -346,7 +343,7 @@ pub fn ed25519_verify(
     }

     let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, sig_lens) =
-        generate_offsets(batches, recycler).unwrap();
+        generate_offsets(batches, recycler);

     debug!("CUDA ECDSA for {}", batch_size(batches));
     debug!("allocating out..");
@@ -74,9 +74,11 @@ pub fn builtin_process_instruction(
     input: &[u8],
     invoke_context: &mut dyn InvokeContext,
 ) -> Result<(), InstructionError> {
-    let mut mock_invoke_context = MockInvokeContext::default();
-    mock_invoke_context.programs = invoke_context.get_programs().to_vec();
-    mock_invoke_context.key = *program_id;
+    let mock_invoke_context = MockInvokeContext {
+        programs: invoke_context.get_programs().to_vec(),
+        key: *program_id,
+        ..MockInvokeContext::default()
+    };
     // TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
     // The context being passed into the program is incomplete...
     let local_invoke_context = RefCell::new(Rc::new(mock_invoke_context));
@@ -19,7 +19,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 {
     assert_eq!(z, 340_282_366_920_938_463_463_374_607_431_768_211_454);

     assert_eq!(u128::from(1u32.to_le()), 1);
-    assert_eq!(u128::from(1u32.to_be()), 0x1_000_000);
+    assert_eq!(u128::from(1u32.to_be()), 0x0100_0000);

     assert_eq!(solana_bpf_rust_128bit_dep::uadd(10, 20), 30u128);
     assert_eq!(solana_bpf_rust_128bit_dep::usubtract(30, 20), 10u128);
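The literal change above regroups a hexadecimal constant by bytes: `0x1_000_000` and `0x0100_0000` are the same number (1u32 byte-swapped, i.e. 16_777_216), but pairing hex digits per byte matches how the value is read and is the grouping Clippy's digit-grouping lints push toward. Tiny sketch:

fn one_big_endian() -> u128 {
    // Same value either way; the byte-aligned grouping is easier to scan.
    debug_assert_eq!(0x0100_0000, 0x1_000_000);
    0x0100_0000
}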
@@ -53,7 +53,8 @@ unsafe impl std::alloc::GlobalAlloc for BumpAllocator {
 static A: BumpAllocator = BumpAllocator;

 entrypoint!(process_instruction);
-fn process_instruction(
+#[allow(clippy::unnecessary_wraps)]
+pub fn process_instruction(
     _program_id: &Pubkey,
     _accounts: &[AccountInfo],
     _instruction_data: &[u8],
@@ -27,6 +27,7 @@ fn custom_panic(info: &core::panic::PanicInfo<'_>) {
 }

 entrypoint_deprecated!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     program_id: &Pubkey,
     accounts: &[AccountInfo],
@@ -6,6 +6,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     accounts: &[AccountInfo],
@@ -7,6 +7,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     accounts: &[AccountInfo],
@@ -6,6 +6,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     _accounts: &[AccountInfo],
@@ -14,6 +14,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     _accounts: &[AccountInfo],
@@ -8,6 +8,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     _accounts: &[AccountInfo],
@@ -21,6 +21,7 @@ fn return_sstruct() -> SStruct {
 }

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     program_id: &Pubkey,
     accounts: &[AccountInfo],
@@ -3,6 +3,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     accounts: &[AccountInfo],
@@ -16,6 +16,7 @@ use solana_program::{
 };

 entrypoint!(process_instruction);
+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     accounts: &[AccountInfo],
@@ -141,9 +141,10 @@ pub fn process_instruction(
                 trace!("contract already exists");
                 return Err(InstructionError::AccountAlreadyInitialized);
             }
-            let mut budget_state = BudgetState::default();
-            budget_state.pending_budget = Some(*expr);
-            budget_state.initialized = true;
+            let budget_state = BudgetState {
+                pending_budget: Some(*expr),
+                initialized: true,
+            };
             budget_state.serialize(&mut contract_keyed_account.try_account_ref_mut()?.data)
         }
         BudgetInstruction::ApplyTimestamp(dt) => {
@@ -10,7 +10,7 @@ solana_sdk::declare_program!(
     process_instruction
 );

-fn process_instruction(
+pub fn process_instruction(
     program_id: &Pubkey,
     keyed_accounts: &[KeyedAccount],
     data: &[u8],
@@ -562,13 +562,15 @@ mod tests {
                 } else if sysvar::rent::check_id(&meta.pubkey) {
                     account::create_account(&Rent::default(), 1)
                 } else if meta.pubkey == invalid_stake_state_pubkey() {
-                    let mut account = Account::default();
-                    account.owner = id();
-                    account
+                    Account {
+                        owner: id(),
+                        ..Account::default()
+                    }
                 } else if meta.pubkey == invalid_vote_state_pubkey() {
-                    let mut account = Account::default();
-                    account.owner = solana_vote_program::id();
-                    account
+                    Account {
+                        owner: solana_vote_program::id(),
+                        ..Account::default()
+                    }
                 } else {
                     Account::default()
                 })
@@ -272,16 +272,18 @@ impl VoteState {
     }

     fn get_max_sized_vote_state() -> VoteState {
-        let mut vote_state = Self::default();
-        vote_state.votes = VecDeque::from(vec![Lockout::default(); MAX_LOCKOUT_HISTORY]);
-        vote_state.root_slot = Some(std::u64::MAX);
-        vote_state.epoch_credits = vec![(0, 0, 0); MAX_EPOCH_CREDITS_HISTORY];
         let mut authorized_voters = AuthorizedVoters::default();
         for i in 0..=MAX_LEADER_SCHEDULE_EPOCH_OFFSET {
             authorized_voters.insert(i, solana_sdk::pubkey::new_rand());
         }
-        vote_state.authorized_voters = authorized_voters;
-        vote_state
+        VoteState {
+            votes: VecDeque::from(vec![Lockout::default(); MAX_LOCKOUT_HISTORY]),
+            root_slot: Some(std::u64::MAX),
+            epoch_credits: vec![(0, 0, 0); MAX_EPOCH_CREDITS_HISTORY],
+            authorized_voters,
+            ..Self::default()
+        }
     }

     fn check_slots_are_valid(
@@ -463,10 +465,7 @@ impl VoteState {
     where
         F: Fn(Pubkey) -> Result<(), InstructionError>,
     {
-        let epoch_authorized_voter = self.get_and_update_authorized_voter(current_epoch).expect(
-            "the clock epoch is monotonically increasing, so authorized voter must be known",
-        );
+        let epoch_authorized_voter = self.get_and_update_authorized_voter(current_epoch);

         verify(epoch_authorized_voter)?;

         // The offset in slots `n` on which the target_epoch
@@ -514,17 +513,16 @@ impl VoteState {
         Ok(())
     }

-    fn get_and_update_authorized_voter(&mut self, current_epoch: Epoch) -> Option<Pubkey> {
+    fn get_and_update_authorized_voter(&mut self, current_epoch: Epoch) -> Pubkey {
         let pubkey = self
             .authorized_voters
             .get_and_cache_authorized_voter_for_epoch(current_epoch)
             .expect(
-                "Internal functions should
-                only call this will monotonically increasing current_epoch",
+                "Internal functions should only call this will monotonically increasing current_epoch",
             );
         self.authorized_voters
             .purge_authorized_voters(current_epoch);
-        Some(pubkey)
+        pubkey
     }

     fn pop_expired_votes(&mut self, slot: Slot) {
@@ -702,9 +700,7 @@ pub fn process_vote<S: std::hash::BuildHasher>(
     }

     let mut vote_state = versioned.convert_to_current();
-    let authorized_voter = vote_state
-        .get_and_update_authorized_voter(clock.epoch)
-        .expect("the clock epoch is monotonically increasing, so authorized voter must be known");
+    let authorized_voter = vote_state.get_and_update_authorized_voter(clock.epoch);
     verify_authorized_signer(&authorized_voter, signers)?;

     vote_state.process_vote(vote, slot_hashes, clock.epoch)?;
@@ -712,7 +708,7 @@ pub fn process_vote<S: std::hash::BuildHasher>(
         vote.slots
             .iter()
             .max()
-            .ok_or_else(|| VoteError::EmptySlots)
+            .ok_or(VoteError::EmptySlots)
             .and_then(|slot| vote_state.process_timestamp(*slot, timestamp))?;
     }
     vote_account.set_state(&VoteStateVersions::Current(Box::new(vote_state)))
@ -1571,8 +1567,10 @@ mod tests {
|
||||||
|
|
||||||
assert_eq!(vote_state.commission_split(1), (0, 1, false));
|
assert_eq!(vote_state.commission_split(1), (0, 1, false));
|
||||||
|
|
||||||
let mut vote_state = VoteState::default();
|
let mut vote_state = VoteState {
|
||||||
vote_state.commission = std::u8::MAX;
|
commission: std::u8::MAX,
|
||||||
|
..VoteState::default()
|
||||||
|
};
|
||||||
assert_eq!(vote_state.commission_split(1), (1, 0, false));
|
assert_eq!(vote_state.commission_split(1), (1, 0, false));
|
||||||
|
|
||||||
vote_state.commission = 99;
|
vote_state.commission = 99;
|
||||||
|
@ -1726,8 +1724,10 @@ mod tests {
|
||||||
#[test]
|
#[test]
|
||||||
fn test_vote_process_timestamp() {
|
fn test_vote_process_timestamp() {
|
||||||
let (slot, timestamp) = (15, 1_575_412_285);
|
let (slot, timestamp) = (15, 1_575_412_285);
|
||||||
let mut vote_state = VoteState::default();
|
let mut vote_state = VoteState {
|
||||||
vote_state.last_timestamp = BlockTimestamp { slot, timestamp };
|
last_timestamp: BlockTimestamp { slot, timestamp },
|
||||||
|
..VoteState::default()
|
||||||
|
};
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.process_timestamp(slot - 1, timestamp + 1),
|
vote_state.process_timestamp(slot - 1, timestamp + 1),
|
||||||
|
@ -1791,14 +1791,14 @@ mod tests {
|
||||||
// If no new authorized voter was set, the same authorized voter
|
// If no new authorized voter was set, the same authorized voter
|
||||||
// is locked into the next epoch
|
// is locked into the next epoch
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.get_and_update_authorized_voter(1).unwrap(),
|
vote_state.get_and_update_authorized_voter(1),
|
||||||
original_voter
|
original_voter
|
||||||
);
|
);
|
||||||
|
|
||||||
// Try to get the authorized voter for epoch 5, implies
|
// Try to get the authorized voter for epoch 5, implies
|
||||||
// the authorized voter for epochs 1-4 were unchanged
|
// the authorized voter for epochs 1-4 were unchanged
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.get_and_update_authorized_voter(5).unwrap(),
|
vote_state.get_and_update_authorized_voter(5),
|
||||||
original_voter
|
original_voter
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -1820,7 +1820,7 @@ mod tests {
|
||||||
|
|
||||||
// Try to get the authorized voter for epoch 6, unchanged
|
// Try to get the authorized voter for epoch 6, unchanged
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.get_and_update_authorized_voter(6).unwrap(),
|
vote_state.get_and_update_authorized_voter(6),
|
||||||
original_voter
|
original_voter
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -1828,7 +1828,7 @@ mod tests {
|
||||||
// be the new authorized voter
|
// be the new authorized voter
|
||||||
for i in 7..10 {
|
for i in 7..10 {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.get_and_update_authorized_voter(i).unwrap(),
|
vote_state.get_and_update_authorized_voter(i),
|
||||||
new_authorized_voter
|
new_authorized_voter
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
@ -1904,31 +1904,22 @@ mod tests {
|
||||||
// voters is correct
|
// voters is correct
|
||||||
for i in 9..epoch_offset {
|
for i in 9..epoch_offset {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.get_and_update_authorized_voter(i).unwrap(),
|
vote_state.get_and_update_authorized_voter(i),
|
||||||
original_voter
|
original_voter
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
for i in epoch_offset..3 + epoch_offset {
|
for i in epoch_offset..3 + epoch_offset {
|
||||||
assert_eq!(
|
assert_eq!(vote_state.get_and_update_authorized_voter(i), new_voter);
|
||||||
vote_state.get_and_update_authorized_voter(i).unwrap(),
|
|
||||||
new_voter
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
for i in 3 + epoch_offset..6 + epoch_offset {
|
for i in 3 + epoch_offset..6 + epoch_offset {
|
||||||
assert_eq!(
|
assert_eq!(vote_state.get_and_update_authorized_voter(i), new_voter2);
|
||||||
vote_state.get_and_update_authorized_voter(i).unwrap(),
|
|
||||||
new_voter2
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
for i in 6 + epoch_offset..9 + epoch_offset {
|
for i in 6 + epoch_offset..9 + epoch_offset {
|
||||||
assert_eq!(
|
assert_eq!(vote_state.get_and_update_authorized_voter(i), new_voter3);
|
||||||
vote_state.get_and_update_authorized_voter(i).unwrap(),
|
|
||||||
new_voter3
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
for i in 9 + epoch_offset..=10 + epoch_offset {
|
for i in 9 + epoch_offset..=10 + epoch_offset {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
vote_state.get_and_update_authorized_voter(i).unwrap(),
|
vote_state.get_and_update_authorized_voter(i),
|
||||||
original_voter
|
original_voter
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
|
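The `.ok_or_else(|| VoteError::EmptySlots)` to `.ok_or(VoteError::EmptySlots)` change above follows clippy::unnecessary_lazy_evaluations: when the error value is cheap to construct, the eager form reads better and costs nothing. A small sketch under that assumption; `ParseError` is a made-up type, not part of this diff, and the closure form remains the right choice when building the error is expensive.

#[derive(Debug)]
enum ParseError {
    Empty,
}

fn first_char(s: &str) -> Result<char, ParseError> {
    // Before: s.chars().next().ok_or_else(|| ParseError::Empty)
    s.chars().next().ok_or(ParseError::Empty)
}

fn main() {
    assert_eq!(first_char("abc").unwrap(), 'a');
    assert!(first_char("").is_err());
}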
@@ -30,6 +30,7 @@ pub struct Results {

 impl Results {
     /// Keep any result entries which occurred before the starting round.
+    #[allow(clippy::manual_strip)]
     pub fn new(
         file_path: String,
         mut previous_results: HashMap<String, Vec<String>>,

@@ -39,7 +40,6 @@ impl Results {
         previous_results.drain().for_each(|(key, value)| {
             if key.starts_with(ROUND_KEY_PREFIX) {
                 let round_str = &key[ROUND_KEY_PREFIX.len()..];
-                dbg!(round_str);
                 if let Ok(round) = u32::from_str(round_str) {
                     if round < start_round {
                         results.insert(Round(round), value);
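The hunk above silences clippy::manual_strip rather than rewriting the prefix handling. For reference, this is roughly the shape the lint asks for: `str::strip_prefix` (stable since Rust 1.45) instead of `starts_with` plus slicing by the prefix length. The prefix constant below is a stand-in, not the value used by this code.

const ROUND_KEY_PREFIX: &str = "round-";

fn round_number(key: &str) -> Option<u32> {
    // Manual form the allow covers:
    //     if key.starts_with(ROUND_KEY_PREFIX) {
    //         key[ROUND_KEY_PREFIX.len()..].parse().ok()
    //     } else {
    //         None
    //     }
    key.strip_prefix(ROUND_KEY_PREFIX)?.parse().ok()
}

fn main() {
    assert_eq!(round_number("round-7"), Some(7));
    assert_eq!(round_number("other-7"), None);
}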
@@ -260,11 +260,13 @@ impl RemoteWalletInfo {
             ));
         }

-        let mut wallet_info = RemoteWalletInfo::default();
-        wallet_info.manufacturer = wallet_path.host_str().unwrap().to_string();
+        let mut wallet_info = RemoteWalletInfo {
+            manufacturer: wallet_path.host_str().unwrap().to_string(),
+            ..RemoteWalletInfo::default()
+        };

         if let Some(wallet_id) = wallet_path.path_segments().map(|c| c.collect::<Vec<_>>()) {
-            if wallet_id[0] != "" {
+            if !wallet_id[0].is_empty() {
                 wallet_info.pubkey = Pubkey::from_str(wallet_id[0]).map_err(|e| {
                     RemoteWalletError::InvalidDerivationPath(format!(
                         "pubkey from_str error: {:?}",

@@ -597,8 +599,10 @@ mod tests {
             pubkey,
             error: None,
         };
-        let mut test_info = RemoteWalletInfo::default();
-        test_info.manufacturer = "Not Ledger".to_string();
+        let mut test_info = RemoteWalletInfo {
+            manufacturer: "Not Ledger".to_string(),
+            ..RemoteWalletInfo::default()
+        };
         assert!(!info.matches(&test_info));
         test_info.manufacturer = "Ledger".to_string();
         assert!(info.matches(&test_info));

@@ -30,6 +30,7 @@ const NOOP_PROGRAM_ID: [u8; 32] = [
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
 ];

+#[allow(clippy::unnecessary_wraps)]
 fn process_instruction(
     _program_id: &Pubkey,
     _keyed_accounts: &[KeyedAccount],
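The `#[allow(clippy::unnecessary_wraps)]` added above (and on several mock processors later in this diff) covers functions that always return Ok but must keep a Result return type to match a processor signature. A reduced sketch of that situation; the `Handler` alias and names are illustrative only.

type Handler = fn(&[u8]) -> Result<(), String>;

#[allow(clippy::unnecessary_wraps)]
fn noop_handler(_data: &[u8]) -> Result<(), String> {
    // Always Ok, but the Result wrapper is dictated by the Handler alias.
    Ok(())
}

fn main() {
    let handler: Handler = noop_handler;
    assert!(handler(b"ignored").is_ok());
}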
@@ -16,7 +16,7 @@ use test::Bencher;
 #[bench]
 #[ignore]
 fn bench_bits_set(bencher: &mut Bencher) {
-    let mut bits: BitVec<u8> = BitVec::new_fill(false, 38_340_234 as u64);
+    let mut bits: BitVec<u8> = BitVec::new_fill(false, 38_340_234_u64);
     let mut hasher = FnvHasher::default();

     bencher.iter(|| {

@@ -31,7 +31,7 @@ fn bench_bits_set(bencher: &mut Bencher) {
 #[bench]
 #[ignore]
 fn bench_bits_set_hasher(bencher: &mut Bencher) {
-    let bits: BitVec<u8> = BitVec::new_fill(false, 38_340_234 as u64);
+    let bits: BitVec<u8> = BitVec::new_fill(false, 38_340_234_u64);
     let mut hasher = FnvHasher::default();

     bencher.iter(|| {
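The bench changes above replace `38_340_234 as u64` with the suffixed literal `38_340_234_u64`: same value, no cast, and clippy::unnecessary_cast stays quiet. A tiny sketch:

fn main() {
    let with_cast = 38_340_234 as u64; // what clippy flags
    let with_suffix = 38_340_234_u64; // preferred form
    assert_eq!(with_cast, with_suffix);
}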
@@ -118,8 +118,10 @@ impl Accounts {
     }

     fn construct_instructions_account(message: &Message) -> Account {
-        let mut account = Account::default();
-        account.data = message.serialize_instructions();
+        let mut account = Account {
+            data: message.serialize_instructions(),
+            ..Account::default()
+        };

         // add room for current instruction index.
         account.data.resize(account.data.len() + 2, 0);

@@ -188,7 +188,7 @@ impl ABSRequestHandler {
         })
     }

-    pub fn handle_pruned_banks<'a>(&'a self, bank: &Bank) -> usize {
+    pub fn handle_pruned_banks(&self, bank: &Bank) -> usize {
         let mut count = 0;
         for pruned_slot in self.pruned_banks_receiver.try_iter() {
             count += 1;

@@ -44,7 +44,6 @@ use std::{
     collections::{HashMap, HashSet},
     convert::TryInto,
     io::{Error as IOError, Result as IOResult},
-    iter::FromIterator,
     ops::RangeBounds,
     path::{Path, PathBuf},
     sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},

@@ -901,10 +900,7 @@ impl AccountsDB {
         let pubkey_to_slot_set: Vec<_> = purges
             .into_iter()
             .map(|(key, (slots_list, _ref_count))| {
-                (
-                    key,
-                    HashSet::from_iter(slots_list.into_iter().map(|(slot, _)| slot)),
-                )
+                (key, slots_list.into_iter().map(|(slot, _)| slot).collect())
             })
             .collect();

@@ -2875,6 +2871,7 @@ impl AccountsDB {

     pub fn generate_index(&self) {
         let mut slots = self.storage.all_slots();
+        #[allow(clippy::stable_sort_primitive)]
         slots.sort();

         let mut last_log_update = Instant::now();

@@ -2982,6 +2979,7 @@ impl AccountsDB {

     fn print_index(&self, label: &str) {
         let mut roots: Vec<_> = self.accounts_index.all_roots();
+        #[allow(clippy::stable_sort_primitive)]
         roots.sort();
         info!("{}: accounts_index roots: {:?}", label, roots,);
         for (pubkey, account_entry) in self.accounts_index.account_maps.read().unwrap().iter() {

@@ -2995,12 +2993,14 @@ impl AccountsDB {

     fn print_count_and_status(&self, label: &str) {
         let mut slots: Vec<_> = self.storage.all_slots();
+        #[allow(clippy::stable_sort_primitive)]
         slots.sort();
         info!("{}: count_and status for {} slots:", label, slots.len());
         for slot in &slots {
             let slot_stores = self.storage.get_slot_stores(*slot).unwrap();
             let r_slot_stores = slot_stores.read().unwrap();
             let mut ids: Vec<_> = r_slot_stores.keys().cloned().collect();
+            #[allow(clippy::stable_sort_primitive)]
             ids.sort();
             for id in &ids {
                 let entry = r_slot_stores.get(id).unwrap();

@@ -3033,7 +3033,7 @@ pub mod tests {
     use assert_matches::assert_matches;
     use rand::{thread_rng, Rng};
     use solana_sdk::{account::Account, hash::HASH_BYTES};
-    use std::{fs, str::FromStr};
+    use std::{fs, iter::FromIterator, str::FromStr};

     fn linear_ancestors(end_slot: u64) -> Ancestors {
         let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();

@@ -3155,8 +3155,10 @@ pub mod tests {
            let idx = thread_rng().gen_range(0, 99);
            let ancestors = vec![(0, 0)].into_iter().collect();
            let account = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
-           let mut default_account = Account::default();
-           default_account.lamports = (idx + 1) as u64;
+           let default_account = Account {
+               lamports: (idx + 1) as u64,
+               ..Account::default()
+           };
            assert_eq!((default_account, 0), account);
        }

@@ -3169,8 +3171,10 @@ pub mod tests {
            let account0 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
            let ancestors = vec![(1, 1)].into_iter().collect();
            let account1 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
-           let mut default_account = Account::default();
-           default_account.lamports = (idx + 1) as u64;
+           let default_account = Account {
+               lamports: (idx + 1) as u64,
+               ..Account::default()
+           };
            assert_eq!(&default_account, &account0.0);
            assert_eq!(&default_account, &account1.0);
        }

@@ -3361,8 +3365,10 @@ pub mod tests {
            let ancestors = vec![(slot, 0)].into_iter().collect();
            assert!(accounts.load_slow(&ancestors, &pubkeys[idx]).is_none());
        } else {
-           let mut default_account = Account::default();
-           default_account.lamports = account.lamports;
+           let default_account = Account {
+               lamports: account.lamports,
+               ..Account::default()
+           };
            assert_eq!(default_account, account);
        }
     }

@@ -3443,8 +3449,10 @@ pub mod tests {
        create_account(&db, &mut pubkeys, 0, 1, 0, 0);
        let ancestors = vec![(0, 0)].into_iter().collect();
        let account = db.load_slow(&ancestors, &pubkeys[0]).unwrap();
-       let mut default_account = Account::default();
-       default_account.lamports = 1;
+       let default_account = Account {
+           lamports: 1,
+           ..Account::default()
+       };
        assert_eq!((default_account, 0), account);
     }

@@ -4434,7 +4442,7 @@ pub mod tests {

         db.print_accounts_stats("pre");

-        let slots: HashSet<Slot> = HashSet::from_iter(vec![1].into_iter());
+        let slots: HashSet<Slot> = vec![1].into_iter().collect();
         let purge_keys = vec![(key1, slots)];
         let (_reclaims, dead_keys) = db.purge_keys_exact(purge_keys);

@@ -5242,7 +5250,7 @@ pub mod tests {

         accounts.reset_uncleaned_roots();
         let mut actual_slots = accounts.shrink_candidate_slots.lock().unwrap().clone();
-        actual_slots.sort();
+        actual_slots.sort_unstable();
         assert_eq!(actual_slots, vec![0, 1, 2]);

         accounts.accounts_index.clear_roots();

@@ -5435,7 +5443,7 @@ pub mod tests {
         store_counts.insert(3, (1, HashSet::from_iter(vec![key2])));
         AccountsDB::calc_delete_dependencies(&purges, &mut store_counts);
         let mut stores: Vec<_> = store_counts.keys().cloned().collect();
-        stores.sort();
+        stores.sort_unstable();
         for store in &stores {
             info!(
                 "store: {:?} : {:?}",
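Two sort patterns run through the accounts hunks above: switching to `sort_unstable()` where a stable sort of primitive keys buys nothing, and adding `#[allow(clippy::stable_sort_primitive)]` where the existing `sort()` is kept on purpose. A self-contained sketch of both, using made-up data rather than anything from this diff:

fn main() {
    // clippy::stable_sort_primitive: equal primitives are indistinguishable,
    // so the stable sort's extra allocation is wasted; prefer sort_unstable.
    let mut slots = vec![3_u64, 1, 2, 2];
    slots.sort_unstable();
    assert_eq!(slots, vec![1, 2, 2, 3]);

    // Or keep the stable sort and silence the lint at the call site, as
    // several hunks in this commit do.
    let mut roots = vec![5_u64, 4];
    #[allow(clippy::stable_sort_primitive)]
    roots.sort();
    assert_eq!(roots, vec![4, 5]);
}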
@@ -196,12 +196,8 @@ impl<T: 'static + Clone> AccountsIndex<T> {
         AccountsIndexIterator::new(&self.account_maps, range)
     }

-    fn do_checked_scan_accounts<'a, F, R>(
-        &'a self,
-        ancestors: &Ancestors,
-        func: F,
-        range: Option<R>,
-    ) where
+    fn do_checked_scan_accounts<F, R>(&self, ancestors: &Ancestors, func: F, range: Option<R>)
+    where
         F: FnMut(&Pubkey, (&T, Slot)),
         R: RangeBounds<Pubkey>,
     {

@@ -349,12 +345,8 @@ impl<T: 'static + Clone> AccountsIndex<T> {
         }
     }

-    fn do_unchecked_scan_accounts<'a, F, R>(
-        &'a self,
-        ancestors: &Ancestors,
-        func: F,
-        range: Option<R>,
-    ) where
+    fn do_unchecked_scan_accounts<F, R>(&self, ancestors: &Ancestors, func: F, range: Option<R>)
+    where
         F: FnMut(&Pubkey, (&T, Slot)),
         R: RangeBounds<Pubkey>,
     {

@@ -364,8 +356,8 @@ impl<T: 'static + Clone> AccountsIndex<T> {
     // Scan accounts and return latest version of each account that is either:
     // 1) rooted or
     // 2) present in ancestors
-    fn do_scan_accounts<'a, F, R>(
-        &'a self,
+    fn do_scan_accounts<F, R>(
+        &self,
         ancestors: &Ancestors,
         mut func: F,
         range: Option<R>,

@@ -922,7 +914,7 @@ mod tests {
         run_test_range_indexes(
             &index,
             &pubkeys,
-            Some(ITER_BATCH_SIZE - 1 as usize),
+            Some(ITER_BATCH_SIZE - 1_usize),
             Some(2 * ITER_BATCH_SIZE as usize + 1),
         );
     }
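The signature rewrites above drop lifetime parameters the compiler can elide on its own (clippy::needless_lifetimes); the borrow-checking result is identical. A reduced illustration with a hypothetical type:

struct Store {
    items: Vec<String>,
}

impl Store {
    // Before: fn first<'a>(&'a self) -> Option<&'a String>
    // After: the elided form below means exactly the same thing.
    fn first(&self) -> Option<&String> {
        self.items.first()
    }
}

fn main() {
    let store = Store {
        items: vec!["a".to_string()],
    };
    assert_eq!(store.first().map(String::as_str), Some("a"));
}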
@@ -397,7 +397,7 @@ impl AppendVec {
         self.path.clone()
     }

-    pub fn accounts<'a>(&'a self, mut start: usize) -> Vec<StoredAccount<'a>> {
+    pub fn accounts(&self, mut start: usize) -> Vec<StoredAccount> {
         let mut accounts = vec![];
         while let Some((account, next)) = self.get_account(start) {
             accounts.push(account);

@@ -685,7 +685,7 @@ pub mod tests {

         let pubkey = solana_sdk::pubkey::new_rand();
         let owner = Pubkey::default();
-        let data_len = 3 as u64;
+        let data_len = 3_u64;
         let mut account = Account::new(0, data_len as usize, &owner);
         account.data = b"abc".to_vec();
         let stored_meta = StoredMeta {

@@ -832,6 +832,7 @@ pub struct Bank {
     bpf_compute_budget: Option<BpfComputeBudget>,

     /// Builtin programs activated dynamically by feature
+    #[allow(clippy::rc_buffer)]
     feature_builtins: Arc<Vec<(Builtin, Pubkey, ActivationType)>>,

     /// Last time when the cluster info vote listener has synced with this bank

@@ -887,9 +888,9 @@ impl Bank {
         additional_builtins: Option<&Builtins>,
     ) -> Self {
         let mut bank = Self::default();
+        bank.ancestors.insert(bank.slot(), 0);
         bank.transaction_debug_keys = debug_keys;
         bank.cluster_type = Some(genesis_config.cluster_type);
-        bank.ancestors.insert(bank.slot(), 0);

         bank.rc.accounts = Arc::new(Accounts::new(paths, &genesis_config.cluster_type));
         bank.process_genesis_config(genesis_config);

@@ -1257,6 +1258,7 @@ impl Bank {
         }

         let mut ancestors: Vec<_> = roots.into_iter().collect();
+        #[allow(clippy::stable_sort_primitive)]
         ancestors.sort();
         ancestors
     }

@@ -4940,13 +4942,13 @@ pub(crate) mod tests {
     impl Bank {
         fn epoch_stake_keys(&self) -> Vec<Epoch> {
             let mut keys: Vec<Epoch> = self.epoch_stakes.keys().copied().collect();
-            keys.sort();
+            keys.sort_unstable();
             keys
         }

         fn epoch_stake_key_info(&self) -> (Epoch, Epoch, usize) {
             let mut keys: Vec<Epoch> = self.epoch_stakes.keys().copied().collect();
-            keys.sort();
+            keys.sort_unstable();
             (*keys.first().unwrap(), *keys.last().unwrap(), keys.len())
         }
     }

@@ -9521,6 +9523,7 @@ pub(crate) mod tests {
         let (genesis_config, mint_keypair) = create_genesis_config(500);
         let mut bank = Bank::new(&genesis_config);

+        #[allow(clippy::unnecessary_wraps)]
         fn mock_process_instruction(
             _program_id: &Pubkey,
             _keyed_accounts: &[KeyedAccount],

@@ -9704,6 +9707,7 @@ pub(crate) mod tests {
         assert_eq!(result, Err(TransactionError::SanitizeFailure));
     }

+    #[allow(clippy::unnecessary_wraps)]
     fn mock_ok_vote_processor(
         _pubkey: &Pubkey,
         _ka: &[KeyedAccount],

@@ -10035,7 +10039,7 @@ pub(crate) mod tests {
         let mut consumed_budgets = (0..3)
             .map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account))
             .collect::<Vec<_>>();
-        consumed_budgets.sort();
+        consumed_budgets.sort_unstable();
         // consumed_budgets represents the count of alive accounts in the three slots 0,1,2
         assert_eq!(consumed_budgets, vec![0, 1, 9]);
     }

@@ -10136,6 +10140,7 @@ pub(crate) mod tests {
     fn test_add_builtin_no_overwrite() {
         let (genesis_config, _mint_keypair) = create_genesis_config(100_000);

+        #[allow(clippy::unnecessary_wraps)]
         fn mock_ix_processor(
             _pubkey: &Pubkey,
             _ka: &[KeyedAccount],

@@ -10181,6 +10186,7 @@ pub(crate) mod tests {
     fn test_add_builtin_loader_no_overwrite() {
         let (genesis_config, _mint_keypair) = create_genesis_config(100_000);

+        #[allow(clippy::unnecessary_wraps)]
         fn mock_ix_processor(
             _pubkey: &Pubkey,
             _ka: &[KeyedAccount],

@@ -254,8 +254,8 @@ mod test {
     fn test_random() {
         let mut b1: Bloom<Hash> = Bloom::random(10, 0.1, 100);
         let mut b2: Bloom<Hash> = Bloom::random(10, 0.1, 100);
-        b1.keys.sort();
-        b2.keys.sort();
+        b1.keys.sort_unstable();
+        b2.keys.sort_unstable();
         assert_ne!(b1.keys, b2.keys);
     }
     // Bloom filter math in python

@@ -255,6 +255,7 @@ fn unpack_genesis<A: Read, P: AsRef<Path>>(

 fn is_valid_genesis_archive_entry(parts: &[&str], kind: tar::EntryType) -> bool {
     trace!("validating: {:?} {:?}", parts, kind);
+    #[allow(clippy::match_like_matches_macro)]
     match (parts, kind) {
         (["genesis.bin"], GNUSparse) => true,
         (["genesis.bin"], Regular) => true,
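The archive-entry hunk above keeps its multi-arm `match` and allows clippy::match_like_matches_macro; for a short case the lint would suggest the `matches!` macro instead. A sketch of the two equivalent forms, using an illustrative entry type rather than the real `tar::EntryType`:

enum EntryKind {
    Regular,
    Sparse,
    Directory,
}

fn is_genesis_file(name: &str, kind: EntryKind) -> bool {
    // The match form clippy flags:
    //     match (name, kind) {
    //         ("genesis.bin", EntryKind::Regular) => true,
    //         ("genesis.bin", EntryKind::Sparse) => true,
    //         _ => false,
    //     }
    matches!(
        (name, kind),
        ("genesis.bin", EntryKind::Regular) | ("genesis.bin", EntryKind::Sparse)
    )
}

fn main() {
    assert!(is_genesis_file("genesis.bin", EntryKind::Regular));
    assert!(is_genesis_file("genesis.bin", EntryKind::Sparse));
    assert!(!is_genesis_file("rocksdb", EntryKind::Directory));
}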
@@ -422,7 +422,7 @@ impl MessageProcessor {
         instruction: &'a CompiledInstruction,
         executable_accounts: &'a [(Pubkey, RefCell<Account>)],
         accounts: &'a [Rc<RefCell<Account>>],
-    ) -> Result<Vec<KeyedAccount<'a>>, InstructionError> {
+    ) -> Vec<KeyedAccount<'a>> {
         let mut keyed_accounts = create_keyed_readonly_accounts(&executable_accounts);
         let mut keyed_accounts2: Vec<_> = instruction
             .accounts

@@ -440,7 +440,7 @@ impl MessageProcessor {
             })
             .collect();
         keyed_accounts.append(&mut keyed_accounts2);
-        Ok(keyed_accounts)
+        keyed_accounts
     }

     /// Process an instruction

@@ -604,7 +604,7 @@ impl MessageProcessor {

         // Construct keyed accounts
         let keyed_accounts =
-            Self::create_keyed_accounts(message, instruction, executable_accounts, accounts)?;
+            Self::create_keyed_accounts(message, instruction, executable_accounts, accounts);

         // Invoke callee
         invoke_context.push(instruction.program_id(&message.account_keys))?;

@@ -794,7 +794,7 @@ impl MessageProcessor {
             feature_set,
         );
         let keyed_accounts =
-            Self::create_keyed_accounts(message, instruction, executable_accounts, accounts)?;
+            Self::create_keyed_accounts(message, instruction, executable_accounts, accounts);
         self.process_instruction(&keyed_accounts, &instruction.data, &mut invoke_context)?;
         Self::verify(
             message,

@@ -854,7 +854,6 @@ mod tests {
         message::Message,
         native_loader::create_loadable_account,
     };
-    use std::iter::FromIterator;

     #[test]
     fn test_invoke_context() {

@@ -920,8 +919,7 @@ mod tests {

             // modify account owned by the program
             accounts[owned_index].borrow_mut().data[0] = (MAX_DEPTH + owned_index) as u8;
-            let mut these_accounts =
-                Vec::from_iter(accounts[not_owned_index..owned_index + 1].iter().cloned());
+            let mut these_accounts = accounts[not_owned_index..owned_index + 1].to_vec();
             these_accounts.push(Rc::new(RefCell::new(Account::new(
                 1,
                 1,

@@ -1805,6 +1803,7 @@ mod tests {
     #[test]
     fn test_debug() {
         let mut message_processor = MessageProcessor::default();
+        #[allow(clippy::unnecessary_wraps)]
         fn mock_process_instruction(
             _program_id: &Pubkey,
             _keyed_accounts: &[KeyedAccount],

@@ -1813,6 +1812,7 @@ mod tests {
         ) -> Result<(), InstructionError> {
             Ok(())
         }
+        #[allow(clippy::unnecessary_wraps)]
         fn mock_ix_processor(
             _pubkey: &Pubkey,
             _ka: &[KeyedAccount],

@@ -127,9 +127,11 @@ mod tests {
         let new_epoch = 3;

         let (mut created_account, mut existing_account) = {
-            let mut account = Account::default();
-            account.lamports = old_lamports;
-            account.rent_epoch = old_epoch;
+            let account = Account {
+                lamports: old_lamports,
+                rent_epoch: old_epoch,
+                ..Account::default()
+            };

             (account.clone(), account)
         };

@@ -261,7 +261,7 @@ mod test_bank_serialize {

     // These some what long test harness is required to freeze the ABI of
     // Bank's serialization due to versioned nature
-    #[frozen_abi(digest = "8bNY87hccyDYCRar1gM3NSvpvtiUM3W3rGeJLJayz42e")]
+    #[frozen_abi(digest = "5NHt6PLRJPWJH9FUcweSsUWgN5hXMfXj1BduDrDHH73w")]
     #[derive(Serialize, AbiExample)]
     pub struct BankAbiTestWrapperFuture {
         #[serde(serialize_with = "wrapper_future")]

@@ -272,7 +272,7 @@ pub mod tests {
         )
     }

     // add stake to a vote_pubkey ( stake )
     pub fn create_warming_stake_account(
         stake: u64,
         epoch: Epoch,

@@ -295,8 +295,10 @@ pub mod tests {
     #[test]
     fn test_stakes_basic() {
         for i in 0..4 {
-            let mut stakes = Stakes::default();
-            stakes.epoch = i;
+            let mut stakes = Stakes {
+                epoch: i,
+                ..Stakes::default()
+            };

             let ((vote_pubkey, vote_account), (stake_pubkey, mut stake_account)) =
                 create_staked_node_accounts(10);

@@ -372,8 +374,10 @@ pub mod tests {

     #[test]
     fn test_stakes_vote_account_disappear_reappear() {
-        let mut stakes = Stakes::default();
-        stakes.epoch = 4;
+        let mut stakes = Stakes {
+            epoch: 4,
+            ..Stakes::default()
+        };

         let ((vote_pubkey, mut vote_account), (stake_pubkey, stake_account)) =
             create_staked_node_accounts(10);

@@ -406,8 +410,10 @@ pub mod tests {

     #[test]
     fn test_stakes_change_delegate() {
-        let mut stakes = Stakes::default();
-        stakes.epoch = 4;
+        let mut stakes = Stakes {
+            epoch: 4,
+            ..Stakes::default()
+        };

         let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
             create_staked_node_accounts(10);

@@ -450,8 +456,10 @@ pub mod tests {
     }
     #[test]
     fn test_stakes_multiple_stakers() {
-        let mut stakes = Stakes::default();
-        stakes.epoch = 4;
+        let mut stakes = Stakes {
+            epoch: 4,
+            ..Stakes::default()
+        };

         let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
             create_staked_node_accounts(10);

@@ -500,8 +508,10 @@ pub mod tests {

     #[test]
     fn test_stakes_not_delegate() {
-        let mut stakes = Stakes::default();
-        stakes.epoch = 4;
+        let mut stakes = Stakes {
+            epoch: 4,
+            ..Stakes::default()
+        };

         let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
             create_staked_node_accounts(10);

@@ -226,7 +226,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
                 (
                     *slot,
                     self.roots.contains(slot),
-                    self.slot_deltas.get(slot).unwrap_or_else(|| &empty).clone(),
+                    self.slot_deltas.get(slot).unwrap_or(&empty).clone(),
                 )
             })
             .collect()

@@ -309,9 +309,11 @@ mod tests {
     fn test_fee_rate_governor_derived_adjust() {
         solana_logger::setup();

-        let mut f = FeeRateGovernor::default();
-        f.target_lamports_per_signature = 100;
-        f.target_signatures_per_slot = 100;
+        let mut f = FeeRateGovernor {
+            target_lamports_per_signature: 100,
+            target_signatures_per_slot: 100,
+            ..FeeRateGovernor::default()
+        };
         f = FeeRateGovernor::new_derived(&f, 0);

         // Ramp fees up

@@ -109,9 +109,11 @@ mod tests {
             (0, true)
         );

-        let mut custom_rent = Rent::default();
-        custom_rent.lamports_per_byte_year = 5;
-        custom_rent.exemption_threshold = 2.5;
+        let custom_rent = Rent {
+            lamports_per_byte_year: 5,
+            exemption_threshold: 2.5,
+            ..Rent::default()
+        };

         assert_eq!(
             custom_rent.due(0, 2, 1.2),

@@ -199,6 +199,7 @@ impl<'de, T: Deserialize<'de>> Deserialize<'de> for ShortVec<T> {
 }

 /// Return the decoded value and how many bytes it consumed.
+#[allow(clippy::result_unit_err)]
 pub fn decode_len(bytes: &[u8]) -> Result<(usize, usize), ()> {
     let mut len = 0;
     let mut size = 0;

@@ -136,7 +136,7 @@ pub fn create_test_recent_blockhashes(start: usize) -> RecentBlockhashes {
         .iter()
         .map(|(i, hash, fee_calc)| IterItem(*i, hash, fee_calc))
         .collect();
-    RecentBlockhashes::from_iter(bhq.into_iter())
+    bhq.into_iter().collect()
 }

 #[cfg(test)]
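Several hunks above replace `X::from_iter(iter)` with `iter.collect()`; collect() goes through the same FromIterator impl, but the call sites no longer need `use std::iter::FromIterator;`, which is why those imports are dropped. Minimal sketch:

use std::collections::HashSet;

fn main() {
    let slots = vec![1_u64, 2, 2, 3];

    // Before: let set: HashSet<u64> = HashSet::from_iter(slots.into_iter());
    let set: HashSet<u64> = slots.into_iter().collect();

    assert_eq!(set.len(), 3);
}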
@@ -22,6 +22,7 @@ impl HardForks {
         } else {
             self.hard_forks.push((new_slot, 1));
         }
+        #[allow(clippy::stable_sort_primitive)]
         self.hard_forks.sort();
     }

@@ -178,10 +178,9 @@ mod test {
         nonce::{self, State},
         nonce_account::verify_nonce_account,
         system_instruction::NonceError,
-        sysvar::recent_blockhashes::{create_test_recent_blockhashes, RecentBlockhashes},
+        sysvar::recent_blockhashes::create_test_recent_blockhashes,
     };
     use solana_program::hash::Hash;
-    use std::iter::FromIterator;

     #[test]
     fn default_is_uninitialized() {

@@ -322,7 +321,7 @@ mod test {
             keyed_account
                 .initialize_nonce_account(&authorized, &recent_blockhashes, &rent)
                 .unwrap();
-            let recent_blockhashes = RecentBlockhashes::from_iter(vec![].into_iter());
+            let recent_blockhashes = vec![].into_iter().collect();
             let result = keyed_account.advance_nonce_account(&recent_blockhashes, &signers);
             assert_eq!(result, Err(NonceError::NoRecentBlockhashes.into()));
         })

@@ -764,7 +763,7 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
             signers.insert(*keyed_account.signer_key().unwrap());
-            let recent_blockhashes = RecentBlockhashes::from_iter(vec![].into_iter());
+            let recent_blockhashes = vec![].into_iter().collect();
             let authorized = *keyed_account.unsigned_key();
             let result =
                 keyed_account.initialize_nonce_account(&authorized, &recent_blockhashes, &rent);

@@ -85,7 +85,7 @@ impl PartialEq for Packet {
     fn eq(&self, other: &Packet) -> bool {
         let self_data: &[u8] = self.data.as_ref();
         let other_data: &[u8] = other.data.as_ref();
-        self.meta == other.meta && self_data[..self.meta.size] == other_data[..other.meta.size]
+        self.meta == other.meta && self_data[..self.meta.size] == other_data[..self.meta.size]
     }
 }

@@ -282,7 +282,7 @@ pub struct MockInvokeContext {
     pub bpf_compute_budget: BpfComputeBudget,
     pub compute_meter: MockComputeMeter,
     pub programs: Vec<(Pubkey, ProcessInstructionWithContext)>,
-    invoke_depth: usize,
+    pub invoke_depth: usize,
 }
 impl Default for MockInvokeContext {
     fn default() -> Self {

@@ -11,7 +11,7 @@ where
     let sorted = BinaryHeap::from_iter(recent_blockhash_iter);
     let sorted_iter = IntoIterSorted::new(sorted);
     let recent_blockhash_iter = sorted_iter.take(MAX_ENTRIES);
-    let recent_blockhashes = RecentBlockhashes::from_iter(recent_blockhash_iter);
+    let recent_blockhashes: RecentBlockhashes = recent_blockhash_iter.collect();
     to_account(&recent_blockhashes, account)
 }

@@ -27,7 +27,6 @@ use std::{
     collections::{HashMap, HashSet},
     error,
     fs::File,
-    iter::FromIterator,
     path::PathBuf,
     process,
     str::FromStr,

@@ -296,7 +295,7 @@ fn classify_block_producers(
     };

     let confirmed_blocks = rpc_client.get_confirmed_blocks(first_slot, Some(last_slot_in_epoch))?;
-    let confirmed_blocks: HashSet<Slot> = HashSet::from_iter(confirmed_blocks.into_iter());
+    let confirmed_blocks: HashSet<Slot> = confirmed_blocks.into_iter().collect();

     let mut poor_block_producers = HashSet::new();
     let mut quality_block_producers = HashSet::new();

@@ -445,7 +445,7 @@ impl BigTable {
         rows.into_iter()
             .next()
             .map(|r| r.1)
-            .ok_or_else(|| Error::RowNotFound)
+            .ok_or(Error::RowNotFound)
     }

     /// Store data for one or more `table` rows in the `family_name` Column family

@@ -37,7 +37,7 @@ pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usiz
 #[cfg(target_os = "linux")]
 pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usize, usize)> {
     use libc::{
-        c_void, iovec, mmsghdr, recvmmsg, sockaddr_in, socklen_t, time_t, timespec, MSG_WAITFORONE,
+        c_void, iovec, mmsghdr, recvmmsg, sockaddr_in, socklen_t, timespec, MSG_WAITFORONE,
     };
     use nix::sys::socket::InetAddr;
     use std::mem;

@@ -62,7 +62,7 @@ pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usize,
         hdrs[i].msg_hdr.msg_iovlen = 1;
     }
     let mut ts = timespec {
-        tv_sec: 1 as time_t,
+        tv_sec: 1,
         tv_nsec: 0,
     };

@@ -169,7 +169,7 @@ mod test {
     use std::sync::Arc;
     use std::time::Duration;

-    fn get_msgs(r: PacketReceiver, num: &mut usize) -> Result<()> {
+    fn get_msgs(r: PacketReceiver, num: &mut usize) {
         for _ in 0..10 {
             let m = r.recv_timeout(Duration::new(1, 0));
             if m.is_err() {

@@ -182,9 +182,8 @@ mod test {
                 break;
             }
         }

-        Ok(())
     }

     #[test]
     fn streamer_debug() {
         write!(io::sink(), "{:?}", Packet::default()).unwrap();

@@ -218,7 +217,7 @@ mod test {
         };

         let mut num = 5;
-        get_msgs(r_reader, &mut num).expect("get_msgs");
+        get_msgs(r_reader, &mut num);
         assert_eq!(num, 0);
         exit.store(true, Ordering::Relaxed);
         t_receiver.join().expect("join");

@@ -265,7 +265,7 @@ fn build_messages(
             return Err(Error::ExitSignal);
         }
         let new_stake_account_keypair = Keypair::new();
-        let lockup_date = if allocation.lockup_date == "" {
+        let lockup_date = if allocation.lockup_date.is_empty() {
             None
         } else {
             Some(allocation.lockup_date.parse::<DateTime<Utc>>().unwrap())

@@ -336,7 +336,7 @@ fn send_messages(
         signers.push(&*stake_args.stake_authority);
         signers.push(&*stake_args.withdraw_authority);
         signers.push(&new_stake_account_keypair);
-        if allocation.lockup_date != "" {
+        if !allocation.lockup_date.is_empty() {
             if let Some(lockup_authority) = &stake_args.lockup_authority {
                 signers.push(&**lockup_authority);
             } else {
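The last two hunks swap `== ""` and `!= ""` comparisons for `is_empty()`, per clippy::comparison_to_empty. A tiny sketch with a hypothetical field name:

fn lockup_label(lockup_date: &str) -> &str {
    // Before: if lockup_date != "" { ... }
    if !lockup_date.is_empty() {
        "locked"
    } else {
        "unlocked"
    }
}

fn main() {
    assert_eq!(lockup_label(""), "unlocked");
    assert_eq!(lockup_label("2021-01-01"), "locked");
}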
Some files were not shown because too many files have changed in this diff.