move core tests to core (#3355)
* move core tests to core * remove window * fix up flaky tests * test_entryfication needs a singly-threaded banking_stage * move core benches to core * remove unnecessary dependencies * remove core as a member for now, test it like runtime * stop running tests twice * remove duplicate runs of tests in perf
This commit is contained in:
parent
5e21268ca0
commit
c70412d7bb
|
@ -1972,6 +1972,7 @@ dependencies = [
|
||||||
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
|
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
"ring 0.13.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"rocksdb 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
"serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
|
@ -2480,13 +2481,7 @@ name = "solana-workspace"
|
||||||
version = "0.13.0"
|
version = "0.13.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
"bincode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"bs58 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
"hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
"rayon 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
"reqwest 0.9.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
"serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
"solana 0.13.0",
|
"solana 0.13.0",
|
||||||
"solana-budget-program 0.13.0",
|
"solana-budget-program 0.13.0",
|
||||||
"solana-client 0.13.0",
|
"solana-client 0.13.0",
|
||||||
|
@ -2495,7 +2490,6 @@ dependencies = [
|
||||||
"solana-runtime 0.13.0",
|
"solana-runtime 0.13.0",
|
||||||
"solana-sdk 0.13.0",
|
"solana-sdk 0.13.0",
|
||||||
"solana-vote-api 0.13.0",
|
"solana-vote-api 0.13.0",
|
||||||
"sys-info 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
|
|
26
Cargo.toml
26
Cargo.toml
|
@ -20,13 +20,7 @@ erasure = ["solana/erasure"]
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
bincode = "1.1.2"
|
bincode = "1.1.2"
|
||||||
bs58 = "0.2.0"
|
|
||||||
hashbrown = "0.1.8"
|
|
||||||
log = "0.4.2"
|
log = "0.4.2"
|
||||||
rand = "0.6.5"
|
|
||||||
rayon = "1.0.0"
|
|
||||||
reqwest = "0.9.11"
|
|
||||||
serde_json = "1.0.39"
|
|
||||||
solana = { path = "core", version = "0.13.0" }
|
solana = { path = "core", version = "0.13.0" }
|
||||||
solana-budget-program = { path = "programs/budget", version = "0.13.0" }
|
solana-budget-program = { path = "programs/budget", version = "0.13.0" }
|
||||||
solana-client = { path = "client", version = "0.13.0" }
|
solana-client = { path = "client", version = "0.13.0" }
|
||||||
|
@ -35,33 +29,13 @@ solana-netutil = { path = "netutil", version = "0.13.0" }
|
||||||
solana-runtime = { path = "runtime", version = "0.13.0" }
|
solana-runtime = { path = "runtime", version = "0.13.0" }
|
||||||
solana-sdk = { path = "sdk", version = "0.13.0" }
|
solana-sdk = { path = "sdk", version = "0.13.0" }
|
||||||
solana-vote-api = { path = "programs/vote_api", version = "0.13.0" }
|
solana-vote-api = { path = "programs/vote_api", version = "0.13.0" }
|
||||||
sys-info = "0.5.6"
|
|
||||||
|
|
||||||
[[bench]]
|
|
||||||
name = "banking_stage"
|
|
||||||
|
|
||||||
[[bench]]
|
|
||||||
name = "blocktree"
|
|
||||||
|
|
||||||
[[bench]]
|
|
||||||
name = "ledger"
|
|
||||||
|
|
||||||
[[bench]]
|
|
||||||
name = "gen_keys"
|
|
||||||
|
|
||||||
[[bench]]
|
|
||||||
name = "sigverify"
|
|
||||||
|
|
||||||
[[bench]]
|
|
||||||
required-features = ["chacha"]
|
|
||||||
name = "chacha"
|
|
||||||
|
|
||||||
[workspace]
|
[workspace]
|
||||||
members = [
|
members = [
|
||||||
".",
|
".",
|
||||||
"bench-streamer",
|
"bench-streamer",
|
||||||
"bench-tps",
|
"bench-tps",
|
||||||
"core",
|
|
||||||
"drone",
|
"drone",
|
||||||
"fullnode",
|
"fullnode",
|
||||||
"genesis",
|
"genesis",
|
||||||
|
|
|
@ -39,17 +39,15 @@ fi
|
||||||
|
|
||||||
BENCH_FILE=bench_output.log
|
BENCH_FILE=bench_output.log
|
||||||
BENCH_ARTIFACT=current_bench_results.log
|
BENCH_ARTIFACT=current_bench_results.log
|
||||||
_ cargo +$rust_nightly bench ${V:+--verbose} \
|
|
||||||
|
# Run core benches
|
||||||
|
_ cargo +$rust_nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
||||||
-- -Z unstable-options --format=json | tee "$BENCH_FILE"
|
-- -Z unstable-options --format=json | tee "$BENCH_FILE"
|
||||||
|
|
||||||
# Run bpf benches
|
# Run bpf benches
|
||||||
echo --- program/bpf
|
_ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
|
||||||
(
|
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
|
||||||
set -x
|
|
||||||
cd programs/bpf
|
|
||||||
cargo +$rust_nightly bench ${V:+--verbose} --features=bpf_c \
|
|
||||||
-- -Z unstable-options --format=json --nocapture | tee -a ../../../"$BENCH_FILE"
|
|
||||||
)
|
|
||||||
|
|
||||||
_ cargo +$rust_nightly run --release --package solana-upload-perf \
|
_ cargo +$rust_nightly run --release --package solana-upload-perf \
|
||||||
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" > "$BENCH_ARTIFACT"
|
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" > "$BENCH_ARTIFACT"
|
||||||
|
|
|
@ -27,7 +27,6 @@ test-stable)
|
||||||
|
|
||||||
_ cargo +"$rust_stable" build --all ${V:+--verbose}
|
_ cargo +"$rust_stable" build --all ${V:+--verbose}
|
||||||
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
|
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
|
||||||
_ cargo +"$rust_stable" test --manifest-path runtime/Cargo.toml
|
|
||||||
;;
|
;;
|
||||||
test-stable-perf)
|
test-stable-perf)
|
||||||
echo "Executing $testName"
|
echo "Executing $testName"
|
||||||
|
@ -71,19 +70,7 @@ test-stable-perf)
|
||||||
|
|
||||||
# Run root package library tests
|
# Run root package library tests
|
||||||
_ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
|
_ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
|
||||||
_ cargo +"$rust_stable" test --all --lib ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
|
_ cargo +"$rust_stable" test --all ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
|
||||||
_ cargo +"$rust_stable" test --manifest-path runtime/Cargo.toml
|
|
||||||
|
|
||||||
# Run root package integration tests
|
|
||||||
for test in tests/*.rs; do
|
|
||||||
test=${test##*/} # basename x
|
|
||||||
test=${test%.rs} # basename x .rs
|
|
||||||
(
|
|
||||||
export RUST_LOG="$test"=trace,$RUST_LOG
|
|
||||||
_ cargo +"$rust_stable" test --all ${V:+--verbose} --features="$ROOT_FEATURES" --test="$test" \
|
|
||||||
-- --test-threads=1 --nocapture
|
|
||||||
)
|
|
||||||
done
|
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo "Error: Unknown test: $testName"
|
echo "Error: Unknown test: $testName"
|
||||||
|
|
|
@ -40,6 +40,7 @@ nix = "0.13.0"
|
||||||
rand = "0.6.5"
|
rand = "0.6.5"
|
||||||
rand_chacha = "0.1.1"
|
rand_chacha = "0.1.1"
|
||||||
rayon = "1.0.0"
|
rayon = "1.0.0"
|
||||||
|
reqwest = "0.9.11"
|
||||||
ring = "0.13.2"
|
ring = "0.13.2"
|
||||||
rocksdb = "0.11.0"
|
rocksdb = "0.11.0"
|
||||||
serde = "1.0.89"
|
serde = "1.0.89"
|
||||||
|
@ -67,3 +68,22 @@ hex-literal = "0.1.4"
|
||||||
matches = "0.1.6"
|
matches = "0.1.6"
|
||||||
solana-vote-program = { path = "../programs/vote", version = "0.13.0" }
|
solana-vote-program = { path = "../programs/vote", version = "0.13.0" }
|
||||||
solana-budget-program = { path = "../programs/budget", version = "0.13.0" }
|
solana-budget-program = { path = "../programs/budget", version = "0.13.0" }
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "banking_stage"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "blocktree"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "ledger"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "gen_keys"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "sigverify"
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
required-features = ["chacha"]
|
||||||
|
name = "chacha"
|
||||||
|
|
|
@ -46,6 +46,20 @@ impl BankingStage {
|
||||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||||
verified_receiver: Receiver<VerifiedPackets>,
|
verified_receiver: Receiver<VerifiedPackets>,
|
||||||
|
) -> Self {
|
||||||
|
Self::new_num_threads(
|
||||||
|
cluster_info,
|
||||||
|
poh_recorder,
|
||||||
|
verified_receiver,
|
||||||
|
Self::num_threads(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_num_threads(
|
||||||
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||||
|
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||||
|
verified_receiver: Receiver<VerifiedPackets>,
|
||||||
|
num_threads: u32,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let verified_receiver = Arc::new(Mutex::new(verified_receiver));
|
let verified_receiver = Arc::new(Mutex::new(verified_receiver));
|
||||||
|
|
||||||
|
@ -57,7 +71,7 @@ impl BankingStage {
|
||||||
// Single thread to compute confirmation
|
// Single thread to compute confirmation
|
||||||
let lcs_handle = LeaderConfirmationService::start(&poh_recorder, exit.clone());
|
let lcs_handle = LeaderConfirmationService::start(&poh_recorder, exit.clone());
|
||||||
// Many banks that process transactions in parallel.
|
// Many banks that process transactions in parallel.
|
||||||
let mut bank_thread_hdls: Vec<JoinHandle<()>> = (0..Self::num_threads())
|
let mut bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
|
||||||
.map(|_| {
|
.map(|_| {
|
||||||
let verified_receiver = verified_receiver.clone();
|
let verified_receiver = verified_receiver.clone();
|
||||||
let poh_recorder = poh_recorder.clone();
|
let poh_recorder = poh_recorder.clone();
|
||||||
|
@ -437,15 +451,18 @@ pub fn create_test_recorder(
|
||||||
Receiver<WorkingBankEntries>,
|
Receiver<WorkingBankEntries>,
|
||||||
) {
|
) {
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
let (poh_recorder, entry_receiver) = PohRecorder::new(
|
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
|
||||||
bank.tick_height(),
|
bank.tick_height(),
|
||||||
bank.last_blockhash(),
|
bank.last_blockhash(),
|
||||||
bank.slot(),
|
bank.slot(),
|
||||||
Some(4),
|
Some(4),
|
||||||
bank.ticks_per_slot(),
|
bank.ticks_per_slot(),
|
||||||
);
|
);
|
||||||
|
poh_recorder.set_bank(&bank);
|
||||||
|
|
||||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||||
let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);
|
let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);
|
||||||
|
|
||||||
(exit, poh_recorder, poh_service, entry_receiver)
|
(exit, poh_recorder, poh_service, entry_receiver)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -489,7 +506,6 @@ mod tests {
|
||||||
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
|
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
|
||||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
|
||||||
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
|
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
|
||||||
trace!("sending bank");
|
trace!("sending bank");
|
||||||
sleep(Duration::from_millis(600));
|
sleep(Duration::from_millis(600));
|
||||||
|
@ -520,7 +536,6 @@ mod tests {
|
||||||
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
|
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
|
||||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
|
||||||
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
|
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
|
||||||
|
|
||||||
// fund another account so we can send 2 good transactions in a single batch.
|
// fund another account so we can send 2 good transactions in a single batch.
|
||||||
|
@ -592,17 +607,12 @@ mod tests {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_banking_stage_entryfication() {
|
fn test_banking_stage_entryfication() {
|
||||||
|
solana_logger::setup();
|
||||||
// In this attack we'll demonstrate that a verifier can interpret the ledger
|
// In this attack we'll demonstrate that a verifier can interpret the ledger
|
||||||
// differently if either the server doesn't signal the ledger to add an
|
// differently if either the server doesn't signal the ledger to add an
|
||||||
// Entry OR if the verifier tries to parallelize across multiple Entries.
|
// Entry OR if the verifier tries to parallelize across multiple Entries.
|
||||||
let (genesis_block, mint_keypair) = GenesisBlock::new(2);
|
let (genesis_block, mint_keypair) = GenesisBlock::new(2);
|
||||||
let bank = Arc::new(Bank::new(&genesis_block));
|
|
||||||
let (verified_sender, verified_receiver) = channel();
|
let (verified_sender, verified_receiver) = channel();
|
||||||
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
|
|
||||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
|
||||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
|
||||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
|
||||||
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
|
|
||||||
|
|
||||||
// Process a batch that includes a transaction that receives two lamports.
|
// Process a batch that includes a transaction that receives two lamports.
|
||||||
let alice = Keypair::new();
|
let alice = Keypair::new();
|
||||||
|
@ -632,33 +642,39 @@ mod tests {
|
||||||
.send(vec![(packets[0].clone(), vec![1u8])])
|
.send(vec![(packets[0].clone(), vec![1u8])])
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
drop(verified_sender);
|
let entry_receiver = {
|
||||||
|
// start a banking_stage to eat verified receiver
|
||||||
|
let bank = Arc::new(Bank::new(&genesis_block));
|
||||||
|
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
|
||||||
|
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||||
|
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||||
|
let _banking_stage =
|
||||||
|
BankingStage::new_num_threads(&cluster_info, &poh_recorder, verified_receiver, 1);
|
||||||
|
|
||||||
|
// wait for banking_stage to eat the packets
|
||||||
|
while bank.get_balance(&alice.pubkey()) != 1 {
|
||||||
|
sleep(Duration::from_millis(100));
|
||||||
|
}
|
||||||
exit.store(true, Ordering::Relaxed);
|
exit.store(true, Ordering::Relaxed);
|
||||||
poh_service.join().unwrap();
|
poh_service.join().unwrap();
|
||||||
drop(poh_recorder);
|
entry_receiver
|
||||||
|
};
|
||||||
|
drop(verified_sender);
|
||||||
|
|
||||||
// Poll the entry_receiver, feeding it into a new bank
|
// consume the entire entry_receiver, feed it into a new bank
|
||||||
// until the balance is what we expect.
|
// check that the balance is what we expect.
|
||||||
let bank = Bank::new(&genesis_block);
|
|
||||||
for _ in 0..10 {
|
|
||||||
let entries: Vec<_> = entry_receiver
|
let entries: Vec<_> = entry_receiver
|
||||||
.iter()
|
.iter()
|
||||||
.flat_map(|x| x.1.into_iter().map(|e| e.0))
|
.flat_map(|x| x.1.into_iter().map(|e| e.0))
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
|
let bank = Bank::new(&genesis_block);
|
||||||
for entry in &entries {
|
for entry in &entries {
|
||||||
bank.process_transactions(&entry.transactions)
|
bank.process_transactions(&entry.transactions)
|
||||||
.iter()
|
.iter()
|
||||||
.for_each(|x| assert_eq!(*x, Ok(())));
|
.for_each(|x| assert_eq!(*x, Ok(())));
|
||||||
}
|
}
|
||||||
|
|
||||||
if bank.get_balance(&alice.pubkey()) == 1 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
sleep(Duration::from_millis(100));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assert the user holds one lamport, not two. If the stage only outputs one
|
// Assert the user holds one lamport, not two. If the stage only outputs one
|
||||||
// entry, then the second transaction will be rejected, because it drives
|
// entry, then the second transaction will be rejected, because it drives
|
||||||
// the account balance below zero before the credit is added.
|
// the account balance below zero before the credit is added.
|
||||||
|
|
|
@ -95,71 +95,10 @@ mod test {
|
||||||
use crate::erasure::test::{generate_blocktree_from_window, setup_window_ledger};
|
use crate::erasure::test::{generate_blocktree_from_window, setup_window_ledger};
|
||||||
#[cfg(all(feature = "erasure", test))]
|
#[cfg(all(feature = "erasure", test))]
|
||||||
use crate::erasure::{NUM_CODING, NUM_DATA};
|
use crate::erasure::{NUM_CODING, NUM_DATA};
|
||||||
use crate::packet::{index_blobs, Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE};
|
use crate::packet::{index_blobs, Blob};
|
||||||
use crate::streamer::{receiver, responder, PacketReceiver};
|
|
||||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||||
use std::io;
|
|
||||||
use std::io::Write;
|
|
||||||
use std::net::UdpSocket;
|
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
|
||||||
use std::sync::mpsc::channel;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
fn get_msgs(r: PacketReceiver, num: &mut usize) {
|
|
||||||
for _t in 0..5 {
|
|
||||||
let timer = Duration::new(1, 0);
|
|
||||||
match r.recv_timeout(timer) {
|
|
||||||
Ok(m) => *num += m.read().unwrap().packets.len(),
|
|
||||||
e => info!("error {:?}", e),
|
|
||||||
}
|
|
||||||
if *num == 10 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[test]
|
|
||||||
pub fn streamer_debug() {
|
|
||||||
write!(io::sink(), "{:?}", Packet::default()).unwrap();
|
|
||||||
write!(io::sink(), "{:?}", Packets::default()).unwrap();
|
|
||||||
write!(io::sink(), "{:?}", Blob::default()).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
pub fn streamer_send_test() {
|
|
||||||
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
|
||||||
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
|
||||||
|
|
||||||
let addr = read.local_addr().unwrap();
|
|
||||||
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
|
||||||
let (s_reader, r_reader) = channel();
|
|
||||||
let t_receiver = receiver(Arc::new(read), &exit, s_reader, "window-streamer-test");
|
|
||||||
let t_responder = {
|
|
||||||
let (s_responder, r_responder) = channel();
|
|
||||||
let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
|
|
||||||
let mut msgs = Vec::new();
|
|
||||||
for i in 0..10 {
|
|
||||||
let b = SharedBlob::default();
|
|
||||||
{
|
|
||||||
let mut w = b.write().unwrap();
|
|
||||||
w.data[0] = i as u8;
|
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
|
||||||
w.meta.set_addr(&addr);
|
|
||||||
}
|
|
||||||
msgs.push(b);
|
|
||||||
}
|
|
||||||
s_responder.send(msgs).expect("send");
|
|
||||||
t_responder
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut num = 0;
|
|
||||||
get_msgs(r_reader, &mut num);
|
|
||||||
assert_eq!(num, 10);
|
|
||||||
exit.store(true, Ordering::Relaxed);
|
|
||||||
t_receiver.join().expect("join");
|
|
||||||
t_responder.join().expect("join");
|
|
||||||
}
|
|
||||||
#[test]
|
#[test]
|
||||||
pub fn test_find_missing_data_indexes_sanity() {
|
pub fn test_find_missing_data_indexes_sanity() {
|
||||||
let slot = 0;
|
let slot = 0;
|
||||||
|
|
|
@ -498,13 +498,19 @@ fn categorize_blob(
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub mod test {
|
pub mod test {
|
||||||
|
#[derive(Default, Clone)]
|
||||||
|
pub struct WindowSlot {
|
||||||
|
pub data: Option<SharedBlob>,
|
||||||
|
pub coding: Option<SharedBlob>,
|
||||||
|
pub leader_unknown: bool,
|
||||||
|
}
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::blocktree::get_tmp_ledger_path;
|
use crate::blocktree::get_tmp_ledger_path;
|
||||||
use crate::blocktree::Blocktree;
|
use crate::blocktree::Blocktree;
|
||||||
use crate::entry::{make_tiny_test_entries, EntrySlice};
|
use crate::entry::{make_tiny_test_entries, EntrySlice};
|
||||||
|
|
||||||
use crate::packet::{index_blobs, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
|
use crate::packet::{index_blobs, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
|
||||||
use crate::window::WindowSlot;
|
|
||||||
use rand::{thread_rng, Rng};
|
use rand::{thread_rng, Rng};
|
||||||
use solana_sdk::pubkey::Pubkey;
|
use solana_sdk::pubkey::Pubkey;
|
||||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||||
|
|
|
@ -68,8 +68,6 @@ pub mod test_tx;
|
||||||
pub mod tpu;
|
pub mod tpu;
|
||||||
pub mod tvu;
|
pub mod tvu;
|
||||||
pub mod voting_keypair;
|
pub mod voting_keypair;
|
||||||
#[cfg(test)]
|
|
||||||
pub mod window;
|
|
||||||
pub mod window_service;
|
pub mod window_service;
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|
|
@ -208,17 +208,18 @@ mod test {
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
fn get_msgs(r: PacketReceiver, num: &mut usize) {
|
fn get_msgs(r: PacketReceiver, num: &mut usize) -> Result<()> {
|
||||||
for _t in 0..5 {
|
for _ in 0..10 {
|
||||||
let timer = Duration::new(1, 0);
|
let m = r.recv_timeout(Duration::new(1, 0))?;
|
||||||
match r.recv_timeout(timer) {
|
|
||||||
Ok(m) => *num += m.read().unwrap().packets.len(),
|
*num -= m.read().unwrap().packets.len();
|
||||||
_ => info!("get_msgs error"),
|
|
||||||
}
|
if *num == 0 {
|
||||||
if *num == 10 {
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
#[test]
|
#[test]
|
||||||
fn streamer_debug() {
|
fn streamer_debug() {
|
||||||
|
@ -240,7 +241,7 @@ mod test {
|
||||||
let (s_responder, r_responder) = channel();
|
let (s_responder, r_responder) = channel();
|
||||||
let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
|
let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
|
||||||
let mut msgs = Vec::new();
|
let mut msgs = Vec::new();
|
||||||
for i in 0..10 {
|
for i in 0..5 {
|
||||||
let b = SharedBlob::default();
|
let b = SharedBlob::default();
|
||||||
{
|
{
|
||||||
let mut w = b.write().unwrap();
|
let mut w = b.write().unwrap();
|
||||||
|
@ -254,9 +255,9 @@ mod test {
|
||||||
t_responder
|
t_responder
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut num = 0;
|
let mut num = 5;
|
||||||
get_msgs(r_reader, &mut num);
|
get_msgs(r_reader, &mut num).expect("get_msgs");
|
||||||
assert_eq!(num, 10);
|
assert_eq!(num, 0);
|
||||||
exit.store(true, Ordering::Relaxed);
|
exit.store(true, Ordering::Relaxed);
|
||||||
t_receiver.join().expect("join");
|
t_receiver.join().expect("join");
|
||||||
t_responder.join().expect("join");
|
t_responder.join().expect("join");
|
||||||
|
|
|
@ -1,320 +0,0 @@
|
||||||
//! The `window` module defines data structure for storing the tail of the ledger.
|
|
||||||
//!
|
|
||||||
use crate::packet::SharedBlob;
|
|
||||||
use solana_sdk::pubkey::Pubkey;
|
|
||||||
use std::cmp;
|
|
||||||
use std::sync::{Arc, RwLock};
|
|
||||||
|
|
||||||
#[derive(Default, Clone)]
|
|
||||||
pub struct WindowSlot {
|
|
||||||
pub data: Option<SharedBlob>,
|
|
||||||
pub coding: Option<SharedBlob>,
|
|
||||||
pub leader_unknown: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WindowSlot {
|
|
||||||
fn blob_index(&self) -> Option<u64> {
|
|
||||||
match self.data {
|
|
||||||
Some(ref blob) => Some(blob.read().unwrap().index()),
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn clear_data(&mut self) {
|
|
||||||
self.data.take();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Window = Vec<WindowSlot>;
|
|
||||||
pub type SharedWindow = Arc<RwLock<Window>>;
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct WindowIndex {
|
|
||||||
pub data: u64,
|
|
||||||
pub coding: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait WindowUtil {
|
|
||||||
/// Finds available slots, clears them, and returns their indices.
|
|
||||||
fn clear_slots(&mut self, consumed: u64, received: u64) -> Vec<u64>;
|
|
||||||
|
|
||||||
fn window_size(&self) -> u64;
|
|
||||||
|
|
||||||
fn print(&self, id: &Pubkey, consumed: u64) -> String;
|
|
||||||
|
|
||||||
fn blob_idx_in_window(&self, id: &Pubkey, pix: u64, consumed: u64, received: &mut u64) -> bool;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WindowUtil for Window {
|
|
||||||
fn clear_slots(&mut self, consumed: u64, received: u64) -> Vec<u64> {
|
|
||||||
(consumed..received)
|
|
||||||
.filter_map(|pix| {
|
|
||||||
let i = (pix % self.window_size()) as usize;
|
|
||||||
if let Some(blob_idx) = self[i].blob_index() {
|
|
||||||
if blob_idx == pix {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
self[i].clear_data();
|
|
||||||
Some(pix)
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn blob_idx_in_window(&self, id: &Pubkey, pix: u64, consumed: u64, received: &mut u64) -> bool {
|
|
||||||
// Prevent receive window from running over
|
|
||||||
// Got a blob which has already been consumed, skip it
|
|
||||||
// probably from a repair window request
|
|
||||||
if pix < consumed {
|
|
||||||
trace!(
|
|
||||||
"{}: received: {} but older than consumed: {} skipping..",
|
|
||||||
id,
|
|
||||||
pix,
|
|
||||||
consumed
|
|
||||||
);
|
|
||||||
false
|
|
||||||
} else {
|
|
||||||
// received always has to be updated even if we don't accept the packet into
|
|
||||||
// the window. The worst case here is the server *starts* outside
|
|
||||||
// the window, none of the packets it receives fits in the window
|
|
||||||
// and repair requests (which are based on received) are never generated
|
|
||||||
*received = cmp::max(pix, *received);
|
|
||||||
|
|
||||||
if pix >= consumed + self.window_size() {
|
|
||||||
trace!(
|
|
||||||
"{}: received: {} will overrun window: {} skipping..",
|
|
||||||
id,
|
|
||||||
pix,
|
|
||||||
consumed + self.window_size()
|
|
||||||
);
|
|
||||||
false
|
|
||||||
} else {
|
|
||||||
true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn window_size(&self) -> u64 {
|
|
||||||
self.len() as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
fn print(&self, id: &Pubkey, consumed: u64) -> String {
|
|
||||||
let pointer: Vec<_> = self
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(i, _v)| {
|
|
||||||
if i == (consumed % self.window_size()) as usize {
|
|
||||||
"V"
|
|
||||||
} else {
|
|
||||||
" "
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let buf: Vec<_> = self
|
|
||||||
.iter()
|
|
||||||
.map(|v| {
|
|
||||||
if v.data.is_none() && v.coding.is_none() {
|
|
||||||
"O"
|
|
||||||
} else if v.data.is_some() && v.coding.is_some() {
|
|
||||||
"D"
|
|
||||||
} else if v.data.is_some() {
|
|
||||||
// coding.is_none()
|
|
||||||
"d"
|
|
||||||
} else {
|
|
||||||
// data.is_none()
|
|
||||||
"c"
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
format!(
|
|
||||||
"\n{}: WINDOW ({}): {}\n{}: WINDOW ({}): {}",
|
|
||||||
id,
|
|
||||||
consumed,
|
|
||||||
pointer.join(""),
|
|
||||||
id,
|
|
||||||
consumed,
|
|
||||||
buf.join("")
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn calculate_max_repair(
|
|
||||||
num_peers: u64,
|
|
||||||
consumed: u64,
|
|
||||||
received: u64,
|
|
||||||
times: usize,
|
|
||||||
is_next_leader: bool,
|
|
||||||
window_size: u64,
|
|
||||||
) -> u64 {
|
|
||||||
// Calculate the highest blob index that this node should have already received
|
|
||||||
// via avalanche. The avalanche splits data stream into nodes and each node retransmits
|
|
||||||
// the data to their peer nodes. So there's a possibility that a blob (with index lower
|
|
||||||
// than current received index) is being retransmitted by a peer node.
|
|
||||||
let max_repair = if times >= 8 || is_next_leader {
|
|
||||||
// if repair backoff is getting high, or if we are the next leader,
|
|
||||||
// don't wait for avalanche
|
|
||||||
cmp::max(consumed, received)
|
|
||||||
} else {
|
|
||||||
cmp::max(consumed, received.saturating_sub(num_peers))
|
|
||||||
};
|
|
||||||
|
|
||||||
// This check prevents repairing a blob that will cause window to roll over. Even if
|
|
||||||
// the highes_lost blob is actually missing, asking to repair it might cause our
|
|
||||||
// current window to move past other missing blobs
|
|
||||||
cmp::min(consumed + window_size - 1, max_repair)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_window(window_size: usize) -> Window {
|
|
||||||
(0..window_size).map(|_| WindowSlot::default()).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn default_window() -> Window {
|
|
||||||
(0..2048).map(|_| WindowSlot::default()).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod test {
    use crate::packet::{Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE};
    use crate::streamer::{receiver, responder, PacketReceiver};
    use crate::window::{calculate_max_repair, new_window, Window, WindowUtil};
    use solana_sdk::pubkey::Pubkey;
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::time::Duration;

    /// Drain packets from `r` for at most five 1-second receive timeouts,
    /// accumulating the packet count into `num`. Stops early once exactly
    /// 10 packets have been counted (the number sent by `streamer_send_test`).
    fn get_msgs(r: PacketReceiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => *num += m.read().unwrap().packets.len(),
                // Timeouts are expected while the responder is still sending;
                // log and retry rather than failing immediately.
                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
            }
        }
    }
    /// Smoke test: the Debug impls of the packet types must not panic.
    /// Output is discarded via `io::sink()`.
    #[test]
    pub fn streamer_debug() {
        write!(io::sink(), "{:?}", Packet::default()).unwrap();
        write!(io::sink(), "{:?}", Packets::default()).unwrap();
        write!(io::sink(), "{:?}", Blob::default()).unwrap();
    }
    /// End-to-end loopback test: a `responder` thread sends 10 blobs over UDP
    /// to a `receiver` thread, and we assert all 10 packets arrive.
    #[test]
    pub fn streamer_send_test() {
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        // Bound read timeout so the receiver thread can notice `exit`.
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(Arc::new(read), &exit, s_reader, "window-streamer-test");
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
            let mut msgs = Vec::new();
            for i in 0..10 {
                let b = SharedBlob::default();
                {
                    let mut w = b.write().unwrap();
                    // Tag each blob with its index so payloads are distinct.
                    w.data[0] = i as u8;
                    w.meta.size = PACKET_DATA_SIZE;
                    w.meta.set_addr(&addr);
                }
                msgs.push(b);
            }
            s_responder.send(msgs).expect("send");
            t_responder
        };

        let mut num = 0;
        get_msgs(r_reader, &mut num);
        assert_eq!(num, 10);
        // Signal the receiver to stop, then join both worker threads.
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }

    /// Exercises `calculate_max_repair` across the consumed/received/max_repair
    /// boundaries.
    // NOTE(review): the full parameter list of calculate_max_repair is defined
    // above this chunk; the first two arguments appear to be consumed/received
    // counters and the last is the window size — confirm against the definition.
    #[test]
    pub fn test_calculate_max_repair() {
        const WINDOW_SIZE: u64 = 200;

        assert_eq!(calculate_max_repair(0, 10, 90, 0, false, WINDOW_SIZE), 90);
        assert_eq!(calculate_max_repair(15, 10, 90, 32, false, WINDOW_SIZE), 90);
        assert_eq!(calculate_max_repair(15, 10, 90, 0, false, WINDOW_SIZE), 75);
        assert_eq!(calculate_max_repair(90, 10, 90, 0, false, WINDOW_SIZE), 10);
        assert_eq!(calculate_max_repair(90, 10, 50, 0, false, WINDOW_SIZE), 10);
        assert_eq!(calculate_max_repair(90, 10, 99, 0, false, WINDOW_SIZE), 10);
        assert_eq!(calculate_max_repair(90, 10, 101, 0, false, WINDOW_SIZE), 11);
        // Beyond one window ahead, the result is clamped so repairs cannot
        // push the window past other missing blobs (see calculate_max_repair).
        assert_eq!(
            calculate_max_repair(90, 10, 95 + WINDOW_SIZE, 0, false, WINDOW_SIZE),
            WINDOW_SIZE + 5
        );
        assert_eq!(
            calculate_max_repair(90, 10, 99 + WINDOW_SIZE, 0, false, WINDOW_SIZE),
            WINDOW_SIZE + 9
        );
        assert_eq!(
            calculate_max_repair(90, 10, 100 + WINDOW_SIZE, 0, false, WINDOW_SIZE),
            WINDOW_SIZE + 9
        );
        assert_eq!(
            calculate_max_repair(90, 10, 120 + WINDOW_SIZE, 0, false, WINDOW_SIZE),
            WINDOW_SIZE + 9
        );
        assert_eq!(
            calculate_max_repair(50, 100, 50 + WINDOW_SIZE, 0, false, WINDOW_SIZE),
            WINDOW_SIZE
        );
        // With the boolean flag set to true the clamp is lifted and the full
        // requested index is returned.
        assert_eq!(
            calculate_max_repair(50, 100, 50 + WINDOW_SIZE, 0, true, WINDOW_SIZE),
            50 + WINDOW_SIZE
        );
    }

    /// Helper wrapping `Window::blob_idx_in_window`: passes `received` by
    /// mutable reference (the callee may advance it) and returns the flag
    /// together with the possibly-updated `received` value.
    fn wrap_blob_idx_in_window(
        window: &Window,
        id: &Pubkey,
        pix: u64,
        consumed: u64,
        received: u64,
    ) -> (bool, u64) {
        let mut received = received;
        let is_in_window = window.blob_idx_in_window(&id, pix, consumed, &mut received);
        (is_in_window, received)
    }
    /// Checks blob-index admission: indices at or beyond consumed+WINDOW_SIZE
    /// are rejected (but still bump `received`), indices below `consumed` are
    /// rejected without bumping, and in-window indices are accepted.
    #[test]
    pub fn test_blob_idx_in_window() {
        let id = Pubkey::default();
        const WINDOW_SIZE: u64 = 200;
        let window = new_window(WINDOW_SIZE as usize);

        // One-past-window and beyond: rejected, received advances to pix.
        assert_eq!(
            wrap_blob_idx_in_window(&window, &id, 90 + WINDOW_SIZE, 90, 100),
            (false, 90 + WINDOW_SIZE)
        );
        assert_eq!(
            wrap_blob_idx_in_window(&window, &id, 91 + WINDOW_SIZE, 90, 100),
            (false, 91 + WINDOW_SIZE)
        );
        // Below consumed: rejected, received unchanged.
        assert_eq!(
            wrap_blob_idx_in_window(&window, &id, 89, 90, 100),
            (false, 100)
        );

        // In-window indices are accepted; received only advances when pix
        // exceeds the current received value.
        assert_eq!(
            wrap_blob_idx_in_window(&window, &id, 91, 90, 100),
            (true, 100)
        );
        assert_eq!(
            wrap_blob_idx_in_window(&window, &id, 101, 90, 100),
            (true, 101)
        );
    }
}
|
|
|
@ -114,6 +114,7 @@ fn test_two_unbalanced_stakes() {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
#[ignore]
|
||||||
fn test_forwarding() {
|
fn test_forwarding() {
|
||||||
// Set up a cluster where one node is never the leader, so all txs sent to this node
// will have to be forwarded in order to be confirmed
|
Loading…
Reference in New Issue