Bench RaptorQ (#10886)

Co-authored-by: Carl <carl@solana.com>
This commit is contained in:
carllin 2020-07-02 18:31:32 -07:00 committed by GitHub
parent f17ac70bb2
commit f1699721ef
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 109 additions and 2 deletions

10
Cargo.lock generated
View File

@ -3004,6 +3004,15 @@ dependencies = [
"rand_core 0.3.1",
]
[[package]]
name = "raptorq"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e3e0cd5c27717803cbd3151329de9aa784376703a3a850b00c0dae30da86cf2"
dependencies = [
"serde",
]
[[package]]
name = "rayon"
version = "1.3.1"
@ -3937,6 +3946,7 @@ dependencies = [
"num_cpus",
"rand 0.7.3",
"rand_chacha 0.2.2",
"raptorq",
"rayon",
"regex",
"reqwest",

View File

@ -37,6 +37,7 @@ num_cpus = "1.13.0"
num-traits = "0.2"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.4.2"
rayon = "1.3.1"
regex = "1.3.9"
serde = "1.0.112"

View File

@ -2,10 +2,13 @@
extern crate test;
use rand::seq::SliceRandom;
use raptorq::{Decoder, Encoder};
use solana_ledger::entry::{create_ticks, Entry};
use solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
SIZE_OF_DATA_SHRED_PAYLOAD,
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder,
MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE, SHRED_PAYLOAD_SIZE,
SIZE_OF_DATA_SHRED_IGNORED_TAIL, SIZE_OF_DATA_SHRED_PAYLOAD,
};
use solana_perf::test_tx;
use solana_sdk::hash::Hash;
@ -26,6 +29,34 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
.collect()
}
/// Builds at least `num_shreds` data shreds from synthetic unchained entries.
///
/// Panics (via `assert!`) if the shredder produced fewer shreds than requested.
fn make_shreds(num_shreds: usize) -> Vec<Shred> {
    let txs_per_entry = 128;
    // Ask for enough entries to fill twice the requested shred count so the
    // caller is guaranteed at least `num_shreds` data shreds.
    let entry_count = max_entries_per_n_shred(
        &make_test_entry(txs_per_entry),
        2 * num_shreds as u64,
        Some(SIZE_OF_DATA_SHRED_PAYLOAD),
    );
    let entries = make_large_unchained_entries(txs_per_entry, entry_count);
    let shredder =
        Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap();
    let shreds = shredder.entries_to_data_shreds(&entries, true, 0).0;
    assert!(shreds.len() >= num_shreds);
    shreds
}
/// Concatenates the valid payload bytes of `num_shreds` freshly built data
/// shreds into one contiguous buffer (the input format RaptorQ expects).
fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
    let shreds = make_shreds(num_shreds);
    // Only the payload minus the ignored tail carries meaningful bytes.
    let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL) as usize;
    let mut data = vec![0u8; num_shreds * valid_shred_data_len];
    for (chunk, shred) in data
        .chunks_exact_mut(valid_shred_data_len)
        .zip(shreds.iter().take(num_shreds))
    {
        chunk.copy_from_slice(&shred.payload[..valid_shred_data_len]);
    }
    data
}
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
@ -86,3 +117,68 @@ fn bench_deserialize_hdr(bencher: &mut Bencher) {
let _ = Shred::new_from_serialized_shred(payload).unwrap();
})
}
#[bench]
/// Measures Reed-Solomon coding-shred generation over one full FEC block of
/// data shreds.
fn bench_shredder_coding(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
    let data_shreds = make_shreds(symbol_count);
    bencher.iter(|| {
        let coding =
            Shredder::generate_coding_shreds(0, RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], 0);
        // Touch the result so the work is not optimized away.
        coding.len();
    })
}
#[bench]
/// Measures Reed-Solomon recovery from coding shreds alone; the coding shreds
/// are generated once outside the timed loop.
fn bench_shredder_decoding(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
    let data_shreds = make_shreds(symbol_count);
    let coding_shreds =
        Shredder::generate_coding_shreds(0, RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], 0);
    bencher.iter(|| {
        // try_recovery consumes its input, so hand it a fresh clone per iteration.
        Shredder::try_recovery(coding_shreds.clone(), symbol_count, symbol_count, 0, 0, 1).unwrap();
    })
}
#[bench]
/// Measures RaptorQ encoding of one FEC block's worth of concatenated shred
/// payload (the comparison point for `bench_shredder_coding`).
fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK;
    let data = make_concatenated_shreds(symbol_count as usize);
    // Symbol size matches the usable bytes of a shred payload.
    let symbol_size = (SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL) as usize;
    bencher.iter(|| {
        Encoder::with_defaults(&data, symbol_size as u16).get_encoded_packets(symbol_count);
    })
}
#[bench]
/// Measures RaptorQ decoding after losing just under half of the encoded
/// packets (the comparison point for `bench_shredder_decoding`).
///
/// Encoding, shuffling, and truncation happen once outside the timed loop;
/// each iteration rebuilds a fresh `Decoder` and feeds it packets until the
/// source data is recovered.
fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK;
    let data = make_concatenated_shreds(symbol_count as usize);
    let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL) as usize;
    let encoder = Encoder::with_defaults(&data, valid_shred_data_len as u16);
    let mut packets = encoder.get_encoded_packets(symbol_count as u32);
    packets.shuffle(&mut rand::thread_rng());
    // Here we simulate losing 1 less than 50% of the packets randomly
    packets.truncate(packets.len() - packets.len() / 2 + 1);
    bencher.iter(|| {
        let mut decoder = Decoder::new(encoder.get_config());
        let mut result = None;
        for packet in &packets {
            // `decode` returns Some(data) once enough packets have arrived;
            // test with `if let` instead of comparing the Option to None.
            if let Some(decoded) = decoder.decode(packet.clone()) {
                result = Some(decoded);
                break;
            }
        }
        // The surviving packet count is chosen to always allow full recovery.
        assert_eq!(result.unwrap(), data);
    })
}