remove raptor coding experiments (#255)

Trent Nelson 2024-03-14 22:01:15 -06:00 committed by GHA: Update Upstream From Fork
parent 9e394bd0e5
commit e80f8fa9e6
3 changed files with 2 additions and 64 deletions

Cargo.lock (generated)

@@ -4274,12 +4274,6 @@ dependencies = [
  "rand_core 0.6.4",
 ]
 
-[[package]]
-name = "raptorq"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cc8cd0bcb2d520fff368264b5a6295e064c60955349517d09b14473afae4856"
-
 [[package]]
 name = "rayon"
 version = "1.9.0"

@@ -5796,7 +5790,6 @@ dependencies = [
  "quinn",
  "rand 0.8.5",
  "rand_chacha 0.3.1",
- "raptorq",
  "rayon",
  "rolling-file",
  "rustc_version 0.4.0",

ledger/Cargo.toml

@@ -85,7 +85,6 @@ trees = { workspace = true }
 [dev-dependencies]
 assert_matches = { workspace = true }
 fs_extra = { workspace = true }
-raptorq = { workspace = true }
 serde_json = { workspace = true }
 serial_test = { workspace = true }
 # See order-crates-for-publishing.py for using this unusual `path = "."`

ledger/benches/shredder.rs

@@ -4,26 +4,17 @@
 extern crate test;
 
 use {
-    rand::{seq::SliceRandom, Rng},
-    raptorq::{Decoder, Encoder},
+    rand::Rng,
     solana_entry::entry::{create_ticks, Entry},
     solana_ledger::shred::{
         max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, ReedSolomonCache,
         Shred, ShredFlags, Shredder, DATA_SHREDS_PER_FEC_BLOCK, LEGACY_SHRED_DATA_CAPACITY,
     },
     solana_perf::test_tx,
-    solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
+    solana_sdk::{hash::Hash, signature::Keypair},
     test::Bencher,
 };
 
-// Copied these values here to avoid exposing shreds
-// internals only for the sake of benchmarks.
-// size of nonce: 4
-// size of common shred header: 83
-// size of coding shred header: 6
-const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE - 4 - 83 - 6;
-
 fn make_test_entry(txs_per_entry: u64) -> Entry {
     Entry {
         num_hashes: 100_000,
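
For reference, the removed VALID_SHRED_DATA_LEN folds the shred wire layout into one expression. A minimal standalone sketch of the same arithmetic, assuming solana_sdk's PACKET_DATA_SIZE of 1232 bytes (the 1280-byte IPv6 minimum MTU less a 40-byte IPv6 header and an 8-byte UDP header); the SIZE_OF_* names are hypothetical, with values taken from the removed comments:

// Sketch: recompute the removed constant without touching shred internals.
// PACKET_DATA_SIZE mirrors solana_sdk::packet::PACKET_DATA_SIZE.
const PACKET_DATA_SIZE: usize = 1280 - 40 - 8; // 1232
const SIZE_OF_NONCE: usize = 4; // hypothetical names; values per the removed comments
const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE
    - SIZE_OF_NONCE
    - SIZE_OF_COMMON_SHRED_HEADER
    - SIZE_OF_CODING_SHRED_HEADER;

fn main() {
    // 1232 - 4 - 83 - 6 = 1139 bytes of payload per RaptorQ symbol.
    assert_eq!(VALID_SHRED_DATA_LEN, 1139);
}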
@@ -61,17 +52,6 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
     data_shreds
 }
 
-fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
-    let data_shreds = make_shreds(num_shreds);
-    let mut data: Vec<u8> = vec![0; num_shreds * VALID_SHRED_DATA_LEN];
-    for (i, shred) in (data_shreds[0..num_shreds]).iter().enumerate() {
-        data[i * VALID_SHRED_DATA_LEN..(i + 1) * VALID_SHRED_DATA_LEN]
-            .copy_from_slice(&shred.payload()[..VALID_SHRED_DATA_LEN]);
-    }
-    data
-}
-
 #[bench]
 fn bench_shredder_ticks(bencher: &mut Bencher) {
     let kp = Keypair::new();
@@ -197,37 +177,3 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
         Shredder::try_recovery(coding_shreds[..].to_vec(), &reed_solomon_cache).unwrap();
     })
 }
-
-#[bench]
-fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
-    let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-    let data = make_concatenated_shreds(symbol_count);
-    bencher.iter(|| {
-        let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-        encoder.get_encoded_packets(symbol_count as u32);
-    })
-}
-
-#[bench]
-fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
-    let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-    let data = make_concatenated_shreds(symbol_count);
-    let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-    let mut packets = encoder.get_encoded_packets(symbol_count as u32);
-    packets.shuffle(&mut rand::thread_rng());
-    // Here we simulate losing 1 less than 50% of the packets randomly
-    packets.truncate(packets.len() - packets.len() / 2 + 1);
-    bencher.iter(|| {
-        let mut decoder = Decoder::new(encoder.get_config());
-        let mut result = None;
-        for packet in &packets {
-            result = decoder.decode(packet.clone());
-            if result.is_some() {
-                break;
-            }
-        }
-        assert_eq!(result.unwrap(), data);
-    })
-}
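
Taken together, the two removed benches measured a RaptorQ encode and a decode-under-loss round trip over the concatenated shred payloads. Below is a self-contained sketch of that round trip against the raptorq 1.8 API the benches used (Encoder::with_defaults, get_encoded_packets, get_config, Decoder::decode); the symbol size and count are illustrative stand-ins for the shred constants. Note the truncation keeps len - len/2 + 1 packets, i.e. it drops one packet fewer than half:

use rand::seq::SliceRandom;
use raptorq::{Decoder, Encoder};

fn main() {
    // Stand-ins for VALID_SHRED_DATA_LEN and DATA_SHREDS_PER_FEC_BLOCK.
    let symbol_len: u16 = 1139;
    let symbol_count: usize = 32;
    let data: Vec<u8> = (0..symbol_count * symbol_len as usize)
        .map(|i| (i % 251) as u8)
        .collect();

    // Encode the object as source packets plus `symbol_count` repair packets.
    let encoder = Encoder::with_defaults(&data, symbol_len);
    let mut packets = encoder.get_encoded_packets(symbol_count as u32);

    // Randomly drop just under half of the packets, as the removed
    // decoding bench did.
    packets.shuffle(&mut rand::thread_rng());
    packets.truncate(packets.len() - packets.len() / 2 + 1);

    // Feed surviving packets to the decoder until the object reassembles.
    let mut decoder = Decoder::new(encoder.get_config());
    let mut result = None;
    for packet in packets {
        result = decoder.decode(packet);
        if result.is_some() {
            break;
        }
    }
    assert_eq!(result.expect("enough packets survived to decode"), data);
}

With 32 source symbols expanded to 64 packets, keeping 33 leaves the decoder one packet more than the source count, which a RaptorQ fountain code decodes with high probability; that margin is what made the 50%-loss benchmark deterministic enough to assert on.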