removes Shred::new_empty_data_shred (#24714)
Shred::new_empty_data_shred returns an invalid shred (i.e. shred.sanitize() returns an error). The method is only used in tests and can easily be replaced with Shred::new_from_data. To keep the shred API surface small, this commit removes the method.
This commit is contained in:
parent
412a5a0d33
commit
081c844d6e
|
@ -51,7 +51,8 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
|||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
|
||||
const NUM_SHREDS: usize = 32;
|
||||
let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
|
||||
let shred = Shred::new_from_data(0, 0, 0, None, false, false, 0, 0, 0);
|
||||
let shreds = vec![shred; NUM_SHREDS];
|
||||
let mut stakes = HashMap::new();
|
||||
const NUM_PEERS: usize = 200;
|
||||
for _ in 0..NUM_PEERS {
|
||||
|
|
|
@ -14,7 +14,7 @@ use {
|
|||
shred::Shred,
|
||||
},
|
||||
solana_runtime::bank::Bank,
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
solana_sdk::{clock::Slot, pubkey::Pubkey},
|
||||
test::Bencher,
|
||||
};
|
||||
|
||||
|
@ -31,16 +31,18 @@ fn make_cluster_nodes<R: Rng>(
|
|||
|
||||
fn get_retransmit_peers_deterministic(
|
||||
cluster_nodes: &ClusterNodes<RetransmitStage>,
|
||||
shred: &mut Shred,
|
||||
slot: Slot,
|
||||
slot_leader: &Pubkey,
|
||||
root_bank: &Bank,
|
||||
num_simulated_shreds: usize,
|
||||
) {
|
||||
let parent_offset = if slot == 0 { 0 } else { 1 };
|
||||
for i in 0..num_simulated_shreds {
|
||||
shred.set_index(i as u32);
|
||||
let index = i as u32;
|
||||
let shred = Shred::new_from_data(slot, index, parent_offset, None, false, false, 0, 0, 0);
|
||||
let (_neighbors, _children) = cluster_nodes.get_retransmit_peers(
|
||||
*slot_leader,
|
||||
shred,
|
||||
&shred,
|
||||
root_bank,
|
||||
solana_gossip::cluster_info::DATA_PLANE_FANOUT,
|
||||
);
|
||||
|
@ -54,12 +56,10 @@ fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: O
|
|||
let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
|
||||
let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
|
||||
let slot = rand::random::<u64>();
|
||||
let mut shred = Shred::new_empty_data_shred();
|
||||
shred.set_slot(slot);
|
||||
b.iter(|| {
|
||||
get_retransmit_peers_deterministic(
|
||||
&cluster_nodes,
|
||||
&mut shred,
|
||||
slot,
|
||||
&slot_leader,
|
||||
&bank,
|
||||
NUM_SIMULATED_SHREDS,
|
||||
|
|
|
@ -107,7 +107,7 @@ pub(crate) mod tests {
|
|||
let repair_type = ShredRepairType::Orphan(9);
|
||||
let mut outstanding_requests = OutstandingRequests::default();
|
||||
let nonce = outstanding_requests.add_request(repair_type, timestamp());
|
||||
let shred = Shred::new_empty_data_shred();
|
||||
let shred = Shred::new_from_data(0, 0, 0, None, false, false, 0, 0, 0);
|
||||
|
||||
let expire_timestamp = outstanding_requests
|
||||
.requests
|
||||
|
@ -127,7 +127,7 @@ pub(crate) mod tests {
|
|||
let mut outstanding_requests = OutstandingRequests::default();
|
||||
let nonce = outstanding_requests.add_request(repair_type, timestamp());
|
||||
|
||||
let shred = Shred::new_empty_data_shred();
|
||||
let shred = Shred::new_from_data(0, 0, 0, None, false, false, 0, 0, 0);
|
||||
let mut expire_timestamp = outstanding_requests
|
||||
.requests
|
||||
.get(&nonce)
|
||||
|
|
|
@ -1305,6 +1305,9 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn test_verify_shred_response() {
|
||||
fn new_test_data_shred(slot: Slot, index: u32) -> Shred {
|
||||
Shred::new_from_data(slot, index, 1, None, false, false, 0, 0, 0)
|
||||
}
|
||||
let repair = ShredRepairType::Orphan(9);
|
||||
// Ensure new options are added to this test
|
||||
match repair {
|
||||
|
@ -1317,41 +1320,34 @@ mod tests {
|
|||
let index = 5;
|
||||
|
||||
// Orphan
|
||||
let mut shred = Shred::new_empty_data_shred();
|
||||
shred.set_slot(slot);
|
||||
let shred = new_test_data_shred(slot, 0);
|
||||
let request = ShredRepairType::Orphan(slot);
|
||||
assert!(request.verify_response(&shred));
|
||||
shred.set_slot(slot - 1);
|
||||
let shred = new_test_data_shred(slot - 1, 0);
|
||||
assert!(request.verify_response(&shred));
|
||||
shred.set_slot(slot + 1);
|
||||
let shred = new_test_data_shred(slot + 1, 0);
|
||||
assert!(!request.verify_response(&shred));
|
||||
|
||||
// HighestShred
|
||||
shred = Shred::new_empty_data_shred();
|
||||
shred.set_slot(slot);
|
||||
shred.set_index(index);
|
||||
let shred = new_test_data_shred(slot, index);
|
||||
let request = ShredRepairType::HighestShred(slot, index as u64);
|
||||
assert!(request.verify_response(&shred));
|
||||
shred.set_index(index + 1);
|
||||
let shred = new_test_data_shred(slot, index + 1);
|
||||
assert!(request.verify_response(&shred));
|
||||
shred.set_index(index - 1);
|
||||
let shred = new_test_data_shred(slot, index - 1);
|
||||
assert!(!request.verify_response(&shred));
|
||||
shred.set_slot(slot - 1);
|
||||
shred.set_index(index);
|
||||
let shred = new_test_data_shred(slot - 1, index);
|
||||
assert!(!request.verify_response(&shred));
|
||||
shred.set_slot(slot + 1);
|
||||
let shred = new_test_data_shred(slot + 1, index);
|
||||
assert!(!request.verify_response(&shred));
|
||||
|
||||
// Shred
|
||||
shred = Shred::new_empty_data_shred();
|
||||
shred.set_slot(slot);
|
||||
shred.set_index(index);
|
||||
let shred = new_test_data_shred(slot, index);
|
||||
let request = ShredRepairType::Shred(slot, index as u64);
|
||||
assert!(request.verify_response(&shred));
|
||||
shred.set_index(index + 1);
|
||||
let shred = new_test_data_shred(slot, index + 1);
|
||||
assert!(!request.verify_response(&shred));
|
||||
shred.set_slot(slot + 1);
|
||||
shred.set_index(index);
|
||||
let shred = new_test_data_shred(slot + 1, index);
|
||||
assert!(!request.verify_response(&shred));
|
||||
}
|
||||
|
||||
|
|
|
@ -371,7 +371,7 @@ impl Shred {
|
|||
position: u16,
|
||||
version: u16,
|
||||
) -> Self {
|
||||
let header = ShredCommonHeader {
|
||||
let common_header = ShredCommonHeader {
|
||||
shred_type: ShredType::Code,
|
||||
index,
|
||||
slot,
|
||||
|
@ -384,14 +384,6 @@ impl Shred {
|
|||
num_coding_shreds,
|
||||
position,
|
||||
};
|
||||
Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header)
|
||||
}
|
||||
|
||||
fn new_empty_from_header(
|
||||
common_header: ShredCommonHeader,
|
||||
data_header: DataShredHeader,
|
||||
coding_header: CodingShredHeader,
|
||||
) -> Self {
|
||||
let mut payload = vec![0; SHRED_PAYLOAD_SIZE];
|
||||
let mut start = 0;
|
||||
Self::serialize_obj_into(
|
||||
|
@ -401,38 +393,21 @@ impl Shred {
|
|||
&common_header,
|
||||
)
|
||||
.expect("Failed to write header into shred buffer");
|
||||
match common_header.shred_type {
|
||||
ShredType::Data => Self::serialize_obj_into(
|
||||
&mut start,
|
||||
SIZE_OF_DATA_SHRED_HEADER,
|
||||
&mut payload,
|
||||
&data_header,
|
||||
)
|
||||
.expect("Failed to write data header into shred buffer"),
|
||||
ShredType::Code => Self::serialize_obj_into(
|
||||
&mut start,
|
||||
SIZE_OF_CODING_SHRED_HEADER,
|
||||
&mut payload,
|
||||
&coding_header,
|
||||
)
|
||||
.expect("Failed to write coding header into shred buffer"),
|
||||
};
|
||||
Self::serialize_obj_into(
|
||||
&mut start,
|
||||
SIZE_OF_CODING_SHRED_HEADER,
|
||||
&mut payload,
|
||||
&coding_header,
|
||||
)
|
||||
.expect("Failed to write coding header into shred buffer");
|
||||
Shred {
|
||||
common_header,
|
||||
data_header,
|
||||
data_header: DataShredHeader::default(),
|
||||
coding_header,
|
||||
payload,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_empty_data_shred() -> Self {
|
||||
Self::new_empty_from_header(
|
||||
ShredCommonHeader::default(),
|
||||
DataShredHeader::default(),
|
||||
CodingShredHeader::default(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Unique identifier for each shred.
|
||||
pub fn id(&self) -> ShredId {
|
||||
ShredId(self.slot(), self.index(), self.shred_type())
|
||||
|
|
Loading…
Reference in New Issue