use {
    crate::{
        cluster_slots::ClusterSlots,
        duplicate_repair_status::ANCESTOR_HASH_REPAIR_SAMPLE_SIZE,
        repair_response,
        repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS},
        request_response::RequestResponse,
        result::{Error, Result},
    },
    bincode::serialize,
    lru::LruCache,
    rand::{
        distributions::{Distribution, WeightedError, WeightedIndex},
        Rng,
    },
    solana_gossip::{
        cluster_info::{ClusterInfo, ClusterInfoError},
        legacy_contact_info::{LegacyContactInfo as ContactInfo, LegacyContactInfo},
        ping_pong::{self, PingCache, Pong},
        weighted_shuffle::WeightedShuffle,
    },
    solana_ledger::{
        ancestor_iterator::{AncestorIterator, AncestorIteratorWithHash},
        blockstore::Blockstore,
        shred::{Nonce, Shred, ShredFetchStats, SIZE_OF_NONCE},
    },
    solana_metrics::inc_new_counter_debug,
    solana_perf::{
        data_budget::DataBudget,
        packet::{Packet, PacketBatch, PacketBatchRecycler},
    },
    solana_runtime::bank_forks::BankForks,
    solana_sdk::{
        clock::Slot,
        genesis_config::ClusterType,
        hash::{Hash, HASH_BYTES},
        packet::PACKET_DATA_SIZE,
        pubkey::{Pubkey, PUBKEY_BYTES},
        signature::{Signable, Signature, Signer, SIGNATURE_BYTES},
        signer::keypair::Keypair,
        timing::{duration_as_ms, timestamp},
    },
    solana_streamer::{
        sendmmsg::{batch_send, SendPktsError},
        streamer::{PacketBatchReceiver, PacketBatchSender},
    },
    std::{
        cmp::Reverse,
        collections::HashSet,
        net::{SocketAddr, UdpSocket},
        sync::{
            atomic::{AtomicBool, Ordering},
            Arc, RwLock,
        },
        thread::{Builder, JoinHandle},
        time::{Duration, Instant},
    },
};

type SlotHash = (Slot, Hash);

/// The number of slots to respond with when responding to `Orphan` requests.
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 11;
// Number of slots for which to cache repair peers and sampling weights.
pub(crate) const REPAIR_PEERS_CACHE_CAPACITY: usize = 128;
// Limit cache entries' TTL to avoid reusing outdated data.
const REPAIR_PEERS_CACHE_TTL: Duration = Duration::from_secs(10);
pub const MAX_ANCESTOR_BYTES_IN_PACKET: usize =
    PACKET_DATA_SIZE -
    SIZE_OF_NONCE -
    4 /*(response version enum discriminator)*/ -
    4 /*slot_hash length*/;
pub const MAX_ANCESTOR_RESPONSES: usize =
    MAX_ANCESTOR_BYTES_IN_PACKET / std::mem::size_of::<SlotHash>();
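// Budget sketch (sizes assumed from solana_sdk/solana_ledger:
// PACKET_DATA_SIZE = 1232, SIZE_OF_NONCE = 4): 1232 - 4 - 4 - 4 = 1220
// bytes remain for hashes, and size_of::<SlotHash>() = 8 + 32 = 40, so
// 1220 / 40 = 30 (slot, hash) pairs fit per packet, matching the
// const_assert below.
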
/// Number of bytes in the randomly generated token sent with ping messages.
pub(crate) const REPAIR_PING_TOKEN_SIZE: usize = HASH_BYTES;
pub const REPAIR_PING_CACHE_CAPACITY: usize = 65536;
pub const REPAIR_PING_CACHE_TTL: Duration = Duration::from_secs(1280);
const REPAIR_PING_CACHE_RATE_LIMIT_DELAY: Duration = Duration::from_secs(2);
pub(crate) const REPAIR_RESPONSE_SERIALIZED_PING_BYTES: usize =
    4 /*enum discriminator*/ + PUBKEY_BYTES + REPAIR_PING_TOKEN_SIZE + SIGNATURE_BYTES;
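// Size sketch (constants assumed from solana_sdk: PUBKEY_BYTES = 32,
// HASH_BYTES = 32, SIGNATURE_BYTES = 64): 4 + 32 + 32 + 64 = 132 bytes;
// `test_serialized_ping_size` below checks this exact size.
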
const SIGNED_REPAIR_TIME_WINDOW: Duration = Duration::from_secs(60 * 10); // 10 min

#[cfg(test)]
static_assertions::const_assert_eq!(MAX_ANCESTOR_RESPONSES, 30);

#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum ShredRepairType {
    /// Requesting `MAX_ORPHAN_REPAIR_RESPONSES` parent shreds
    Orphan(Slot),
    /// Requesting any shred with index greater than or equal to the particular index
    HighestShred(Slot, u64),
    /// Requesting the missing shred at a particular index
    Shred(Slot, u64),
}

impl ShredRepairType {
    pub fn slot(&self) -> Slot {
        match self {
            ShredRepairType::Orphan(slot) => *slot,
            ShredRepairType::HighestShred(slot, _) => *slot,
            ShredRepairType::Shred(slot, _) => *slot,
        }
    }
}

impl RequestResponse for ShredRepairType {
    type Response = Shred;
    fn num_expected_responses(&self) -> u32 {
        match self {
            ShredRepairType::Orphan(_) => MAX_ORPHAN_REPAIR_RESPONSES as u32,
            ShredRepairType::HighestShred(_, _) => 1,
            ShredRepairType::Shred(_, _) => 1,
        }
    }
    fn verify_response(&self, response_shred: &Shred) -> bool {
        match self {
            ShredRepairType::Orphan(slot) => response_shred.slot() <= *slot,
            ShredRepairType::HighestShred(slot, index) => {
                response_shred.slot() == *slot && response_shred.index() as u64 >= *index
            }
            ShredRepairType::Shred(slot, index) => {
                response_shred.slot() == *slot && response_shred.index() as u64 == *index
            }
        }
    }
}
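
// Usage sketch (hypothetical values): a requester records the outstanding
// request and later filters responses through `verify_response`, e.g. for
// ShredRepairType::HighestShred(100, 42) only a shred with slot == 100 and
// index >= 42 is accepted; everything else is treated as spoofed or stale.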

pub struct AncestorHashesRepairType(pub Slot);
impl AncestorHashesRepairType {
    pub fn slot(&self) -> Slot {
        self.0
    }
}

#[derive(Debug, AbiEnumVisitor, AbiExample, Deserialize, Serialize)]
#[frozen_abi(digest = "AKpurCovzn6rsji4aQrP3hUdEHxjtXUfT7AatZXN7Rpz")]
pub enum AncestorHashesResponse {
    Hashes(Vec<SlotHash>),
    Ping(Ping),
}

impl RequestResponse for AncestorHashesRepairType {
    type Response = AncestorHashesResponse;
    fn num_expected_responses(&self) -> u32 {
        1
    }
    fn verify_response(&self, response: &AncestorHashesResponse) -> bool {
        match response {
            AncestorHashesResponse::Hashes(hashes) => hashes.len() <= MAX_ANCESTOR_RESPONSES,
            AncestorHashesResponse::Ping(ping) => ping.verify(),
        }
    }
}

#[derive(Default)]
struct ServeRepairStats {
    total_requests: usize,
    dropped_requests_outbound_bandwidth: usize,
    dropped_requests_load_shed: usize,
    dropped_requests_low_stake: usize,
    whitelisted_requests: usize,
    total_dropped_response_packets: usize,
    total_response_packets: usize,
    total_response_bytes_staked: usize,
    total_response_bytes_unstaked: usize,
    handle_requests_staked: usize,
    handle_requests_unstaked: usize,
    processed: usize,
    self_repair: usize,
    window_index: usize,
    highest_window_index: usize,
    orphan: usize,
    pong: usize,
    ancestor_hashes: usize,
    ping_cache_check_failed: usize,
    pings_sent: usize,
    decode_time_us: u64,
    err_time_skew: usize,
    err_malformed: usize,
    err_sig_verify: usize,
    err_unsigned: usize,
    err_id_mismatch: usize,
}

#[derive(Debug, AbiExample, Deserialize, Serialize)]
pub struct RepairRequestHeader {
    signature: Signature,
    sender: Pubkey,
    recipient: Pubkey,
    timestamp: u64,
    nonce: Nonce,
}

impl RepairRequestHeader {
    pub fn new(sender: Pubkey, recipient: Pubkey, timestamp: u64, nonce: Nonce) -> Self {
        Self {
            signature: Signature::default(),
            sender,
            recipient,
            timestamp,
            nonce,
        }
    }
}

pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>;

/// Window protocol messages
#[derive(Debug, AbiEnumVisitor, AbiExample, Deserialize, Serialize)]
#[frozen_abi(digest = "3bgE3sYHRqetvpo4fcDL6PTV3z2LMAtY6H8BoLFSjCwf")]
pub enum RepairProtocol {
    LegacyWindowIndex(LegacyContactInfo, Slot, u64),
    LegacyHighestWindowIndex(LegacyContactInfo, Slot, u64),
    LegacyOrphan(LegacyContactInfo, Slot),
    LegacyWindowIndexWithNonce(LegacyContactInfo, Slot, u64, Nonce),
    LegacyHighestWindowIndexWithNonce(LegacyContactInfo, Slot, u64, Nonce),
    LegacyOrphanWithNonce(LegacyContactInfo, Slot, Nonce),
    LegacyAncestorHashes(LegacyContactInfo, Slot, Nonce),
    Pong(ping_pong::Pong),
    WindowIndex {
        header: RepairRequestHeader,
        slot: Slot,
        shred_index: u64,
    },
    HighestWindowIndex {
        header: RepairRequestHeader,
        slot: Slot,
        shred_index: u64,
    },
    Orphan {
        header: RepairRequestHeader,
        slot: Slot,
    },
    AncestorHashes {
        header: RepairRequestHeader,
        slot: Slot,
    },
}
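
// Wire-layout sketch for the signed (non-Legacy) variants, assuming
// bincode's default fixed-width encoding: [4-byte enum discriminator]
// [RepairRequestHeader: 64-byte signature, 32-byte sender, 32-byte
// recipient, 8-byte timestamp, 4-byte nonce][variant payload].
// `repair_proto_to_bytes` and `verify_signed_packet` below depend on the
// signature sitting at bytes [4..4 + SIGNATURE_BYTES).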

#[derive(Debug, AbiEnumVisitor, AbiExample, Deserialize, Serialize)]
#[frozen_abi(digest = "CkffjyMPCwuJgk9NiCMELXLCecAnTPZqpKEnUCb3VyVf")]
pub(crate) enum RepairResponse {
    Ping(Ping),
}

impl RepairProtocol {
    fn sender(&self) -> &Pubkey {
        match self {
            Self::LegacyWindowIndex(ci, _, _) => &ci.id,
            Self::LegacyHighestWindowIndex(ci, _, _) => &ci.id,
            Self::LegacyOrphan(ci, _) => &ci.id,
            Self::LegacyWindowIndexWithNonce(ci, _, _, _) => &ci.id,
            Self::LegacyHighestWindowIndexWithNonce(ci, _, _, _) => &ci.id,
            Self::LegacyOrphanWithNonce(ci, _, _) => &ci.id,
            Self::LegacyAncestorHashes(ci, _, _) => &ci.id,
            Self::Pong(pong) => pong.from(),
            Self::WindowIndex { header, .. } => &header.sender,
            Self::HighestWindowIndex { header, .. } => &header.sender,
            Self::Orphan { header, .. } => &header.sender,
            Self::AncestorHashes { header, .. } => &header.sender,
        }
    }

    fn supports_signature(&self) -> bool {
        match self {
            Self::LegacyWindowIndex(_, _, _)
            | Self::LegacyHighestWindowIndex(_, _, _)
            | Self::LegacyOrphan(_, _)
            | Self::LegacyWindowIndexWithNonce(_, _, _, _)
            | Self::LegacyHighestWindowIndexWithNonce(_, _, _, _)
            | Self::LegacyOrphanWithNonce(_, _, _)
            | Self::LegacyAncestorHashes(_, _, _) => false,
            Self::Pong(_)
            | Self::WindowIndex { .. }
            | Self::HighestWindowIndex { .. }
            | Self::Orphan { .. }
            | Self::AncestorHashes { .. } => true,
        }
    }
}

#[derive(Clone)]
pub struct ServeRepair {
    cluster_info: Arc<ClusterInfo>,
    bank_forks: Arc<RwLock<BankForks>>,
    repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
}

// Cache entry for repair peers for a slot.
pub(crate) struct RepairPeers {
    asof: Instant,
    peers: Vec<(Pubkey, /*ContactInfo.serve_repair:*/ SocketAddr)>,
    weighted_index: WeightedIndex<u64>,
}

impl RepairPeers {
    fn new(asof: Instant, peers: &[ContactInfo], weights: &[u64]) -> Result<Self> {
        if peers.is_empty() {
            return Err(Error::from(ClusterInfoError::NoPeers));
        }
        if peers.len() != weights.len() {
            return Err(Error::from(WeightedError::InvalidWeight));
        }
        let weighted_index = WeightedIndex::new(weights)?;
        let peers = peers
            .iter()
            .map(|peer| (peer.id, peer.serve_repair))
            .collect();
        Ok(Self {
            asof,
            peers,
            weighted_index,
        })
    }

    fn sample<R: Rng>(&self, rng: &mut R) -> (Pubkey, SocketAddr) {
        let index = self.weighted_index.sample(rng);
        self.peers[index]
    }
}
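
// Sampling sketch: `WeightedIndex` draws peer i with probability
// weights[i] / sum(weights), so with weights [3, 1] the first peer is
// picked ~75% of the time; higher-weight peers therefore absorb
// proportionally more repair traffic.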

struct RepairRequestWithMeta {
    request: RepairProtocol,
    from_addr: SocketAddr,
    stake: u64,
    whitelisted: bool,
}

impl ServeRepair {
    pub fn new(
        cluster_info: Arc<ClusterInfo>,
        bank_forks: Arc<RwLock<BankForks>>,
        repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
    ) -> Self {
        Self {
            cluster_info,
            bank_forks,
            repair_whitelist,
        }
    }

    pub(crate) fn my_id(&self) -> Pubkey {
        self.cluster_info.id()
    }

    fn handle_repair(
        recycler: &PacketBatchRecycler,
        from_addr: &SocketAddr,
        blockstore: &Blockstore,
        request: RepairProtocol,
        stats: &mut ServeRepairStats,
        ping_cache: &mut PingCache,
    ) -> Option<PacketBatch> {
        let now = Instant::now();
        let (res, label) = {
            match &request {
                RepairProtocol::WindowIndex {
                    header: RepairRequestHeader { nonce, .. },
                    slot,
                    shred_index,
                }
                | RepairProtocol::LegacyWindowIndexWithNonce(_, slot, shred_index, nonce) => {
                    stats.window_index += 1;
                    (
                        Self::run_window_request(
                            recycler,
                            from_addr,
                            blockstore,
                            *slot,
                            *shred_index,
                            *nonce,
                        ),
                        "WindowIndexWithNonce",
                    )
                }
                RepairProtocol::HighestWindowIndex {
                    header: RepairRequestHeader { nonce, .. },
                    slot,
                    shred_index: highest_index,
                }
                | RepairProtocol::LegacyHighestWindowIndexWithNonce(
                    _,
                    slot,
                    highest_index,
                    nonce,
                ) => {
                    stats.highest_window_index += 1;
                    (
                        Self::run_highest_window_request(
                            recycler,
                            from_addr,
                            blockstore,
                            *slot,
                            *highest_index,
                            *nonce,
                        ),
                        "HighestWindowIndexWithNonce",
                    )
                }
                RepairProtocol::Orphan {
                    header: RepairRequestHeader { nonce, .. },
                    slot,
                }
                | RepairProtocol::LegacyOrphanWithNonce(_, slot, nonce) => {
                    stats.orphan += 1;
                    (
                        Self::run_orphan(
                            recycler,
                            from_addr,
                            blockstore,
                            *slot,
                            MAX_ORPHAN_REPAIR_RESPONSES,
                            *nonce,
                        ),
                        "OrphanWithNonce",
                    )
                }
                RepairProtocol::AncestorHashes {
                    header: RepairRequestHeader { nonce, .. },
                    slot,
                }
                | RepairProtocol::LegacyAncestorHashes(_, slot, nonce) => {
                    stats.ancestor_hashes += 1;
                    (
                        Self::run_ancestor_hashes(recycler, from_addr, blockstore, *slot, *nonce),
                        "AncestorHashes",
                    )
                }
                RepairProtocol::Pong(pong) => {
                    stats.pong += 1;
                    ping_cache.add(pong, *from_addr, Instant::now());
                    (None, "Pong")
                }
                RepairProtocol::LegacyWindowIndex(_, _, _)
                | RepairProtocol::LegacyHighestWindowIndex(_, _, _)
                | RepairProtocol::LegacyOrphan(_, _) => (None, "Unsupported repair type"),
            }
        };
        Self::report_time_spent(label, &now.elapsed(), "");
        res
    }

    fn report_time_spent(label: &str, time: &Duration, extra: &str) {
        let count = duration_as_ms(time);
        if count > 5 {
            info!("{} took: {} ms {}", label, count, extra);
        }
    }

    /// Process messages from the network
    fn run_listen(
        &self,
        ping_cache: &mut PingCache,
        recycler: &PacketBatchRecycler,
        blockstore: &Blockstore,
        requests_receiver: &PacketBatchReceiver,
        response_sender: &PacketBatchSender,
        stats: &mut ServeRepairStats,
        data_budget: &DataBudget,
    ) -> Result<()> {
        //TODO cache connections
        let timeout = Duration::new(1, 0);
        let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?];
        const MAX_REQUESTS_PER_ITERATION: usize = 1024;
        let mut total_requests = reqs_v[0].len();

        let socket_addr_space = *self.cluster_info.socket_addr_space();
        let root_bank = self.bank_forks.read().unwrap().root_bank();
        let epoch_staked_nodes = root_bank.epoch_staked_nodes(root_bank.epoch());
        let identity_keypair = self.cluster_info.keypair().clone();
        let my_id = identity_keypair.pubkey();
        let cluster_type = root_bank.cluster_type();

        let max_buffered_packets = if !self.repair_whitelist.read().unwrap().is_empty() {
            4 * MAX_REQUESTS_PER_ITERATION
        } else {
            2 * MAX_REQUESTS_PER_ITERATION
        };

        let mut dropped_requests = 0;
        while let Ok(more) = requests_receiver.try_recv() {
            total_requests += more.len();
            if total_requests > max_buffered_packets {
                dropped_requests += more.len();
            } else {
                reqs_v.push(more);
            }
        }

        stats.dropped_requests_load_shed += dropped_requests;
        stats.total_requests += total_requests;

        let decode_start = Instant::now();
        let mut decoded_requests = Vec::default();
        let mut whitelisted_request_count: usize = 0;
        {
            let whitelist = self.repair_whitelist.read().unwrap();
            for packet in reqs_v.iter().flatten() {
                let request: RepairProtocol = match packet.deserialize_slice(..) {
                    Ok(request) => request,
                    Err(_) => {
                        stats.err_malformed += 1;
                        continue;
                    }
                };

                let from_addr = packet.meta().socket_addr();
                if !ContactInfo::is_valid_address(&from_addr, &socket_addr_space) {
                    stats.err_malformed += 1;
                    continue;
                }

                match cluster_type {
                    ClusterType::Testnet | ClusterType::Development => {
                        if !Self::verify_signed_packet(&my_id, packet, &request, stats) {
                            continue;
                        }
                    }
                    ClusterType::MainnetBeta | ClusterType::Devnet => {
                        // collect stats for signature verification
                        let _ = Self::verify_signed_packet(&my_id, packet, &request, stats);
                    }
                }

                if request.sender() == &my_id {
                    stats.self_repair += 1;
                    continue;
                }

                let stake = epoch_staked_nodes
                    .as_ref()
                    .and_then(|stakes| stakes.get(request.sender()))
                    .unwrap_or(&0);
                if *stake == 0 {
                    stats.handle_requests_unstaked += 1;
                } else {
                    stats.handle_requests_staked += 1;
                }

                let whitelisted = whitelist.contains(request.sender());
                if whitelisted {
                    whitelisted_request_count += 1;
                }

                decoded_requests.push(RepairRequestWithMeta {
                    request,
                    from_addr,
                    stake: *stake,
                    whitelisted,
                });
            }
        }
        stats.decode_time_us += decode_start.elapsed().as_micros() as u64;
        stats.whitelisted_requests += whitelisted_request_count.min(MAX_REQUESTS_PER_ITERATION);
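
        // If too many requests arrived in this iteration, keep only the
        // highest-priority ones: sorting by Reverse((whitelisted, stake))
        // orders whitelisted senders first and, within each group, higher
        // stake first.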
        if decoded_requests.len() > MAX_REQUESTS_PER_ITERATION {
            stats.dropped_requests_low_stake += decoded_requests.len() - MAX_REQUESTS_PER_ITERATION;
            decoded_requests.sort_unstable_by_key(|r| Reverse((r.whitelisted, r.stake)));
            decoded_requests.truncate(MAX_REQUESTS_PER_ITERATION);
        }

        self.handle_packets(
            ping_cache,
            recycler,
            blockstore,
            decoded_requests,
            response_sender,
            stats,
            data_budget,
            cluster_type,
        );

        Ok(())
    }

    fn report_reset_stats(&self, stats: &mut ServeRepairStats) {
        if stats.self_repair > 0 {
            let my_id = self.cluster_info.id();
            warn!(
                "{}: Ignored received repair requests from ME: {}",
                my_id, stats.self_repair,
            );
            inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair);
        }

        datapoint_info!(
            "serve_repair-requests_received",
            ("total_requests", stats.total_requests, i64),
            (
                "dropped_requests_outbound_bandwidth",
                stats.dropped_requests_outbound_bandwidth,
                i64
            ),
            (
                "dropped_requests_load_shed",
                stats.dropped_requests_load_shed,
                i64
            ),
            (
                "dropped_requests_low_stake",
                stats.dropped_requests_low_stake,
                i64
            ),
            ("whitelisted_requests", stats.whitelisted_requests, i64),
            (
                "total_dropped_response_packets",
                stats.total_dropped_response_packets,
                i64
            ),
            ("handle_requests_staked", stats.handle_requests_staked, i64),
            (
                "handle_requests_unstaked",
                stats.handle_requests_unstaked,
                i64
            ),
            ("processed", stats.processed, i64),
            ("total_response_packets", stats.total_response_packets, i64),
            (
                "total_response_bytes_staked",
                stats.total_response_bytes_staked,
                i64
            ),
            (
                "total_response_bytes_unstaked",
                stats.total_response_bytes_unstaked,
                i64
            ),
            ("self_repair", stats.self_repair, i64),
            ("window_index", stats.window_index, i64),
            (
                "request-highest-window-index",
                stats.highest_window_index,
                i64
            ),
            ("orphan", stats.orphan, i64),
            (
                "serve_repair-request-ancestor-hashes",
                stats.ancestor_hashes,
                i64
            ),
            ("pong", stats.pong, i64),
            (
                "ping_cache_check_failed",
                stats.ping_cache_check_failed,
                i64
            ),
            ("pings_sent", stats.pings_sent, i64),
            ("decode_time_us", stats.decode_time_us, i64),
            ("err_time_skew", stats.err_time_skew, i64),
            ("err_malformed", stats.err_malformed, i64),
            ("err_sig_verify", stats.err_sig_verify, i64),
            ("err_unsigned", stats.err_unsigned, i64),
            ("err_id_mismatch", stats.err_id_mismatch, i64),
        );

        *stats = ServeRepairStats::default();
    }

    pub fn listen(
        self,
        blockstore: Arc<Blockstore>,
        requests_receiver: PacketBatchReceiver,
        response_sender: PacketBatchSender,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        const INTERVAL_MS: u64 = 1000;
        const MAX_BYTES_PER_SECOND: usize = 12_000_000;
        const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000;

        // rate limit delay should be greater than the repair request iteration delay
        assert!(REPAIR_PING_CACHE_RATE_LIMIT_DELAY > Duration::from_millis(REPAIR_MS));

        let mut ping_cache = PingCache::new(
            REPAIR_PING_CACHE_TTL,
            REPAIR_PING_CACHE_RATE_LIMIT_DELAY,
            REPAIR_PING_CACHE_CAPACITY,
        );

        let recycler = PacketBatchRecycler::default();
        Builder::new()
            .name("solRepairListen".to_string())
            .spawn(move || {
                let mut last_print = Instant::now();
                let mut stats = ServeRepairStats::default();
                let data_budget = DataBudget::default();
                loop {
                    let result = self.run_listen(
                        &mut ping_cache,
                        &recycler,
                        &blockstore,
                        &requests_receiver,
                        &response_sender,
                        &mut stats,
                        &data_budget,
                    );
                    match result {
                        Err(Error::RecvTimeout(_)) | Ok(_) => {}
                        Err(err) => info!("repair listener error: {:?}", err),
                    };
                    if exit.load(Ordering::Relaxed) {
                        return;
                    }
                    if last_print.elapsed().as_secs() > 2 {
                        self.report_reset_stats(&mut stats);
                        last_print = Instant::now();
                    }
                    data_budget.update(INTERVAL_MS, |_bytes| MAX_BYTES_PER_INTERVAL);
                }
            })
            .unwrap()
    }

    #[must_use]
    fn verify_signed_packet(
        my_id: &Pubkey,
        packet: &Packet,
        request: &RepairProtocol,
        stats: &mut ServeRepairStats,
    ) -> bool {
        match request {
            RepairProtocol::LegacyWindowIndex(_, _, _)
            | RepairProtocol::LegacyHighestWindowIndex(_, _, _)
            | RepairProtocol::LegacyOrphan(_, _)
            | RepairProtocol::LegacyWindowIndexWithNonce(_, _, _, _)
            | RepairProtocol::LegacyHighestWindowIndexWithNonce(_, _, _, _)
            | RepairProtocol::LegacyOrphanWithNonce(_, _, _)
            | RepairProtocol::LegacyAncestorHashes(_, _, _) => {
                stats.err_unsigned += 1;
                return false;
            }
            RepairProtocol::Pong(pong) => {
                if !pong.verify() {
                    stats.err_sig_verify += 1;
                    return false;
                }
            }
            RepairProtocol::WindowIndex { header, .. }
            | RepairProtocol::HighestWindowIndex { header, .. }
            | RepairProtocol::Orphan { header, .. }
            | RepairProtocol::AncestorHashes { header, .. } => {
                if &header.recipient != my_id {
                    stats.err_id_mismatch += 1;
                    return false;
                }
                let time_diff_ms = timestamp().abs_diff(header.timestamp);
                if u128::from(time_diff_ms) > SIGNED_REPAIR_TIME_WINDOW.as_millis() {
                    stats.err_time_skew += 1;
                    return false;
                }
                let leading_buf = match packet.data(..4) {
                    Some(buf) => buf,
                    None => {
                        debug_assert!(false); // should have failed deserialize
                        stats.err_malformed += 1;
                        return false;
                    }
                };
                let trailing_buf = match packet.data(4 + SIGNATURE_BYTES..) {
                    Some(buf) => buf,
                    None => {
                        debug_assert!(false); // should have failed deserialize
                        stats.err_malformed += 1;
                        return false;
                    }
                };
                let from_id = request.sender();
                let signed_data = [leading_buf, trailing_buf].concat();
                if !header.signature.verify(from_id.as_ref(), &signed_data) {
                    stats.err_sig_verify += 1;
                    return false;
                }
            }
        }
        true
    }

    fn check_ping_cache(
        ping_cache: &mut PingCache,
        request: &RepairProtocol,
        from_addr: &SocketAddr,
        identity_keypair: &Keypair,
    ) -> (bool, Option<Packet>) {
        let mut rng = rand::thread_rng();
        let mut pingf = move || Ping::new_rand(&mut rng, identity_keypair).ok();
        let (check, ping) =
            ping_cache.check(Instant::now(), (*request.sender(), *from_addr), &mut pingf);
        let ping_pkt = if let Some(ping) = ping {
            match request {
                RepairProtocol::LegacyWindowIndex(_, _, _)
                | RepairProtocol::LegacyHighestWindowIndex(_, _, _)
                | RepairProtocol::LegacyOrphan(_, _)
                | RepairProtocol::LegacyWindowIndexWithNonce(_, _, _, _)
                | RepairProtocol::LegacyHighestWindowIndexWithNonce(_, _, _, _)
                | RepairProtocol::LegacyOrphanWithNonce(_, _, _)
                | RepairProtocol::WindowIndex { .. }
                | RepairProtocol::HighestWindowIndex { .. }
                | RepairProtocol::Orphan { .. } => {
                    let ping = RepairResponse::Ping(ping);
                    Packet::from_data(Some(from_addr), ping).ok()
                }
                RepairProtocol::LegacyAncestorHashes(_, _, _)
                | RepairProtocol::AncestorHashes { .. } => {
                    let ping = AncestorHashesResponse::Ping(ping);
                    Packet::from_data(Some(from_addr), ping).ok()
                }
                RepairProtocol::Pong(_) => None,
            }
        } else {
            None
        };
        (check, ping_pkt)
    }
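
    // Gating sketch: `ping_cache.check` reports `check == false` until the
    // (sender, address) pair has recently answered a Ping with a valid
    // Pong, and hands back a fresh Ping packet to send when one is due.
    // This makes a remote node prove it controls its claimed address
    // before it is served repair responses.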

    fn handle_packets(
        &self,
        ping_cache: &mut PingCache,
        recycler: &PacketBatchRecycler,
        blockstore: &Blockstore,
        requests: Vec<RepairRequestWithMeta>,
        response_sender: &PacketBatchSender,
        stats: &mut ServeRepairStats,
        data_budget: &DataBudget,
        cluster_type: ClusterType,
    ) {
        let identity_keypair = self.cluster_info.keypair().clone();
        let mut pending_pings = Vec::default();

        let requests_len = requests.len();
        for (
            i,
            RepairRequestWithMeta {
                request,
                from_addr,
                stake,
                ..
            },
        ) in requests.into_iter().enumerate()
        {
            if !matches!(&request, RepairProtocol::Pong(_)) {
                let (check, ping_pkt) =
                    Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair);
                if let Some(ping_pkt) = ping_pkt {
                    pending_pings.push(ping_pkt);
                }
                if !check {
                    stats.ping_cache_check_failed += 1;
                    match cluster_type {
                        ClusterType::Testnet | ClusterType::Development => continue,
                        ClusterType::MainnetBeta | ClusterType::Devnet => (),
                    }
                }
            }
            stats.processed += 1;
            let rsp = match Self::handle_repair(
                recycler, &from_addr, blockstore, request, stats, ping_cache,
            ) {
                None => continue,
                Some(rsp) => rsp,
            };
            let num_response_packets = rsp.len();
            let num_response_bytes = rsp.iter().map(|p| p.meta().size).sum();
            if data_budget.take(num_response_bytes) && response_sender.send(rsp).is_ok() {
                stats.total_response_packets += num_response_packets;
                match stake > 0 {
                    true => stats.total_response_bytes_staked += num_response_bytes,
                    false => stats.total_response_bytes_unstaked += num_response_bytes,
                }
            } else {
                stats.dropped_requests_outbound_bandwidth += requests_len - i;
                stats.total_dropped_response_packets += num_response_packets;
                break;
            }
        }

        if !pending_pings.is_empty() {
            stats.pings_sent += pending_pings.len();
            let batch = PacketBatch::new(pending_pings);
            let _ignore = response_sender.send(batch);
        }
    }

    pub fn ancestor_repair_request_bytes(
        &self,
        keypair: &Keypair,
        repair_peer_id: &Pubkey,
        request_slot: Slot,
        nonce: Nonce,
    ) -> Result<Vec<u8>> {
        let header = RepairRequestHeader {
            signature: Signature::default(),
            sender: self.my_id(),
            recipient: *repair_peer_id,
            timestamp: timestamp(),
            nonce,
        };
        let request = RepairProtocol::AncestorHashes {
            header,
            slot: request_slot,
        };
        Self::repair_proto_to_bytes(&request, keypair)
    }

    pub(crate) fn repair_request(
        &self,
        cluster_slots: &ClusterSlots,
        repair_request: ShredRepairType,
        peers_cache: &mut LruCache<Slot, RepairPeers>,
        repair_stats: &mut RepairStats,
        repair_validators: &Option<HashSet<Pubkey>>,
        outstanding_requests: &mut OutstandingShredRepairs,
        identity_keypair: &Keypair,
    ) -> Result<(SocketAddr, Vec<u8>)> {
        // Find a peer that appears to be accepting replication and has the
        // desired slot, as indicated by a valid tvu port location.
        let slot = repair_request.slot();
        let repair_peers = match peers_cache.get(&slot) {
            Some(entry) if entry.asof.elapsed() < REPAIR_PEERS_CACHE_TTL => entry,
            _ => {
                peers_cache.pop(&slot);
                let repair_peers = self.repair_peers(repair_validators, slot);
                let weights = cluster_slots.compute_weights(slot, &repair_peers);
                let repair_peers = RepairPeers::new(Instant::now(), &repair_peers, &weights)?;
                peers_cache.put(slot, repair_peers);
                peers_cache.get(&slot).unwrap()
            }
        };
        let (peer, addr) = repair_peers.sample(&mut rand::thread_rng());
        let nonce = outstanding_requests.add_request(repair_request, timestamp());
        let out = self.map_repair_request(
            &repair_request,
            &peer,
            repair_stats,
            nonce,
            identity_keypair,
        )?;
        debug!(
            "Sending repair request from {} for {:#?}",
            identity_keypair.pubkey(),
            repair_request
        );
        Ok((addr, out))
    }

    pub(crate) fn repair_request_ancestor_hashes_sample_peers(
        &self,
        slot: Slot,
        cluster_slots: &ClusterSlots,
        repair_validators: &Option<HashSet<Pubkey>>,
    ) -> Result<Vec<(Pubkey, SocketAddr)>> {
        let repair_peers: Vec<_> = self.repair_peers(repair_validators, slot);
        if repair_peers.is_empty() {
            return Err(ClusterInfoError::NoPeers.into());
        }
        let (weights, index): (Vec<_>, Vec<_>) = cluster_slots
            .compute_weights_exclude_nonfrozen(slot, &repair_peers)
            .into_iter()
            .unzip();
        let peers = WeightedShuffle::new("repair_request_ancestor_hashes", &weights)
            .shuffle(&mut rand::thread_rng())
            .take(ANCESTOR_HASH_REPAIR_SAMPLE_SIZE)
            .map(|i| index[i])
            .map(|i| (repair_peers[i].id, repair_peers[i].serve_repair))
            .collect();
        Ok(peers)
    }

    pub fn repair_request_duplicate_compute_best_peer(
        &self,
        slot: Slot,
        cluster_slots: &ClusterSlots,
        repair_validators: &Option<HashSet<Pubkey>>,
    ) -> Result<(Pubkey, SocketAddr)> {
        let repair_peers: Vec<_> = self.repair_peers(repair_validators, slot);
        if repair_peers.is_empty() {
            return Err(ClusterInfoError::NoPeers.into());
        }
        let (weights, index): (Vec<_>, Vec<_>) = cluster_slots
            .compute_weights_exclude_nonfrozen(slot, &repair_peers)
            .into_iter()
            .unzip();
        let k = WeightedIndex::new(weights)?.sample(&mut rand::thread_rng());
        let n = index[k];
        Ok((repair_peers[n].id, repair_peers[n].serve_repair))
    }

    pub(crate) fn map_repair_request(
        &self,
        repair_request: &ShredRepairType,
        repair_peer_id: &Pubkey,
        repair_stats: &mut RepairStats,
        nonce: Nonce,
        identity_keypair: &Keypair,
    ) -> Result<Vec<u8>> {
        let header = RepairRequestHeader {
            signature: Signature::default(),
            sender: self.my_id(),
            recipient: *repair_peer_id,
            timestamp: timestamp(),
            nonce,
        };
        let request_proto = match repair_request {
            ShredRepairType::Shred(slot, shred_index) => {
                repair_stats
                    .shred
                    .update(repair_peer_id, *slot, *shred_index);
                RepairProtocol::WindowIndex {
                    header,
                    slot: *slot,
                    shred_index: *shred_index,
                }
            }
            ShredRepairType::HighestShred(slot, shred_index) => {
                repair_stats
                    .highest_shred
                    .update(repair_peer_id, *slot, *shred_index);
                RepairProtocol::HighestWindowIndex {
                    header,
                    slot: *slot,
                    shred_index: *shred_index,
                }
            }
            ShredRepairType::Orphan(slot) => {
                repair_stats.orphan.update(repair_peer_id, *slot, 0);
                RepairProtocol::Orphan {
                    header,
                    slot: *slot,
                }
            }
        };
        Self::repair_proto_to_bytes(&request_proto, identity_keypair)
    }

    /// Distinguish and process `RepairResponse` ping packets ignoring other
    /// packets in the batch.
    pub(crate) fn handle_repair_response_pings(
        repair_socket: &UdpSocket,
        keypair: &Keypair,
        packet_batch: &mut PacketBatch,
        stats: &mut ShredFetchStats,
    ) {
        let mut pending_pongs = Vec::default();
        for packet in packet_batch.iter_mut() {
            if packet.meta().size != REPAIR_RESPONSE_SERIALIZED_PING_BYTES {
                continue;
            }
            if let Ok(RepairResponse::Ping(ping)) = packet.deserialize_slice(..) {
                if !ping.verify() {
                    // Do _not_ set `discard` to allow shred processing to attempt to
                    // handle the packet.
                    // Ping error count may include false positives for shreds of size
                    // `REPAIR_RESPONSE_SERIALIZED_PING_BYTES` whose first 4 bytes
                    // match the `RepairResponse` discriminator (these 4 bytes overlap
                    // with the shred signature field).
                    stats.ping_err_verify_count += 1;
                    continue;
                }
                packet.meta_mut().set_discard(true);
                stats.ping_count += 1;
                if let Ok(pong) = Pong::new(&ping, keypair) {
                    let pong = RepairProtocol::Pong(pong);
                    if let Ok(pong_bytes) = serialize(&pong) {
                        let from_addr = packet.meta().socket_addr();
                        pending_pongs.push((pong_bytes, from_addr));
                    }
                }
            }
        }
        if !pending_pongs.is_empty() {
            if let Err(SendPktsError::IoError(err, num_failed)) =
                batch_send(repair_socket, &pending_pongs)
            {
                warn!(
                    "batch_send failed to send {}/{} packets. First error: {:?}",
                    num_failed,
                    pending_pongs.len(),
                    err
                );
            }
        }
    }

    pub fn repair_proto_to_bytes(request: &RepairProtocol, keypair: &Keypair) -> Result<Vec<u8>> {
        debug_assert!(request.supports_signature());
        let mut payload = serialize(&request)?;
        let signable_data = [&payload[..4], &payload[4 + SIGNATURE_BYTES..]].concat();
        let signature = keypair.sign_message(&signable_data[..]);
        payload[4..4 + SIGNATURE_BYTES].copy_from_slice(signature.as_ref());
        Ok(payload)
    }
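
    // Signing sketch: for an n-byte serialized request, the signed message
    // is bytes [0..4) (the enum discriminator) concatenated with bytes
    // [4 + SIGNATURE_BYTES..n), and the resulting 64-byte signature is
    // written back into bytes [4..4 + SIGNATURE_BYTES). The tests below
    // rebuild the same signable slice to verify round-trips.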

    fn repair_peers(
        &self,
        repair_validators: &Option<HashSet<Pubkey>>,
        slot: Slot,
    ) -> Vec<ContactInfo> {
        if let Some(repair_validators) = repair_validators {
            repair_validators
                .iter()
                .filter_map(|key| {
                    if *key != self.my_id() {
                        self.cluster_info.lookup_contact_info(key, |ci| ci.clone())
                    } else {
                        None
                    }
                })
                .collect()
        } else {
            self.cluster_info.repair_peers(slot)
        }
    }
2020-01-31 14:23:51 -08:00
|
|
|
fn run_window_request(
|
2021-12-11 06:44:15 -08:00
|
|
|
recycler: &PacketBatchRecycler,
|
2020-01-31 14:23:51 -08:00
|
|
|
from_addr: &SocketAddr,
|
2022-08-01 11:46:45 -07:00
|
|
|
blockstore: &Blockstore,
|
2020-01-31 14:23:51 -08:00
|
|
|
slot: Slot,
|
|
|
|
shred_index: u64,
|
2020-05-19 12:38:18 -07:00
|
|
|
nonce: Nonce,
|
2021-12-11 06:44:15 -08:00
|
|
|
) -> Option<PacketBatch> {
|
2022-08-01 11:46:45 -07:00
|
|
|
// Try to find the requested index in one of the slots
|
|
|
|
let packet = repair_response::repair_response_packet(
|
|
|
|
blockstore,
|
|
|
|
slot,
|
|
|
|
shred_index,
|
|
|
|
from_addr,
|
|
|
|
nonce,
|
|
|
|
)?;
|
2020-01-31 14:23:51 -08:00
|
|
|
|
2022-08-01 11:46:45 -07:00
|
|
|
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
|
|
|
|
Some(PacketBatch::new_unpinned_with_recycler_data(
|
|
|
|
recycler,
|
|
|
|
"run_window_request",
|
|
|
|
vec![packet],
|
|
|
|
))
|
2020-01-31 14:23:51 -08:00
|
|
|
}

    fn run_highest_window_request(
        recycler: &PacketBatchRecycler,
        from_addr: &SocketAddr,
        blockstore: &Blockstore,
        slot: Slot,
        highest_index: u64,
        nonce: Nonce,
    ) -> Option<PacketBatch> {
        // Try to find the requested index in one of the slots
        let meta = blockstore.meta(slot).ok()??;
        if meta.received > highest_index {
            // meta.received must be at least 1 by this point
            let packet = repair_response::repair_response_packet(
                blockstore,
                slot,
                meta.received - 1,
                from_addr,
                nonce,
            )?;
            return Some(PacketBatch::new_unpinned_with_recycler_data(
                recycler,
                "run_highest_window_request",
                vec![packet],
            ));
        }
        None
    }

    fn run_orphan(
        recycler: &PacketBatchRecycler,
        from_addr: &SocketAddr,
        blockstore: &Blockstore,
        mut slot: Slot,
        max_responses: usize,
        nonce: Nonce,
    ) -> Option<PacketBatch> {
        let mut res =
            PacketBatch::new_unpinned_with_recycler(recycler.clone(), max_responses, "run_orphan");
        // Try to find the next "n" parent slots of the input slot
        while let Ok(Some(meta)) = blockstore.meta(slot) {
            if meta.received == 0 {
                break;
            }
            let packet = repair_response::repair_response_packet(
                blockstore,
                slot,
                meta.received - 1,
                from_addr,
                nonce,
            );
            if let Some(packet) = packet {
                res.push(packet);
            } else {
                break;
            }

            if meta.parent_slot.is_some() && res.len() < max_responses {
                slot = meta.parent_slot.unwrap();
            } else {
                break;
            }
        }
        if res.is_empty() {
            return None;
        }
        Some(res)
    }

    fn run_ancestor_hashes(
        recycler: &PacketBatchRecycler,
        from_addr: &SocketAddr,
        blockstore: &Blockstore,
        slot: Slot,
        nonce: Nonce,
    ) -> Option<PacketBatch> {
        let ancestor_slot_hashes = if blockstore.is_duplicate_confirmed(slot) {
            let ancestor_iterator =
                AncestorIteratorWithHash::from(AncestorIterator::new_inclusive(slot, blockstore));
            ancestor_iterator.take(MAX_ANCESTOR_RESPONSES).collect()
        } else {
            // If this slot is not duplicate confirmed, return nothing
            vec![]
        };
        let response = AncestorHashesResponse::Hashes(ancestor_slot_hashes);
        let serialized_response = serialize(&response).ok()?;

        // Could probably directly write response into packet via `serialize_into()`
        // instead of incurring extra copy in `repair_response_packet_from_bytes`, but
        // serialize_into doesn't return the written size...
        let packet = repair_response::repair_response_packet_from_bytes(
            serialized_response,
            from_addr,
            nonce,
        )?;
        Some(PacketBatch::new_unpinned_with_recycler_data(
            recycler,
            "run_ancestor_hashes",
            vec![packet],
        ))
    }
}

#[cfg(test)]
mod tests {
    use {
        super::*,
        crate::{repair_response, result::Error},
        solana_gossip::{socketaddr, socketaddr_any},
        solana_ledger::{
            blockstore::make_many_slot_entries,
            blockstore_processor::fill_blockstore_slot_with_ticks,
            genesis_utils::{create_genesis_config, GenesisConfigInfo},
            get_tmp_ledger_path,
            shred::{max_ticks_per_n_shreds, Shred, ShredFlags},
        },
        solana_perf::packet::{deserialize_from_with_limit, Packet},
        solana_runtime::bank::Bank,
        solana_sdk::{
            feature_set::FeatureSet, hash::Hash, pubkey::Pubkey, signature::Keypair,
            timing::timestamp,
        },
        solana_streamer::socket::SocketAddrSpace,
        std::{io::Cursor, net::Ipv4Addr},
    };

    #[test]
    fn test_serialized_ping_size() {
        let mut rng = rand::thread_rng();
        let keypair = Keypair::new();
        let ping = Ping::new_rand(&mut rng, &keypair).unwrap();
        let ping = RepairResponse::Ping(ping);
        let pkt = Packet::from_data(None, ping).unwrap();
        assert_eq!(pkt.meta().size, REPAIR_RESPONSE_SERIALIZED_PING_BYTES);
    }
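
    // A minimal added sanity check of the `ShredRepairType` contract
    // defined above; the slot and index values are arbitrary.
    #[test]
    fn test_shred_repair_type_expected_responses() {
        let request = ShredRepairType::HighestShred(123, 456);
        assert_eq!(request.slot(), 123);
        assert_eq!(request.num_expected_responses(), 1);
        assert_eq!(
            ShredRepairType::Orphan(7).num_expected_responses(),
            MAX_ORPHAN_REPAIR_RESPONSES as u32
        );
    }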

    #[test]
    fn test_deserialize_shred_as_ping() {
        let data_buf = vec![7u8; 44]; // REPAIR_RESPONSE_SERIALIZED_PING_BYTES - SIZE_OF_DATA_SHRED_HEADERS
        let keypair = Keypair::new();
        let mut shred = Shred::new_from_data(
            123, // slot
            456, // index
            111, // parent_offset
            &data_buf,
            ShredFlags::empty(),
            222, // reference_tick
            333, // version
            444, // fec_set_index
        );
        shred.sign(&keypair);
        let mut pkt = Packet::default();
        shred.copy_to_packet(&mut pkt);
        pkt.meta_mut().size = REPAIR_RESPONSE_SERIALIZED_PING_BYTES;
        let res = pkt.deserialize_slice::<RepairResponse, _>(..);
        if let Ok(RepairResponse::Ping(ping)) = res {
            assert!(!ping.verify());
        } else {
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_serialize_deserialize_signed_request() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new_for_tests(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let cluster_info = Arc::new(new_test_cluster_info());
        let serve_repair = ServeRepair::new(
            cluster_info.clone(),
            bank_forks,
            Arc::new(RwLock::new(HashSet::default())),
        );
        let keypair = cluster_info.keypair().clone();
        let repair_peer_id = solana_sdk::pubkey::new_rand();
        let repair_request = ShredRepairType::Orphan(123);

        let rsp = serve_repair
            .map_repair_request(
                &repair_request,
                &repair_peer_id,
                &mut RepairStats::default(),
                456,
                &keypair,
            )
            .unwrap();

        let mut cursor = Cursor::new(&rsp[..]);
        let deserialized_request: RepairProtocol =
            deserialize_from_with_limit(&mut cursor).unwrap();
        assert_eq!(cursor.position(), rsp.len() as u64);
        if let RepairProtocol::Orphan { header, slot } = deserialized_request {
            assert_eq!(slot, 123);
            assert_eq!(header.nonce, 456);
            assert_eq!(&header.sender, &serve_repair.my_id());
            assert_eq!(&header.recipient, &repair_peer_id);
            let signed_data = [&rsp[..4], &rsp[4 + SIGNATURE_BYTES..]].concat();
            assert!(header
                .signature
                .verify(keypair.pubkey().as_ref(), &signed_data));
        } else {
            panic!("unexpected request type {:?}", &deserialized_request);
        }
    }

    #[test]
    fn test_serialize_deserialize_ancestor_hashes_request() {
        let slot: Slot = 50;
        let nonce = 70;
        let cluster_info = Arc::new(new_test_cluster_info());
        let repair_peer_id = solana_sdk::pubkey::new_rand();
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let keypair = cluster_info.keypair().clone();

        let mut bank = Bank::new_for_tests(&genesis_config);
        bank.feature_set = Arc::new(FeatureSet::all_enabled());
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let serve_repair = ServeRepair::new(
            cluster_info,
            bank_forks,
            Arc::new(RwLock::new(HashSet::default())),
        );

        let request_bytes = serve_repair
            .ancestor_repair_request_bytes(&keypair, &repair_peer_id, slot, nonce)
            .unwrap();
        let mut cursor = Cursor::new(&request_bytes[..]);
        let deserialized_request: RepairProtocol =
            deserialize_from_with_limit(&mut cursor).unwrap();
        assert_eq!(cursor.position(), request_bytes.len() as u64);
        if let RepairProtocol::AncestorHashes {
            header,
            slot: deserialized_slot,
        } = deserialized_request
        {
            assert_eq!(deserialized_slot, slot);
            assert_eq!(header.nonce, nonce);
            assert_eq!(&header.sender, &serve_repair.my_id());
            assert_eq!(&header.recipient, &repair_peer_id);
            let signed_data = [&request_bytes[..4], &request_bytes[4 + SIGNATURE_BYTES..]].concat();
            assert!(header
                .signature
                .verify(keypair.pubkey().as_ref(), &signed_data));
        } else {
            panic!("unexpected request type {:?}", &deserialized_request);
        }
    }
|
|
|
|
|
|
|
|
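
    // The next test walks the same serialize/verify round trip for the two
    // windowed request kinds, WindowIndex and HighestWindowIndex.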
    #[test]
    fn test_map_requests_signed() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new_for_tests(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let cluster_info = Arc::new(new_test_cluster_info());
        let serve_repair = ServeRepair::new(
            cluster_info.clone(),
            bank_forks,
            Arc::new(RwLock::new(HashSet::default())),
        );
        let keypair = cluster_info.keypair().clone();
        let repair_peer_id = solana_sdk::pubkey::new_rand();

        let slot = 50;
        let shred_index = 60;
        let nonce = 70;

        let request = ShredRepairType::Shred(slot, shred_index);
        let request_bytes = serve_repair
            .map_repair_request(
                &request,
                &repair_peer_id,
                &mut RepairStats::default(),
                nonce,
                &keypair,
            )
            .unwrap();

        let mut cursor = Cursor::new(&request_bytes[..]);
        let deserialized_request: RepairProtocol =
            deserialize_from_with_limit(&mut cursor).unwrap();
        assert_eq!(cursor.position(), request_bytes.len() as u64);
        if let RepairProtocol::WindowIndex {
            header,
            slot: deserialized_slot,
            shred_index: deserialized_shred_index,
        } = deserialized_request
        {
            assert_eq!(deserialized_slot, slot);
            assert_eq!(deserialized_shred_index, shred_index);
            assert_eq!(header.nonce, nonce);
            assert_eq!(&header.sender, &serve_repair.my_id());
            assert_eq!(&header.recipient, &repair_peer_id);
            let signed_data = [&request_bytes[..4], &request_bytes[4 + SIGNATURE_BYTES..]].concat();
            assert!(header
                .signature
                .verify(keypair.pubkey().as_ref(), &signed_data));
        } else {
            panic!("unexpected request type {:?}", &deserialized_request);
        }

        let request = ShredRepairType::HighestShred(slot, shred_index);
        let request_bytes = serve_repair
            .map_repair_request(
                &request,
                &repair_peer_id,
                &mut RepairStats::default(),
                nonce,
                &keypair,
            )
            .unwrap();

        let mut cursor = Cursor::new(&request_bytes[..]);
        let deserialized_request: RepairProtocol =
            deserialize_from_with_limit(&mut cursor).unwrap();
        assert_eq!(cursor.position(), request_bytes.len() as u64);
        if let RepairProtocol::HighestWindowIndex {
            header,
            slot: deserialized_slot,
            shred_index: deserialized_shred_index,
        } = deserialized_request
        {
            assert_eq!(deserialized_slot, slot);
            assert_eq!(deserialized_shred_index, shred_index);
            assert_eq!(header.nonce, nonce);
            assert_eq!(&header.sender, &serve_repair.my_id());
            assert_eq!(&header.recipient, &repair_peer_id);
            let signed_data = [&request_bytes[..4], &request_bytes[4 + SIGNATURE_BYTES..]].concat();
            assert!(header
                .signature
                .verify(keypair.pubkey().as_ref(), &signed_data));
        } else {
            panic!("unexpected request type {:?}", &deserialized_request);
        }
    }

    #[test]
    fn test_verify_signed_packet() {
        let my_keypair = Keypair::new();
        let other_keypair = Keypair::new();

        fn sign_packet(packet: &mut Packet, keypair: &Keypair) {
            let signable_data = [
                packet.data(..4).unwrap(),
                packet.data(4 + SIGNATURE_BYTES..).unwrap(),
            ]
            .concat();
            let signature = keypair.sign_message(&signable_data[..]);
            packet.buffer_mut()[4..4 + SIGNATURE_BYTES].copy_from_slice(signature.as_ref());
        }
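
        // sign_packet signs the discriminant plus everything after the signature
        // slot, then splices the signature into bytes [4, 4 + SIGNATURE_BYTES),
        // matching the layout checked by verify_signed_packet below.
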
        // well formed packet
        let packet = {
            let header = RepairRequestHeader::new(
                my_keypair.pubkey(),
                other_keypair.pubkey(),
                timestamp(),
                678,
            );
            let slot = 239847;
            let request = RepairProtocol::Orphan { header, slot };
            let mut packet = Packet::from_data(None, &request).unwrap();
            sign_packet(&mut packet, &my_keypair);
            packet
        };
        let request: RepairProtocol = packet.deserialize_slice(..).unwrap();
        assert!(ServeRepair::verify_signed_packet(
            &other_keypair.pubkey(),
            &packet,
            &request,
            &mut ServeRepairStats::default(),
        ));

        // recipient mismatch
        let packet = {
            let header = RepairRequestHeader::new(
                my_keypair.pubkey(),
                other_keypair.pubkey(),
                timestamp(),
                678,
            );
            let slot = 239847;
            let request = RepairProtocol::Orphan { header, slot };
            let mut packet = Packet::from_data(None, &request).unwrap();
            sign_packet(&mut packet, &my_keypair);
            packet
        };
        let request: RepairProtocol = packet.deserialize_slice(..).unwrap();
        let mut stats = ServeRepairStats::default();
        assert!(!ServeRepair::verify_signed_packet(
            &my_keypair.pubkey(),
            &packet,
            &request,
            &mut stats,
        ));
        assert_eq!(stats.err_id_mismatch, 1);

        // outside time window
        let packet = {
            let time_diff_ms = u64::try_from(SIGNED_REPAIR_TIME_WINDOW.as_millis() * 2).unwrap();
            let old_timestamp = timestamp().saturating_sub(time_diff_ms);
            let header = RepairRequestHeader::new(
                my_keypair.pubkey(),
                other_keypair.pubkey(),
                old_timestamp,
                678,
            );
            let slot = 239847;
            let request = RepairProtocol::Orphan { header, slot };
            let mut packet = Packet::from_data(None, &request).unwrap();
            sign_packet(&mut packet, &my_keypair);
            packet
        };
        let request: RepairProtocol = packet.deserialize_slice(..).unwrap();
        let mut stats = ServeRepairStats::default();
        assert!(!ServeRepair::verify_signed_packet(
            &other_keypair.pubkey(),
            &packet,
            &request,
            &mut stats,
        ));
        assert_eq!(stats.err_time_skew, 1);

        // bad signature
        let packet = {
            let header = RepairRequestHeader::new(
                my_keypair.pubkey(),
                other_keypair.pubkey(),
                timestamp(),
                678,
            );
            let slot = 239847;
            let request = RepairProtocol::Orphan { header, slot };
            let mut packet = Packet::from_data(None, &request).unwrap();
            sign_packet(&mut packet, &other_keypair);
            packet
        };
        let request: RepairProtocol = packet.deserialize_slice(..).unwrap();
        let mut stats = ServeRepairStats::default();
        assert!(!ServeRepair::verify_signed_packet(
            &other_keypair.pubkey(),
            &packet,
            &request,
            &mut stats,
        ));
        assert_eq!(stats.err_sig_verify, 1);
    }

    #[test]
    fn test_run_highest_window_request() {
        run_highest_window_request(5, 3, 9);
    }

    /// test run_highest_window_request responds with the right shred, and does not overrun
    fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) {
        let recycler = PacketBatchRecycler::default();
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            let rv = ServeRepair::run_highest_window_request(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                0,
                0,
                nonce,
            );
            assert!(rv.is_none());

            let _ = fill_blockstore_slot_with_ticks(
                &blockstore,
                max_ticks_per_n_shreds(1, None) + 1,
                slot,
                slot - num_slots + 1,
                Hash::default(),
            );

            let index = 1;
            let rv = ServeRepair::run_highest_window_request(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot,
                index,
                nonce,
            )
            .expect("packets");
            let request = ShredRepairType::HighestShred(slot, index);
            verify_responses(&request, rv.iter());

            let rv: Vec<Shred> = rv
                .into_iter()
                .filter_map(|p| {
                    assert_eq!(repair_response::nonce(p).unwrap(), nonce);
                    Shred::new_from_serialized_shred(p.data(..).unwrap().to_vec()).ok()
                })
                .collect();
            assert!(!rv.is_empty());
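            // SlotMeta::received is one past the highest shred index stored for
            // the slot, so the highest available index is `received - 1`.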
            let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
            assert_eq!(rv[0].index(), index as u32);
            assert_eq!(rv[0].slot(), slot);

            let rv = ServeRepair::run_highest_window_request(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot,
                index + 1,
                nonce,
            );
            assert!(rv.is_none());
        }

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_run_window_request() {
        run_window_request(2, 9);
    }

    /// test run_window_request responds with the right shred, and does not overrun
    fn run_window_request(slot: Slot, nonce: Nonce) {
        let recycler = PacketBatchRecycler::default();
        solana_logger::setup();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            let rv = ServeRepair::run_window_request(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot,
                0,
                nonce,
            );
            assert!(rv.is_none());
            let shred = Shred::new_from_data(slot, 1, 1, &[], ShredFlags::empty(), 0, 2, 0);

            blockstore
                .insert_shreds(vec![shred], None, false)
                .expect("Expect successful ledger write");

            let index = 1;
            let rv = ServeRepair::run_window_request(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot,
                index,
                nonce,
            )
            .expect("packets");
            let request = ShredRepairType::Shred(slot, index);
            verify_responses(&request, rv.iter());
            let rv: Vec<Shred> = rv
                .into_iter()
                .filter_map(|p| {
                    assert_eq!(repair_response::nonce(p).unwrap(), nonce);
                    Shred::new_from_serialized_shred(p.data(..).unwrap().to_vec()).ok()
                })
                .collect();
            assert_eq!(rv[0].index(), 1);
            assert_eq!(rv[0].slot(), slot);
        }

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

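    // Shared helper: a localhost ClusterInfo with a fresh identity keypair.
    // SocketAddrSpace::Unspecified skips the global-address check, which (as
    // these tests assume) is what allows loopback contact-info addresses.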
    fn new_test_cluster_info() -> ClusterInfo {
        let keypair = Arc::new(Keypair::new());
        let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
        ClusterInfo::new(contact_info, keypair, SocketAddrSpace::Unspecified)
    }

2020-01-31 14:23:51 -08:00
|
|
|
#[test]
|
|
|
|
fn window_index_request() {
|
2022-07-31 15:48:51 -07:00
|
|
|
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
|
|
|
|
let bank = Bank::new_for_tests(&genesis_config);
|
|
|
|
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
2020-03-12 17:34:46 -07:00
|
|
|
let cluster_slots = ClusterSlots::default();
|
2023-01-24 08:57:55 -08:00
|
|
|
let cluster_info = Arc::new(new_test_cluster_info());
|
2022-12-15 19:24:23 -08:00
|
|
|
let serve_repair = ServeRepair::new(
|
|
|
|
cluster_info.clone(),
|
|
|
|
bank_forks,
|
|
|
|
Arc::new(RwLock::new(HashSet::default())),
|
|
|
|
);
|
2022-10-10 14:09:45 -07:00
|
|
|
let identity_keypair = cluster_info.keypair().clone();
|
2021-07-21 11:15:08 -07:00
|
|
|
let mut outstanding_requests = OutstandingShredRepairs::default();
|
2020-03-12 17:34:46 -07:00
|
|
|
let rv = serve_repair.repair_request(
|
|
|
|
&cluster_slots,
|
2021-07-15 19:29:53 -07:00
|
|
|
ShredRepairType::Shred(0, 0),
|
2021-07-07 07:12:09 -07:00
|
|
|
&mut LruCache::new(100),
|
2020-03-29 14:43:58 -07:00
|
|
|
&mut RepairStats::default(),
|
2020-08-21 00:35:11 -07:00
|
|
|
&None,
|
2021-04-20 09:37:33 -07:00
|
|
|
&mut outstanding_requests,
|
2022-10-10 14:09:45 -07:00
|
|
|
&identity_keypair,
|
2020-03-12 17:34:46 -07:00
|
|
|
);
|
2021-06-18 11:47:40 -07:00
|
|
|
assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers)));
|
2020-01-31 14:23:51 -08:00
|
|
|
|
2023-01-23 14:49:51 -08:00
|
|
|
let serve_repair_addr = socketaddr!(Ipv4Addr::LOCALHOST, 1243);
|
2020-02-12 12:58:51 -08:00
|
|
|
let nxt = ContactInfo {
|
2020-10-19 12:12:08 -07:00
|
|
|
id: solana_sdk::pubkey::new_rand(),
|
2023-01-23 14:49:51 -08:00
|
|
|
gossip: socketaddr!(Ipv4Addr::LOCALHOST, 1234),
|
|
|
|
tvu: socketaddr!(Ipv4Addr::LOCALHOST, 1235),
|
|
|
|
tvu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1236),
|
|
|
|
repair: socketaddr!(Ipv4Addr::LOCALHOST, 1237),
|
|
|
|
tpu: socketaddr!(Ipv4Addr::LOCALHOST, 1238),
|
|
|
|
tpu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1239),
|
|
|
|
tpu_vote: socketaddr!(Ipv4Addr::LOCALHOST, 1240),
|
|
|
|
rpc: socketaddr!(Ipv4Addr::LOCALHOST, 1241),
|
|
|
|
rpc_pubsub: socketaddr!(Ipv4Addr::LOCALHOST, 1242),
|
2020-02-12 12:58:51 -08:00
|
|
|
serve_repair: serve_repair_addr,
|
|
|
|
wallclock: 0,
|
|
|
|
shred_version: 0,
|
|
|
|
};
|
2020-04-21 12:54:45 -07:00
|
|
|
cluster_info.insert_info(nxt.clone());
|
2020-01-31 14:23:51 -08:00
|
|
|
let rv = serve_repair
|
2020-03-12 17:34:46 -07:00
|
|
|
.repair_request(
|
|
|
|
&cluster_slots,
|
2021-07-15 19:29:53 -07:00
|
|
|
ShredRepairType::Shred(0, 0),
|
2021-07-07 07:12:09 -07:00
|
|
|
&mut LruCache::new(100),
|
2020-03-29 14:43:58 -07:00
|
|
|
&mut RepairStats::default(),
|
2020-08-21 00:35:11 -07:00
|
|
|
&None,
|
2021-04-20 09:37:33 -07:00
|
|
|
&mut outstanding_requests,
|
2022-10-10 14:09:45 -07:00
|
|
|
&identity_keypair,
|
2020-03-12 17:34:46 -07:00
|
|
|
)
|
2020-01-31 14:23:51 -08:00
|
|
|
.unwrap();
|
|
|
|
assert_eq!(nxt.serve_repair, serve_repair_addr);
|
|
|
|
assert_eq!(rv.0, nxt.serve_repair);
|
|
|
|
|
|
|
|
let serve_repair_addr2 = socketaddr!([127, 0, 0, 2], 1243);
|
2020-02-12 12:58:51 -08:00
|
|
|
let nxt = ContactInfo {
|
2020-10-19 12:12:08 -07:00
|
|
|
id: solana_sdk::pubkey::new_rand(),
|
2023-01-23 14:49:51 -08:00
|
|
|
gossip: socketaddr!(Ipv4Addr::LOCALHOST, 1234),
|
|
|
|
tvu: socketaddr!(Ipv4Addr::LOCALHOST, 1235),
|
|
|
|
tvu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1236),
|
|
|
|
repair: socketaddr!(Ipv4Addr::LOCALHOST, 1237),
|
|
|
|
tpu: socketaddr!(Ipv4Addr::LOCALHOST, 1238),
|
|
|
|
tpu_forwards: socketaddr!(Ipv4Addr::LOCALHOST, 1239),
|
|
|
|
tpu_vote: socketaddr!(Ipv4Addr::LOCALHOST, 1240),
|
|
|
|
rpc: socketaddr!(Ipv4Addr::LOCALHOST, 1241),
|
|
|
|
rpc_pubsub: socketaddr!(Ipv4Addr::LOCALHOST, 1242),
|
2020-02-12 12:58:51 -08:00
|
|
|
serve_repair: serve_repair_addr2,
|
|
|
|
wallclock: 0,
|
|
|
|
shred_version: 0,
|
|
|
|
};
|
2020-04-21 12:54:45 -07:00
|
|
|
cluster_info.insert_info(nxt);
|
2020-01-31 14:23:51 -08:00
|
|
|
let mut one = false;
|
|
|
|
let mut two = false;
|
|
|
|
while !one || !two {
|
|
|
|
//this randomly picks an option, so eventually it should pick both
|
|
|
|
let rv = serve_repair
|
2020-03-12 17:34:46 -07:00
|
|
|
.repair_request(
|
|
|
|
&cluster_slots,
|
2021-07-15 19:29:53 -07:00
|
|
|
ShredRepairType::Shred(0, 0),
|
2021-07-07 07:12:09 -07:00
|
|
|
&mut LruCache::new(100),
|
2020-03-29 14:43:58 -07:00
|
|
|
&mut RepairStats::default(),
|
2020-08-21 00:35:11 -07:00
|
|
|
&None,
|
2021-04-20 09:37:33 -07:00
|
|
|
&mut outstanding_requests,
|
2022-10-10 14:09:45 -07:00
|
|
|
&identity_keypair,
|
2020-03-12 17:34:46 -07:00
|
|
|
)
|
2020-01-31 14:23:51 -08:00
|
|
|
.unwrap();
|
|
|
|
if rv.0 == serve_repair_addr {
|
|
|
|
one = true;
|
|
|
|
}
|
|
|
|
if rv.0 == serve_repair_addr2 {
|
|
|
|
two = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert!(one && two);
|
|
|
|
}
|
|
|
|
|
|
|
|
    #[test]
    fn test_run_orphan() {
        run_orphan(2, 3, 9);
    }

    fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) {
        solana_logger::setup();
        let recycler = PacketBatchRecycler::default();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            let rv =
                ServeRepair::run_orphan(&recycler, &socketaddr_any!(), &blockstore, slot, 0, nonce);
            assert!(rv.is_none());

            // Create slots [slot, slot + num_slots) with 5 shreds apiece
            let (shreds, _) = make_many_slot_entries(slot, num_slots, 5);

            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expect successful ledger write");

            // We don't have slot `slot + num_slots`, so we don't know how to service this request
            let rv = ServeRepair::run_orphan(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot + num_slots,
                5,
                nonce,
            );
            assert!(rv.is_none());

            // For an orphan request for `slot + num_slots - 1`, we should return the highest shreds
            // from slots in the range [slot, slot + num_slots - 1]
            let rv: Vec<_> = ServeRepair::run_orphan(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot + num_slots - 1,
                5,
                nonce,
            )
            .expect("run_orphan packets")
            .iter()
            .cloned()
            .collect();

            // Verify responses
            let request = ShredRepairType::Orphan(slot);
            verify_responses(&request, rv.iter());
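
            // Expected payload: for each slot in the range, the packet carrying
            // its highest shred (`received - 1`), newest slot first, matching how
            // run_orphan walks back the parent chain (a reading of the assertions
            // below, not a separate spec).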
            let expected: Vec<_> = (slot..slot + num_slots)
                .rev()
                .filter_map(|slot| {
                    let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
                    repair_response::repair_response_packet(
                        &blockstore,
                        slot,
                        index,
                        &socketaddr_any!(),
                        nonce,
                    )
                })
                .collect();
            assert_eq!(rv, expected);
        }

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    #[test]
    fn run_orphan_corrupted_shred_size() {
        solana_logger::setup();
        let recycler = PacketBatchRecycler::default();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            // Create slots [1, 2] with 1 shred apiece
            let (mut shreds, _) = make_many_slot_entries(1, 2, 1);

            assert_eq!(shreds[0].slot(), 1);
            assert_eq!(shreds[0].index(), 0);
            // TODO: The test previously relied on corrupting shred payload
            // size which we no longer want to expose. Current test no longer
            // covers packet size check in repair_response_packet_from_bytes.
            shreds.remove(0);
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expect successful ledger write");
            let nonce = 42;
            // Make sure a repair response for slot 1 cannot be built
            assert!(repair_response::repair_response_packet(
                &blockstore,
                1,
                0,
                &socketaddr_any!(),
                nonce,
            )
            .is_none());

            // An orphan request for slot 2 should only return slot 2's shred,
            // since `repair_response_packet` on slot 1 yields nothing
            let rv: Vec<_> =
                ServeRepair::run_orphan(&recycler, &socketaddr_any!(), &blockstore, 2, 5, nonce)
                    .expect("run_orphan packets")
                    .iter()
                    .cloned()
                    .collect();

            // Verify responses
            let expected = vec![repair_response::repair_response_packet(
                &blockstore,
                2,
                0,
                &socketaddr_any!(),
                nonce,
            )
            .unwrap()];
            assert_eq!(rv, expected);
        }

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_run_ancestor_hashes() {
        fn deserialize_ancestor_hashes_response(packet: &Packet) -> AncestorHashesResponse {
            packet
                .deserialize_slice(..packet.meta().size - SIZE_OF_NONCE)
                .unwrap()
        }
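
        // The nonce rides at the tail of the response packet, so the helper above
        // slices it off before deserializing the AncestorHashesResponse.
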
        solana_logger::setup();
        let recycler = PacketBatchRecycler::default();
        let ledger_path = get_tmp_ledger_path!();
        {
            let slot = 0;
            let num_slots = MAX_ANCESTOR_RESPONSES as u64;
            let nonce = 10;

            let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

            // Create slots [slot, slot + num_slots) with 5 shreds apiece
            let (shreds, _) = make_many_slot_entries(slot, num_slots, 5);

            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expect successful ledger write");

            // We don't have slot `slot + num_slots`, so we return empty
            let rv = ServeRepair::run_ancestor_hashes(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot + num_slots,
                nonce,
            )
            .expect("run_ancestor_hashes packets");
            assert_eq!(rv.len(), 1);
            let packet = &rv[0];
            let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet);
            match ancestor_hashes_response {
                AncestorHashesResponse::Hashes(hashes) => {
                    assert!(hashes.is_empty());
                }
                _ => {
                    panic!("unexpected response: {:?}", &ancestor_hashes_response);
                }
            }

            // `slot + num_slots - 1` is not marked duplicate confirmed, so the
            // response should be empty
            let rv = ServeRepair::run_ancestor_hashes(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot + num_slots - 1,
                nonce,
            )
            .expect("run_ancestor_hashes packets");
            assert_eq!(rv.len(), 1);
            let packet = &rv[0];
            let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet);
            match ancestor_hashes_response {
                AncestorHashesResponse::Hashes(hashes) => {
                    assert!(hashes.is_empty());
                }
                _ => {
                    panic!("unexpected response: {:?}", &ancestor_hashes_response);
                }
            }

            // Set duplicate confirmed
            let mut expected_ancestors = Vec::with_capacity(num_slots as usize);
            expected_ancestors.resize(num_slots as usize, (0, Hash::default()));
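            // Fill expected_ancestors newest slot first; the response lists
            // ancestors in descending slot order.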
            for (i, duplicate_confirmed_slot) in (slot..slot + num_slots).enumerate() {
                let frozen_hash = Hash::new_unique();
                expected_ancestors[num_slots as usize - i - 1] =
                    (duplicate_confirmed_slot, frozen_hash);
                blockstore.insert_bank_hash(duplicate_confirmed_slot, frozen_hash, true);
            }
            let rv = ServeRepair::run_ancestor_hashes(
                &recycler,
                &socketaddr_any!(),
                &blockstore,
                slot + num_slots - 1,
                nonce,
            )
            .expect("run_ancestor_hashes packets");
            assert_eq!(rv.len(), 1);
            let packet = &rv[0];
            let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet);
            match ancestor_hashes_response {
                AncestorHashesResponse::Hashes(hashes) => {
                    assert_eq!(hashes, expected_ancestors);
                }
                _ => {
                    panic!("unexpected response: {:?}", &ancestor_hashes_response);
                }
            }
        }

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_repair_with_repair_validators() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new_for_tests(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let cluster_slots = ClusterSlots::default();
        let cluster_info = Arc::new(new_test_cluster_info());
        let me = cluster_info.my_contact_info();

        // Insert two peers on the network
        let contact_info2 =
            ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
        let contact_info3 =
            ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
        cluster_info.insert_info(contact_info2.clone());
        cluster_info.insert_info(contact_info3.clone());
        let identity_keypair = cluster_info.keypair().clone();
        let serve_repair = ServeRepair::new(
            cluster_info,
            bank_forks,
            Arc::new(RwLock::new(HashSet::default())),
        );

        // If:
        // 1) the repair validator set doesn't exist in gossip, or
        // 2) the repair validator set only includes our own id,
        // then no repairs should be generated
        for pubkey in &[solana_sdk::pubkey::new_rand(), me.id] {
            let known_validators = Some(vec![*pubkey].into_iter().collect());
            assert!(serve_repair.repair_peers(&known_validators, 1).is_empty());
            assert!(serve_repair
                .repair_request(
                    &cluster_slots,
                    ShredRepairType::Shred(0, 0),
                    &mut LruCache::new(100),
                    &mut RepairStats::default(),
                    &known_validators,
                    &mut OutstandingShredRepairs::default(),
                    &identity_keypair,
                )
                .is_err());
        }

        // If a known validator exists in gossip, the repair request should succeed
        let known_validators = Some(vec![contact_info2.id].into_iter().collect());
        let repair_peers = serve_repair.repair_peers(&known_validators, 1);
        assert_eq!(repair_peers.len(), 1);
        assert_eq!(repair_peers[0].id, contact_info2.id);
        assert!(serve_repair
            .repair_request(
                &cluster_slots,
                ShredRepairType::Shred(0, 0),
                &mut LruCache::new(100),
                &mut RepairStats::default(),
                &known_validators,
                &mut OutstandingShredRepairs::default(),
                &identity_keypair,
            )
            .is_ok());

        // Using no known validators should default to all
        // validators available in gossip, excluding myself
        let repair_peers: HashSet<Pubkey> = serve_repair
            .repair_peers(&None, 1)
            .into_iter()
            .map(|c| c.id)
            .collect();
        assert_eq!(repair_peers.len(), 2);
        assert!(repair_peers.contains(&contact_info2.id));
        assert!(repair_peers.contains(&contact_info3.id));
        assert!(serve_repair
            .repair_request(
                &cluster_slots,
                ShredRepairType::Shred(0, 0),
                &mut LruCache::new(100),
                &mut RepairStats::default(),
                &None,
                &mut OutstandingShredRepairs::default(),
                &identity_keypair,
            )
            .is_ok());
    }

    #[test]
    fn test_verify_shred_response() {
        fn new_test_data_shred(slot: Slot, index: u32) -> Shred {
            Shred::new_from_data(slot, index, 1, &[], ShredFlags::empty(), 0, 0, 0)
        }
        let repair = ShredRepairType::Orphan(9);
        // Exhaustive match: ensure new variants are added to this test
        match repair {
            ShredRepairType::Orphan(_) => (),
            ShredRepairType::HighestShred(_, _) => (),
            ShredRepairType::Shred(_, _) => (),
        };

        let slot = 9;
        let index = 5;

        // Orphan
        let shred = new_test_data_shred(slot, 0);
        let request = ShredRepairType::Orphan(slot);
        assert!(request.verify_response(&shred));
        let shred = new_test_data_shred(slot - 1, 0);
        assert!(request.verify_response(&shred));
        let shred = new_test_data_shred(slot + 1, 0);
        assert!(!request.verify_response(&shred));

        // HighestShred
        let shred = new_test_data_shred(slot, index);
        let request = ShredRepairType::HighestShred(slot, index as u64);
        assert!(request.verify_response(&shred));
        let shred = new_test_data_shred(slot, index + 1);
        assert!(request.verify_response(&shred));
        let shred = new_test_data_shred(slot, index - 1);
        assert!(!request.verify_response(&shred));
        let shred = new_test_data_shred(slot - 1, index);
        assert!(!request.verify_response(&shred));
        let shred = new_test_data_shred(slot + 1, index);
        assert!(!request.verify_response(&shred));

        // Shred
        let shred = new_test_data_shred(slot, index);
        let request = ShredRepairType::Shred(slot, index as u64);
        assert!(request.verify_response(&shred));
        let shred = new_test_data_shred(slot, index + 1);
        assert!(!request.verify_response(&shred));
        let shred = new_test_data_shred(slot + 1, index);
        assert!(!request.verify_response(&shred));
    }

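    /// Deserializes each response packet back into a shred and asserts that it
    /// actually answers `request`.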
    fn verify_responses<'a>(request: &ShredRepairType, packets: impl Iterator<Item = &'a Packet>) {
        for packet in packets {
            let shred_payload = packet.data(..).unwrap().to_vec();
            let shred = Shred::new_from_serialized_shred(shred_payload).unwrap();
            assert!(request.verify_response(&shred));
        }
    }

    #[test]
    fn test_verify_ancestor_response() {
        let request_slot = MAX_ANCESTOR_RESPONSES as Slot;
        let repair = AncestorHashesRepairType(request_slot);
        let mut response: Vec<SlotHash> = (0..request_slot)
            .map(|slot| (slot, Hash::new_unique()))
            .collect();
        assert!(repair.verify_response(&AncestorHashesResponse::Hashes(response.clone())));

        // over the allowed limit, should fail
        response.push((request_slot, Hash::new_unique()));
        assert!(!repair.verify_response(&AncestorHashesResponse::Hashes(response)));
    }
}