Limit deserialization of data coming off the wire (#6751)

* Limit deserialization of data coming off the wire

* Feedback and cleanup
This commit is contained in:
Jack May 2019-11-06 00:07:57 -08:00 committed by GitHub
parent 8e3be6413e
commit 9614d17024
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 49 additions and 28 deletions

View File

@ -4,6 +4,7 @@ use crate::{
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}, cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo, contact_info::ContactInfo,
gossip_service::GossipService, gossip_service::GossipService,
packet::{limited_deserialize, PACKET_DATA_SIZE},
repair_service, repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy}, repair_service::{RepairService, RepairSlotRange, RepairStrategy},
result::{Error, Result}, result::{Error, Result},
@ -14,7 +15,6 @@ use crate::{
streamer::{receiver, responder, PacketReceiver}, streamer::{receiver, responder, PacketReceiver},
window_service::WindowService, window_service::WindowService,
}; };
use bincode::deserialize;
use crossbeam_channel::unbounded; use crossbeam_channel::unbounded;
use rand::{thread_rng, Rng, SeedableRng}; use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng; use rand_chacha::ChaChaRng;
@ -161,7 +161,7 @@ fn create_request_processor(
if let Ok(packets) = packets { if let Ok(packets) = packets {
for packet in &packets.packets { for packet in &packets.packets {
let req: result::Result<ArchiverRequest, Box<bincode::ErrorKind>> = let req: result::Result<ArchiverRequest, Box<bincode::ErrorKind>> =
deserialize(&packet.data[..packet.meta.size]); limited_deserialize(&packet.data[..packet.meta.size]);
match req { match req {
Ok(ArchiverRequest::GetSlotHeight(from)) => { Ok(ArchiverRequest::GetSlotHeight(from)) => {
if let Ok(blob) = to_shared_blob(slot, from) { if let Ok(blob) = to_shared_blob(slot, from) {
@ -933,7 +933,10 @@ impl Archiver {
socket.send_to(&serialized_req, to).unwrap(); socket.send_to(&serialized_req, to).unwrap();
let mut buf = [0; 1024]; let mut buf = [0; 1024];
if let Ok((size, _addr)) = socket.recv_from(&mut buf) { if let Ok((size, _addr)) = socket.recv_from(&mut buf) {
return deserialize(&buf[..size]).unwrap(); return bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&buf[..size])
.unwrap();
} }
sleep(Duration::from_millis(500)); sleep(Duration::from_millis(500));
} }

View File

@ -3,13 +3,12 @@
//! can do its processing in parallel with signature verification on the GPU. //! can do its processing in parallel with signature verification on the GPU.
use crate::{ use crate::{
cluster_info::ClusterInfo, cluster_info::ClusterInfo,
packet::{Packet, Packets, PACKETS_PER_BATCH}, packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry}, poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
poh_service::PohService, poh_service::PohService,
result::{Error, Result}, result::{Error, Result},
service::Service, service::Service,
}; };
use bincode::deserialize;
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError}; use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools; use itertools::Itertools;
use solana_ledger::{ use solana_ledger::{
@ -19,11 +18,10 @@ use solana_measure::measure::Measure;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn}; use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
use solana_perf::perf_libs; use solana_perf::perf_libs;
use solana_runtime::{accounts_db::ErrorCounters, bank::Bank, transaction_batch::TransactionBatch}; use solana_runtime::{accounts_db::ErrorCounters, bank::Bank, transaction_batch::TransactionBatch};
use solana_sdk::clock::MAX_TRANSACTION_FORWARDING_DELAY_GPU;
use solana_sdk::{ use solana_sdk::{
clock::{ clock::{
Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU,
}, },
poh_config::PohConfig, poh_config::PohConfig,
pubkey::Pubkey, pubkey::Pubkey,
@ -421,7 +419,7 @@ impl BankingStage {
fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> { fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> {
p.packets p.packets
.iter() .iter()
.map(|x| deserialize(&x.data[0..x.meta.size]).ok()) .map(|x| limited_deserialize(&x.data[0..x.meta.size]).ok())
.collect() .collect()
} }

View File

@ -404,6 +404,13 @@ pub fn index_blobs(
} }
} }
/// Deserialize `data` with bincode while capping the total number of bytes
/// the deserializer may consume at `BLOB_SIZE`.
///
/// Intended for data arriving off the wire (see commit title): an untrusted
/// payload can encode huge collection-length prefixes, and an unlimited
/// `bincode::deserialize` would attempt correspondingly huge allocations.
/// The byte limit makes such input fail with an `Err` instead.
///
/// Returns `bincode::Result<T>`: `Err` on malformed input or when the
/// configured size limit is exceeded.
pub fn limited_deserialize<T>(data: &[u8]) -> bincode::Result<T>
where
    T: serde::de::DeserializeOwned,
{
    // `limit` bounds the bytes bincode will read, independent of `data.len()`.
    bincode::config().limit(BLOB_SIZE as u64).deserialize(data)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -13,20 +13,20 @@
//! //!
//! Bank needs to provide an interface for us to query the stake weight //! Bank needs to provide an interface for us to query the stake weight
use crate::{ use crate::{
blob::{to_shared_blob, Blob, SharedBlob}, blob::{limited_deserialize, to_shared_blob, Blob, SharedBlob},
contact_info::ContactInfo, contact_info::ContactInfo,
crds_gossip::CrdsGossip, crds_gossip::CrdsGossip,
crds_gossip_error::CrdsGossipError, crds_gossip_error::CrdsGossipError,
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS}, crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, Vote}, crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, Vote},
packet::Packet, packet::{Packet, PACKET_DATA_SIZE},
repair_service::RepairType, repair_service::RepairType,
result::{Error, Result}, result::{Error, Result},
sendmmsg::{multicast, send_mmsg}, sendmmsg::{multicast, send_mmsg},
streamer::{BlobReceiver, BlobSender}, streamer::{BlobReceiver, BlobSender},
weighted_shuffle::{weighted_best, weighted_shuffle}, weighted_shuffle::{weighted_best, weighted_shuffle},
}; };
use bincode::{deserialize, serialize, serialized_size}; use bincode::{serialize, serialized_size};
use core::cmp; use core::cmp;
use itertools::Itertools; use itertools::Itertools;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
@ -38,7 +38,6 @@ use solana_netutil::{
}; };
use solana_sdk::{ use solana_sdk::{
clock::Slot, clock::Slot,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey, pubkey::Pubkey,
signature::{Keypair, KeypairUtil, Signable, Signature}, signature::{Keypair, KeypairUtil, Signable, Signature},
timing::{duration_as_ms, timestamp}, timing::{duration_as_ms, timestamp},
@ -1175,7 +1174,7 @@ impl ClusterInfo {
blobs.iter().for_each(|blob| { blobs.iter().for_each(|blob| {
let blob = blob.read().unwrap(); let blob = blob.read().unwrap();
let from_addr = blob.meta.addr(); let from_addr = blob.meta.addr();
deserialize(&blob.data[..blob.meta.size]) limited_deserialize(&blob.data[..blob.meta.size])
.into_iter() .into_iter()
.for_each(|request| match request { .for_each(|request| match request {
Protocol::PullRequest(filter, caller) => { Protocol::PullRequest(filter, caller) => {

View File

@ -85,6 +85,15 @@ pub fn to_packets<T: Serialize>(xs: &[T]) -> Vec<Packets> {
to_packets_chunked(xs, NUM_PACKETS) to_packets_chunked(xs, NUM_PACKETS)
} }
/// Deserialize `data` with bincode while capping the total number of bytes
/// the deserializer may consume at `PACKET_DATA_SIZE`.
///
/// Used for packets received off the wire: without a limit, a hostile
/// payload encoding an oversized length prefix could drive bincode into
/// excessive allocation. With the limit, such input yields an `Err`.
///
/// Returns `bincode::Result<T>`: `Err` on malformed input or when the
/// configured size limit is exceeded.
pub fn limited_deserialize<T>(data: &[u8]) -> bincode::Result<T>
where
    T: serde::de::DeserializeOwned,
{
    // `limit` bounds the bytes bincode will read, independent of `data.len()`.
    bincode::config()
        .limit(PACKET_DATA_SIZE as u64)
        .deserialize(data)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -9,7 +9,7 @@ use crate::{
validator::ValidatorExit, validator::ValidatorExit,
version::VERSION, version::VERSION,
}; };
use bincode::{deserialize, serialize}; use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result}; use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc; use jsonrpc_derive::rpc;
use solana_client::rpc_request::{RpcEpochInfo, RpcVoteAccountInfo, RpcVoteAccountStatus}; use solana_client::rpc_request::{RpcEpochInfo, RpcVoteAccountInfo, RpcVoteAccountStatus};
@ -655,10 +655,6 @@ impl RpcSol for RpcSolImpl {
} }
fn send_transaction(&self, meta: Self::Metadata, data: Vec<u8>) -> Result<String> { fn send_transaction(&self, meta: Self::Metadata, data: Vec<u8>) -> Result<String> {
let tx: Transaction = deserialize(&data).map_err(|err| {
info!("send_transaction: deserialize error: {:?}", err);
Error::invalid_request()
})?;
if data.len() >= PACKET_DATA_SIZE { if data.len() >= PACKET_DATA_SIZE {
info!( info!(
"send_transaction: transaction too large: {} bytes (max: {} bytes)", "send_transaction: transaction too large: {} bytes (max: {} bytes)",
@ -667,6 +663,14 @@ impl RpcSol for RpcSolImpl {
); );
return Err(Error::invalid_request()); return Err(Error::invalid_request());
} }
let tx: Transaction = bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&data)
.map_err(|err| {
info!("send_transaction: deserialize error: {:?}", err);
Error::invalid_request()
})?;
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_addr = get_tpu_addr(&meta.cluster_info)?; let tpu_addr = get_tpu_addr(&meta.cluster_info)?;
trace!("send_transaction: leader is {:?}", &tpu_addr); trace!("send_transaction: leader is {:?}", &tpu_addr);

View File

@ -1,8 +1,7 @@
#![allow(clippy::implicit_hasher)] #![allow(clippy::implicit_hasher)]
use crate::packet::{Packet, Packets}; use crate::packet::{limited_deserialize, Packet, Packets};
use crate::sigverify::{self, TxOffset}; use crate::sigverify::{self, TxOffset};
use crate::sigverify_stage::SigVerifier; use crate::sigverify_stage::SigVerifier;
use bincode::deserialize;
use rayon::iter::IndexedParallelIterator; use rayon::iter::IndexedParallelIterator;
use rayon::iter::IntoParallelIterator; use rayon::iter::IntoParallelIterator;
use rayon::iter::IntoParallelRefMutIterator; use rayon::iter::IntoParallelRefMutIterator;
@ -57,7 +56,8 @@ impl ShredSigVerifier {
let slot_end = slot_start + size_of::<u64>(); let slot_end = slot_start + size_of::<u64>();
trace!("slot {} {}", slot_start, slot_end,); trace!("slot {} {}", slot_start, slot_end,);
if slot_end <= packet.meta.size { if slot_end <= packet.meta.size {
let slot: u64 = deserialize(&packet.data[slot_start..slot_end]).ok()?; let slot: u64 =
limited_deserialize(&packet.data[slot_start..slot_end]).ok()?;
Some(slot) Some(slot)
} else { } else {
None None
@ -120,7 +120,7 @@ fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> O
if packet.meta.size < slot_end { if packet.meta.size < slot_end {
return Some(0); return Some(0);
} }
let slot: u64 = deserialize(&packet.data[slot_start..slot_end]).ok()?; let slot: u64 = limited_deserialize(&packet.data[slot_start..slot_end]).ok()?;
trace!("slot {}", slot); trace!("slot {}", slot);
let pubkey = slot_leaders.get(&slot)?; let pubkey = slot_leaders.get(&slot)?;
if packet.meta.size < sig_end { if packet.meta.size < sig_end {
@ -180,7 +180,7 @@ fn slot_key_data_for_gpu<
return std::u64::MAX; return std::u64::MAX;
} }
let slot: Option<u64> = let slot: Option<u64> =
deserialize(&packet.data[slot_start..slot_end]).ok(); limited_deserialize(&packet.data[slot_start..slot_end]).ok();
match slot { match slot {
Some(slot) if slot_keys.get(&slot).is_some() => slot, Some(slot) if slot_keys.get(&slot).is_some() => slot,
_ => std::u64::MAX, _ => std::u64::MAX,
@ -379,7 +379,7 @@ fn sign_shred_cpu(
"packet is not large enough for a slot" "packet is not large enough for a slot"
); );
let slot: u64 = let slot: u64 =
deserialize(&packet.data[slot_start..slot_end]).expect("can't deserialize slot"); limited_deserialize(&packet.data[slot_start..slot_end]).expect("can't deserialize slot");
trace!("slot {}", slot); trace!("slot {}", slot);
let pubkey = slot_leaders_pubkeys let pubkey = slot_leaders_pubkeys
.get(&slot) .get(&slot)

View File

@ -115,7 +115,9 @@ impl Shred {
where where
T: Deserialize<'de>, T: Deserialize<'de>,
{ {
let ret = bincode::deserialize(&buf[*index..*index + size])?; let ret = bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&buf[*index..*index + size])?;
*index += size; *index += size;
Ok(ret) Ok(ret)
} }

View File

@ -1,7 +1,7 @@
use log::*; use log::*;
use solana_sdk::account::KeyedAccount; use solana_sdk::account::KeyedAccount;
use solana_sdk::instruction::InstructionError; use solana_sdk::instruction::InstructionError;
use solana_sdk::instruction_processor_utils::next_keyed_account; use solana_sdk::instruction_processor_utils::{limited_deserialize, next_keyed_account};
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::system_instruction::{SystemError, SystemInstruction}; use solana_sdk::system_instruction::{SystemError, SystemInstruction};
use solana_sdk::system_program; use solana_sdk::system_program;
@ -102,8 +102,7 @@ pub fn process_instruction(
keyed_accounts: &mut [KeyedAccount], keyed_accounts: &mut [KeyedAccount],
data: &[u8], data: &[u8],
) -> Result<(), InstructionError> { ) -> Result<(), InstructionError> {
let instruction = let instruction = limited_deserialize(data)?;
bincode::deserialize(data).map_err(|_| InstructionError::InvalidInstructionData)?;
trace!("process_instruction: {:?}", instruction); trace!("process_instruction: {:?}", instruction);
trace!("keyed_accounts: {:?}", keyed_accounts); trace!("keyed_accounts: {:?}", keyed_accounts);