chain: Check the maximum block size when parsing

The maximum block size is 2,000,000 bytes. This commit also limits the
maximum transaction size in parsed blocks. (See #484 for the
corresponding limit on mempool transactions.)

The proptests might happen to generate blocks near the maximum size, but
they are randomised, so we also want explicit tests for large block
sizes. (See #482 for those test cases and tests.)

Part of #477.
teor 2020-06-15 21:14:09 +10:00 committed by Henry de Valence
parent 7710ee20dc
commit 210e11a86d
3 changed files with 37 additions and 8 deletions
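
The commit's core technique is std::io::Read::take, which wraps a reader so that it reports end-of-file once a byte budget is spent. A minimal standalone sketch (plain std, not Zebra code) of how that budget surfaces as UnexpectedEof:

use std::io::{Cursor, ErrorKind, Read};

fn main() {
    // Allow at most 8 bytes to be read, mirroring reader.take(MAX_BLOCK_BYTES).
    let mut limited = Cursor::new(vec![0u8; 16]).take(8);
    let mut buf = [0u8; 16];
    // read_exact wants 16 bytes, but the wrapper reports end-of-file after 8,
    // so the read fails with the same UnexpectedEof the block parser checks for.
    let err = limited.read_exact(&mut buf).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::UnexpectedEof);
}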


@@ -208,6 +208,14 @@ pub struct Block {
     pub transactions: Vec<Arc<Transaction>>,
 }
 
+/// The maximum size of a Zcash block, in bytes.
+///
+/// Post-Sapling, this is also the maximum size of a transaction
+/// in the Zcash specification. (But since blocks also contain a
+/// block header and transaction count, the maximum size of a
+/// transaction in the chain is approximately 1.5 kB smaller.)
+const MAX_BLOCK_BYTES: u64 = 2_000_000;
+
 impl Block {
     /// Return the block height reported in the coinbase transaction, if any.
     pub fn coinbase_height(&self) -> Option<BlockHeight> {
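
The "approximately 1.5 kB" claim in the doc comment above can be sanity-checked with back-of-envelope arithmetic. This sketch assumes the standard Zcash block header layout (including the 1,344-byte Equihash solution); the constants are illustrative, not part of this commit:

// version + previous block hash + merkle root + final sapling root + time
// + difficulty bits + nonce + solution length (compactSize) + Equihash solution
const BLOCK_HEADER_BYTES: u64 = 4 + 32 + 32 + 32 + 4 + 4 + 32 + 3 + 1344; // 1,487
const TX_COUNT_BYTES: u64 = 1; // compactSize encoding of a one-transaction count

fn main() {
    let max_chain_tx = 2_000_000 - BLOCK_HEADER_BYTES - TX_COUNT_BYTES;
    // 1,998,512 bytes: roughly 1.5 kB below MAX_BLOCK_BYTES, as documented.
    assert_eq!(max_chain_tx, 1_998_512);
}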
@@ -230,6 +238,9 @@ impl<'a> From<&'a Block> for BlockHeaderHash {
 
 impl ZcashSerialize for Block {
     fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
+        // All block structs are validated when they are parsed.
+        // So we don't need to check MAX_BLOCK_BYTES here, until
+        // we start generating our own blocks (see #483).
         self.header.zcash_serialize(&mut writer)?;
         self.transactions.zcash_serialize(&mut writer)?;
         Ok(())
@@ -237,10 +248,12 @@ impl ZcashSerialize for Block {
 }
 
 impl ZcashDeserialize for Block {
-    fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
+    fn zcash_deserialize<R: io::Read>(reader: R) -> Result<Self, SerializationError> {
+        // If the limit is reached, we'll get an UnexpectedEof error
+        let mut limited_reader = reader.take(MAX_BLOCK_BYTES);
         Ok(Block {
-            header: BlockHeader::zcash_deserialize(&mut reader)?,
-            transactions: Vec::zcash_deserialize(&mut reader)?,
+            header: BlockHeader::zcash_deserialize(&mut limited_reader)?,
+            transactions: Vec::zcash_deserialize(&mut limited_reader)?,
         })
     }
 }
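
Note that the header and the transaction vector are read through the same limited_reader, so they share one MAX_BLOCK_BYTES budget rather than each getting their own. A small sketch (plain std, not Zebra code) of that shared-budget behaviour:

use std::io::{Cursor, Read};

fn read_u32(reader: &mut impl Read) -> std::io::Result<u32> {
    let mut buf = [0u8; 4];
    reader.read_exact(&mut buf)?;
    Ok(u32::from_le_bytes(buf))
}

fn main() -> std::io::Result<()> {
    // Two values read through a single Take wrapper, like the header and
    // transaction reads above sharing limited_reader.
    let mut limited = Cursor::new(vec![1u8, 0, 0, 0, 2, 0, 0, 0]).take(100);
    assert_eq!(read_u32(&mut limited)?, 1);
    assert_eq!(read_u32(&mut limited)?, 2);
    // Both reads drew from the same budget: 100 - 8 bytes remain.
    assert_eq!(limited.limit(), 92);
    Ok(())
}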


@@ -1,4 +1,4 @@
-use std::io::{Cursor, Write};
+use std::io::{Cursor, ErrorKind, Write};
 
 use chrono::NaiveDateTime;
 use proptest::{
@@ -153,10 +153,25 @@ proptest! {
         let mut bytes = Cursor::new(Vec::new());
         block.zcash_serialize(&mut bytes)?;
 
-        bytes.set_position(0);
-        let other_block = Block::zcash_deserialize(&mut bytes)?;
+        // Check the block size limit
+        if bytes.position() <= MAX_BLOCK_BYTES {
+            bytes.set_position(0);
+            let other_block = Block::zcash_deserialize(&mut bytes)?;
 
-        prop_assert_eq![block, other_block];
+            prop_assert_eq![block, other_block];
+        } else {
+            let serialization_err = Block::zcash_deserialize(&mut bytes)
+                .expect_err("blocks larger than the maximum size should fail");
+            match serialization_err {
+                SerializationError::Io(io_err) => {
+                    prop_assert_eq![io_err.kind(), ErrorKind::UnexpectedEof];
+                }
+                _ => {
+                    prop_assert!(false,
+                                 "blocks larger than the maximum size should fail with an io::Error");
+                }
+            }
+        }
     }
 }
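
As the commit message says, randomised generation only sometimes produces blocks near the limit, which is why #482 tracks explicit large-block tests. Until then, the chance of exercising the oversized branch can be raised by running more cases. A hedged sketch using proptest's standard configuration hook (the test body is a placeholder, not from this commit):

use proptest::prelude::*;

proptest! {
    #![proptest_config(ProptestConfig::with_cases(4096))]

    // More cases per run means more chances of generating a block that
    // serializes to more than MAX_BLOCK_BYTES. (The count can also be set
    // at runtime via the PROPTEST_CASES environment variable.)
    #[test]
    fn placeholder_roundtrip(value in any::<u8>()) {
        // Placeholder body; the real round-trip test lives in this file.
        prop_assert_eq!(value, value);
    }
}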


@@ -76,7 +76,8 @@ impl<T: ZcashDeserialize> ZcashDeserialize for Vec<T> {
         // We're given len, so we could preallocate. But blindly preallocating
         // without a size bound can allow DOS attacks, and there's no way to
         // pass a size bound in a ZcashDeserialize impl, so instead we allocate
-        // as we read from the reader.
+        // as we read from the reader. (The maximum block and transaction sizes
+        // limit the eventual size of these allocations.)
         let mut vec = Vec::new();
         for _ in 0..len {
             vec.push(T::zcash_deserialize(&mut reader)?);
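
To make the preallocation comment concrete, here is an illustrative sketch (not Zebra code) of why trusting a wire-supplied length is dangerous, and how growing as you read plus a bounded reader caps the damage:

use std::io::Read;

// claimed_len comes off the wire, so it cannot be trusted:
// Vec::with_capacity(claimed_len as usize) would let an 8-byte message
// reserve gigabytes up front. Growing the Vec as bytes actually arrive
// means a bounded reader (like reader.take(MAX_BLOCK_BYTES)) caps it.
fn parse_bytes(mut reader: impl Read, claimed_len: u64) -> std::io::Result<Vec<u8>> {
    let mut vec = Vec::new();
    for _ in 0..claimed_len {
        let mut byte = [0u8; 1];
        reader.read_exact(&mut byte)?; // fails with UnexpectedEof at the cap
        vec.push(byte[0]);
    }
    Ok(vec)
}

fn main() {
    // A 3-byte input claiming a huge length fails fast instead of allocating.
    let input: &[u8] = &[1, 2, 3];
    assert!(parse_bytes(input.take(3), u64::MAX).is_err());
}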