diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs
index 107e535aa..2cce8d6ce 100644
--- a/zebra-chain/src/block.rs
+++ b/zebra-chain/src/block.rs
@@ -208,6 +208,14 @@ pub struct Block {
     pub transactions: Vec<Arc<Transaction>>,
 }
 
+/// The maximum size of a Zcash block, in bytes.
+///
+/// Post-Sapling, this is also the maximum size of a transaction
+/// in the Zcash specification. (But since blocks also contain a
+/// block header and transaction count, the maximum size of a
+/// transaction in the chain is approximately 1.5 kB smaller.)
+const MAX_BLOCK_BYTES: u64 = 2_000_000;
+
 impl Block {
     /// Return the block height reported in the coinbase transaction, if any.
     pub fn coinbase_height(&self) -> Option<BlockHeight> {
@@ -230,6 +238,9 @@ impl<'a> From<&'a Block> for BlockHeaderHash {
 
 impl ZcashSerialize for Block {
     fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
+        // All block structs are validated when they are parsed.
+        // So we don't need to check MAX_BLOCK_BYTES here, until
+        // we start generating our own blocks (see #483).
         self.header.zcash_serialize(&mut writer)?;
         self.transactions.zcash_serialize(&mut writer)?;
         Ok(())
@@ -237,10 +248,12 @@ impl ZcashSerialize for Block {
 }
 
 impl ZcashDeserialize for Block {
-    fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
+    fn zcash_deserialize<R: io::Read>(reader: R) -> Result<Self, SerializationError> {
+        // If the limit is reached, we'll get an UnexpectedEof error
+        let mut limited_reader = reader.take(MAX_BLOCK_BYTES);
         Ok(Block {
-            header: BlockHeader::zcash_deserialize(&mut reader)?,
-            transactions: Vec::zcash_deserialize(&mut reader)?,
+            header: BlockHeader::zcash_deserialize(&mut limited_reader)?,
+            transactions: Vec::zcash_deserialize(&mut limited_reader)?,
         })
     }
 }
diff --git a/zebra-chain/src/block/tests.rs b/zebra-chain/src/block/tests.rs
index c068de4dc..fa1ade145 100644
--- a/zebra-chain/src/block/tests.rs
+++ b/zebra-chain/src/block/tests.rs
@@ -1,4 +1,4 @@
-use std::io::{Cursor, Write};
+use std::io::{Cursor, ErrorKind, Write};
 
 use chrono::NaiveDateTime;
 use proptest::{
@@ -153,10 +153,25 @@ proptest! {
         let mut bytes = Cursor::new(Vec::new());
         block.zcash_serialize(&mut bytes)?;
 
-        bytes.set_position(0);
-        let other_block = Block::zcash_deserialize(&mut bytes)?;
+        // Check the block size limit
+        if bytes.position() <= MAX_BLOCK_BYTES {
+            bytes.set_position(0);
+            let other_block = Block::zcash_deserialize(&mut bytes)?;
 
-        prop_assert_eq![block, other_block];
+            prop_assert_eq![block, other_block];
+        } else {
+            let serialization_err = Block::zcash_deserialize(&mut bytes)
+                .expect_err("blocks larger than the maximum size should fail");
+            match serialization_err {
+                SerializationError::Io(io_err) => {
+                    prop_assert_eq![io_err.kind(), ErrorKind::UnexpectedEof];
+                }
+                _ => {
+                    prop_assert!(false,
+                        "blocks larger than the maximum size should fail with an io::Error");
+                }
+            }
+        }
     }
 }
 
diff --git a/zebra-chain/src/serialization.rs b/zebra-chain/src/serialization.rs
index 942bf5675..6bd1f6106 100644
--- a/zebra-chain/src/serialization.rs
+++ b/zebra-chain/src/serialization.rs
@@ -76,7 +76,8 @@ impl<T: ZcashDeserialize> ZcashDeserialize for Vec<T> {
         // We're given len, so we could preallocate. But blindly preallocating
         // without a size bound can allow DOS attacks, and there's no way to
         // pass a size bound in a ZcashDeserialize impl, so instead we allocate
-        // as we read from the reader.
+        // as we read from the reader. (The maximum block and transaction sizes
+        // limit the eventual size of these allocations.)
         let mut vec = Vec::new();
         for _ in 0..len {
             vec.push(T::zcash_deserialize(&mut reader)?);