diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index 0bbd4d14f..e840ab2b8 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -21,10 +21,17 @@ pub use hash::Hash; pub use header::BlockTimeError; pub use header::{CountedHeader, Header}; pub use height::Height; +pub use serialize::MAX_BLOCK_BYTES; use serde::{Deserialize, Serialize}; -use crate::{fmt::DisplayToDebug, parameters::Network, transaction::Transaction, transparent}; +use crate::{ + fmt::DisplayToDebug, + parameters::Network, + serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN}, + transaction::Transaction, + transparent, +}; /// A Zcash block, containing a header and a list of transactions. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] @@ -80,3 +87,63 @@ impl<'a> From<&'a Block> for Hash { (&block.header).into() } } +/// A serialized Block hash takes 32 bytes +const BLOCK_HASH_SIZE: u64 = 32; +/// The maximum number of hashes in a valid Zcash protocol message. +impl TrustedPreallocate for Hash { + fn max_allocation() -> u64 { + // Every vector type requires a length field of at least one byte for de/serialization. + // Since a block::Hash takes 32 bytes, we can never receive more than (MAX_PROTOCOL_MESSAGE_LEN - 1) / 32 hashes in a single message + ((MAX_PROTOCOL_MESSAGE_LEN - 1) as u64) / BLOCK_HASH_SIZE + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use super::{Hash, BLOCK_HASH_SIZE, MAX_PROTOCOL_MESSAGE_LEN}; + use crate::serialization::{TrustedPreallocate, ZcashSerialize}; + use proptest::prelude::*; + use std::convert::TryInto; + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(10_000))] + /// Verify that the serialized size of a block hash used to calculate the allocation limit is correct + #[test] + fn block_hash_size_is_correct(hash in Hash::arbitrary()) { + let serialized = hash.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 == BLOCK_HASH_SIZE); + } + } + proptest! { + + #![proptest_config(ProptestConfig::with_cases(200))] + + /// Verify that... + /// 1. The smallest disallowed vector of `Hash`s is too large to send via the Zcash Wire Protocol + /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message + #[test] + fn block_hash_max_allocation(hash in Hash::arbitrary_with(())) { + let max_allocation: usize = Hash::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(Hash::max_allocation()+1) { + smallest_disallowed_vec.push(hash); + } + + let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Hash::max_allocation()); + // Check that our smallest_disallowed_vec is too big to send as a protocol message + prop_assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of hashes + prop_assert!((largest_allowed_vec.len() as u64) == Hash::max_allocation()); + // Check that our 
largest_allowed_vec is small enough to send as a protocol message + prop_assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN); + + } + } +} diff --git a/zebra-chain/src/block/header.rs b/zebra-chain/src/block/header.rs index 8ab349212..be4c34ddc 100644 --- a/zebra-chain/src/block/header.rs +++ b/zebra-chain/src/block/header.rs @@ -1,7 +1,12 @@ +use std::usize; + use chrono::{DateTime, Duration, Utc}; use thiserror::Error; -use crate::work::{difficulty::CompactDifficulty, equihash::Solution}; +use crate::{ + serialization::{TrustedPreallocate, MAX_PROTOCOL_MESSAGE_LEN}, + work::{difficulty::CompactDifficulty, equihash::Solution}, +}; use super::{merkle, Hash, Height}; @@ -118,3 +123,79 @@ pub struct CountedHeader { pub header: Header, pub transaction_count: usize, } + +/// The serialized size of a Zcash block header. +/// +/// Includes the equihash input, 32-byte nonce, 3-byte equihash length field, and equihash solution. +const BLOCK_HEADER_LENGTH: usize = + crate::work::equihash::Solution::INPUT_LENGTH + 32 + 3 + crate::work::equihash::SOLUTION_SIZE; + +/// The minimum size for a serialized CountedHeader. +/// +/// A CountedHeader has BLOCK_HEADER_LENGTH bytes + 1 or more bytes for the transaction count +const MIN_COUNTED_HEADER_LEN: usize = BLOCK_HEADER_LENGTH + 1; +impl TrustedPreallocate for CountedHeader { + fn max_allocation() -> u64 { + // Every vector type requires a length field of at least one byte for de/serialization. + // Therefore, we can never receive more than (MAX_PROTOCOL_MESSAGE_LEN - 1) / MIN_COUNTED_HEADER_LEN counted headers in a single message + ((MAX_PROTOCOL_MESSAGE_LEN - 1) / MIN_COUNTED_HEADER_LEN) as u64 + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use super::{CountedHeader, Header, MAX_PROTOCOL_MESSAGE_LEN, MIN_COUNTED_HEADER_LEN}; + use crate::serialization::{TrustedPreallocate, ZcashSerialize}; + use proptest::prelude::*; + use std::convert::TryInto; + proptest! 
{ + + #![proptest_config(ProptestConfig::with_cases(10_000))] + + /// Confirm that each counted header takes at least COUNTED_HEADER_LEN bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + #[test] + fn counted_header_min_length(header in Header::arbitrary_with(()), transaction_count in (0..std::u32::MAX)) { + let header = CountedHeader { + header, + transaction_count: transaction_count.try_into().expect("Must run test on platform with at least 32 bit address space"), + }; + let serialized_header = header.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized_header.len() >= MIN_COUNTED_HEADER_LEN) + } + } + proptest! { + #![proptest_config(ProptestConfig::with_cases(100))] + /// Verify that... + /// 1. The smallest disallowed vector of `CountedHeaders`s is too large to send via the Zcash Wire Protocol + /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message + #[test] + fn counted_header_max_allocation(header in Header::arbitrary_with(())) { + let header = CountedHeader { + header, + transaction_count: 0, + }; + let max_allocation: usize = CountedHeader::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(CountedHeader::max_allocation()+1) { + smallest_disallowed_vec.push(header.clone()); + } + let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == CountedHeader::max_allocation()); + // Check that our smallest_disallowed_vec is too big to send as a protocol message + prop_assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN); + + + // Create largest_allowed_vec by removing one element from 
smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of CountedHeaders + prop_assert!((largest_allowed_vec.len() as u64) == CountedHeader::max_allocation()); + // Check that our largest_allowed_vec is small enough to send as a protocol message + prop_assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN); + } + } +} diff --git a/zebra-chain/src/sapling/arbitrary.rs b/zebra-chain/src/sapling/arbitrary.rs index 2e088d7d1..7f3faebcd 100644 --- a/zebra-chain/src/sapling/arbitrary.rs +++ b/zebra-chain/src/sapling/arbitrary.rs @@ -3,7 +3,9 @@ use proptest::{arbitrary::any, array, collection::vec, prelude::*}; use crate::primitives::Groth16Proof; -use super::{keys, note, tree, NoteCommitment, Output, PerSpendAnchor, Spend, ValueCommitment}; +use super::{ + keys, note, tree, NoteCommitment, Output, PerSpendAnchor, SharedAnchor, Spend, ValueCommitment, +}; impl Arbitrary for Spend { type Parameters = (); @@ -36,6 +38,34 @@ impl Arbitrary for Spend { type Strategy = BoxedStrategy; } +impl Arbitrary for Spend { + type Parameters = (); + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + ( + any::(), + array::uniform32(any::()), + any::(), + vec(any::(), 64), + ) + .prop_map(|(nullifier, rpk_bytes, proof, sig_bytes)| Self { + per_spend_anchor: (), + cv: ValueCommitment(AffinePoint::identity()), + nullifier, + rk: redjubjub::VerificationKeyBytes::from(rpk_bytes), + zkproof: proof, + spend_auth_sig: redjubjub::Signature::from({ + let mut b = [0u8; 64]; + b.copy_from_slice(sig_bytes.as_slice()); + b + }), + }) + .boxed() + } + + type Strategy = BoxedStrategy; +} + impl Arbitrary for Output { type Parameters = (); diff --git a/zebra-chain/src/sapling/output.rs 
b/zebra-chain/src/sapling/output.rs index 930070566..fa9bd4e40 100644 --- a/zebra-chain/src/sapling/output.rs +++ b/zebra-chain/src/sapling/output.rs @@ -1,8 +1,11 @@ use std::io; use crate::{ + block::MAX_BLOCK_BYTES, primitives::Groth16Proof, - serialization::{serde_helpers, SerializationError, ZcashDeserialize, ZcashSerialize}, + serialization::{ + serde_helpers, SerializationError, TrustedPreallocate, ZcashDeserialize, ZcashSerialize, + }, }; use super::{commitment, keys, note}; @@ -75,3 +78,73 @@ impl ZcashDeserialize for Output { }) } } +/// An output contains: a 32 byte cv, a 32 byte cmu, a 32 byte ephemeral key +/// a 580 byte encCiphertext, an 80 byte outCiphertext, and a 192 byte zkproof +/// [ps]: https://zips.z.cash/protocol/protocol.pdf#outputencoding +const OUTPUT_SIZE: u64 = 32 + 32 + 32 + 580 + 80 + 192; + +/// The maximum number of outputs in a valid Zcash on-chain transaction. +/// +/// If a transaction contains more outputs than can fit in maximally large block, it might be +/// valid on the network and in the mempool, but it can never be mined into a block. So +/// rejecting these large edge-case transactions can never break consensus +impl TrustedPreallocate for Output { + fn max_allocation() -> u64 { + // Since a serialized Vec uses at least one byte for its length, + // the max allocation can never exceed (MAX_BLOCK_BYTES - 1) / OUTPUT_SIZE + (MAX_BLOCK_BYTES - 1) / OUTPUT_SIZE + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use super::{Output, MAX_BLOCK_BYTES, OUTPUT_SIZE}; + use crate::serialization::{TrustedPreallocate, ZcashSerialize}; + use proptest::prelude::*; + use std::convert::TryInto; + + proptest! { + #![proptest_config(ProptestConfig::with_cases(10_000))] + + /// Confirm that each output takes exactly OUTPUT_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. 
+ #[test] + fn output_size_is_small_enough(output in Output::arbitrary_with(())) { + let serialized = output.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 == OUTPUT_SIZE) + } + + } + proptest! { + #![proptest_config(ProptestConfig::with_cases(100))] + /// Verify that... + /// 1. The smallest disallowed vector of `Outputs`s is too large to fit in a Zcash block + /// 2. The largest allowed vector is small enough to fit in a legal Zcash block + #[test] + fn output_max_allocation_is_big_enough(output in Output::arbitrary_with(())) { + + let max_allocation: usize = Output::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(Output::max_allocation()+1) { + smallest_disallowed_vec.push(output.clone()); + } + let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Output::max_allocation()); + // Check that our smallest_disallowed_vec is too big to be included in a valid block + // Note that a serialized block always includes at least one byte for the number of transactions, + // so any serialized Vec at least MAX_BLOCK_BYTES long is too large to fit in a block. 
+ prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of Outputs + prop_assert!((largest_allowed_vec.len() as u64) == Output::max_allocation()); + // Check that our largest_allowed_vec is small enough to fit in a Zcash block. + prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES); + } + } +} diff --git a/zebra-chain/src/sapling/spend.rs b/zebra-chain/src/sapling/spend.rs index c06b7d4dd..ef1ae14a9 100644 --- a/zebra-chain/src/sapling/spend.rs +++ b/zebra-chain/src/sapling/spend.rs @@ -6,12 +6,14 @@ use std::io; use crate::{ + block::MAX_BLOCK_BYTES, primitives::{ redjubjub::{self, SpendAuth}, Groth16Proof, }, serialization::{ - ReadZcashExt, SerializationError, WriteZcashExt, ZcashDeserialize, ZcashSerialize, + ReadZcashExt, SerializationError, TrustedPreallocate, WriteZcashExt, ZcashDeserialize, + ZcashSerialize, }, }; @@ -100,7 +102,6 @@ impl Spend { impl ZcashSerialize for Spend { fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { self.cv.zcash_serialize(&mut writer)?; - // TODO: V4 only writer.write_all(&self.per_spend_anchor.0[..])?; writer.write_32_bytes(&self.nullifier.into())?; writer.write_all(&<[u8; 32]>::from(self.rk)[..])?; @@ -115,7 +116,6 @@ impl ZcashDeserialize for Spend { use crate::sapling::{commitment::ValueCommitment, note::Nullifier}; Ok(Spend { cv: ValueCommitment::zcash_deserialize(&mut reader)?, - // TODO: V4 only per_spend_anchor: tree::Root(reader.read_32_bytes()?), nullifier: Nullifier::from(reader.read_32_bytes()?), rk: reader.read_32_bytes()?.into(), @@ -124,3 +124,172 @@ impl 
ZcashDeserialize for Spend { }) } } + +impl ZcashSerialize for Spend { + fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { + self.cv.zcash_serialize(&mut writer)?; + writer.write_32_bytes(&self.nullifier.into())?; + writer.write_all(&<[u8; 32]>::from(self.rk)[..])?; + // zkproof and spend_auth_sig are serialized separately + Ok(()) + } +} + +// zkproof and spend_auth_sig are deserialized separately, so we can only +// deserialize Spend in the context of a transaction + +/// The size of a spend with a shared anchor, including associated fields. +/// +/// A Spend contains: a 32 byte cv, a 32 byte anchor (transaction V4 only), +/// a 32 byte nullifier, a 32 byte rk, a 192 byte zkproof (serialized separately +/// in V5), and a 64 byte spendAuthSig (serialized separately in V5). +/// +/// [ps]: https://zips.z.cash/protocol/protocol.pdf#spendencoding +const SHARED_ANCHOR_SPEND_FULL_SIZE: u64 = SHARED_ANCHOR_SPEND_INITIAL_SIZE + 192 + 64; +/// The size of a spend with a shared anchor, without associated fields. +/// +/// This is the size of spends in the initial array, there are another +/// 2 arrays of zkproofs and spend_auth_sigs required in the transaction format. +const SHARED_ANCHOR_SPEND_INITIAL_SIZE: u64 = 32 + 32 + 32; + +/// The size of a spend with a per-spend anchor. +const ANCHOR_PER_SPEND_SIZE: u64 = SHARED_ANCHOR_SPEND_FULL_SIZE + 32; + +/// The maximum number of spends in a valid Zcash on-chain transaction V5. +/// +/// If a transaction contains more spends than can fit in maximally large block, it might be +/// valid on the network and in the mempool, but it can never be mined into a block. So +/// rejecting these large edge-case transactions can never break consensus. 
+impl TrustedPreallocate for Spend { + fn max_allocation() -> u64 { + // Since a serialized Vec uses at least one byte for its length, + // and the associated fields are required, + // a valid max allocation can never exceed this size + (MAX_BLOCK_BYTES - 1) / SHARED_ANCHOR_SPEND_FULL_SIZE + } +} + +/// The maximum number of spends in a valid Zcash on-chain transaction V4. +impl TrustedPreallocate for Spend { + fn max_allocation() -> u64 { + (MAX_BLOCK_BYTES - 1) / ANCHOR_PER_SPEND_SIZE + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use super::{ + Spend, ANCHOR_PER_SPEND_SIZE, MAX_BLOCK_BYTES, SHARED_ANCHOR_SPEND_FULL_SIZE, + SHARED_ANCHOR_SPEND_INITIAL_SIZE, + }; + use crate::{ + sapling::{AnchorVariant, PerSpendAnchor, SharedAnchor}, + serialization::{TrustedPreallocate, ZcashSerialize}, + }; + use proptest::prelude::*; + use std::convert::TryInto; + + proptest! { + #![proptest_config(ProptestConfig::with_cases(10_000))] + + /// Confirm that each spend takes exactly ANCHOR_PER_SPEND_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + #[test] + fn anchor_per_spend_size_is_small_enough(spend in Spend::::arbitrary_with(())) { + let serialized = spend.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 == ANCHOR_PER_SPEND_SIZE) + } + + /// Confirm that each spend takes exactly SHARED_SPEND_SIZE bytes when serialized. + #[test] + fn shared_anchor_spend_size_is_small_enough(spend in Spend::::arbitrary_with(())) { + let mut serialized_len = spend.zcash_serialize_to_vec().expect("Serialization to vec must succeed").len(); + serialized_len += spend.zkproof.zcash_serialize_to_vec().expect("Serialization to vec must succeed").len(); + serialized_len += &<[u8; 64]>::from(spend.spend_auth_sig).len(); + prop_assert!(serialized_len as u64 == SHARED_ANCHOR_SPEND_FULL_SIZE) + } + } + + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(100))] + + /// Verify that... + /// 1. The smallest disallowed vector of `Spend`s is too large to fit in a Zcash block + /// 2. The largest allowed vector is small enough to fit in a legal Zcash block + #[test] + fn anchor_per_spend_max_allocation_is_big_enough(spend in Spend::::arbitrary_with(())) { + let ( + smallest_disallowed_vec_len, + smallest_disallowed_serialized_len, + largest_allowed_vec_len, + largest_allowed_serialized_len, + ) = spend_max_allocation_is_big_enough(spend); + + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec_len - 1) as u64) == Spend::::max_allocation()); + // Check that our smallest_disallowed_vec is too big to send as a protocol message + // Note that a serialized block always includes at least one byte for the number of transactions, + // so any serialized Vec at least MAX_BLOCK_BYTES long is too large to fit in a block. + prop_assert!((smallest_disallowed_serialized_len as u64) >= MAX_BLOCK_BYTES); + + // Check that our largest_allowed_vec contains the maximum number of spends + prop_assert!((largest_allowed_vec_len as u64) == Spend::::max_allocation()); + // Check that our largest_allowed_vec is small enough to send as a protocol message + prop_assert!((largest_allowed_serialized_len as u64) <= MAX_BLOCK_BYTES); + } + + /// Verify trusted preallocation for `Spend` + #[test] + fn shared_spend_max_allocation_is_big_enough(spend in Spend::::arbitrary_with(())) { + let ( + smallest_disallowed_vec_len, + smallest_disallowed_serialized_len, + largest_allowed_vec_len, + largest_allowed_serialized_len, + ) = spend_max_allocation_is_big_enough(spend); + + prop_assert!(((smallest_disallowed_vec_len - 1) as u64) == Spend::::max_allocation()); + // Calculate the actual size of all required Spend fields + // + // TODO: modify the test to serialize the associated zkproof and + // spend_auth_sig fields + 
prop_assert!((smallest_disallowed_serialized_len as u64)/SHARED_ANCHOR_SPEND_INITIAL_SIZE*SHARED_ANCHOR_SPEND_FULL_SIZE >= MAX_BLOCK_BYTES); + + prop_assert!((largest_allowed_vec_len as u64) == Spend::::max_allocation()); + prop_assert!((largest_allowed_serialized_len as u64) <= MAX_BLOCK_BYTES); + } + } + + /// Return the + fn spend_max_allocation_is_big_enough( + spend: Spend, + ) -> (usize, usize, usize, usize) + where + AnchorV: AnchorVariant, + Spend: TrustedPreallocate + ZcashSerialize + Clone, + { + let max_allocation: usize = Spend::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(Spend::max_allocation() + 1) { + smallest_disallowed_vec.push(spend.clone()); + } + let smallest_disallowed_serialized = smallest_disallowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + let smallest_disallowed_vec_len = smallest_disallowed_vec.len(); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + + ( + smallest_disallowed_vec_len, + smallest_disallowed_serialized.len(), + largest_allowed_vec.len(), + largest_allowed_serialized.len(), + ) + } +} diff --git a/zebra-chain/src/serialization.rs b/zebra-chain/src/serialization.rs index 89046e0f9..61031fb2a 100644 --- a/zebra-chain/src/serialization.rs +++ b/zebra-chain/src/serialization.rs @@ -19,8 +19,8 @@ pub mod sha256d; pub use error::SerializationError; pub use read_zcash::ReadZcashExt; pub use write_zcash::WriteZcashExt; -pub use zcash_deserialize::{ZcashDeserialize, ZcashDeserializeInto}; -pub use zcash_serialize::ZcashSerialize; +pub use zcash_deserialize::{TrustedPreallocate, ZcashDeserialize, ZcashDeserializeInto}; +pub use 
zcash_serialize::{ZcashSerialize, MAX_PROTOCOL_MESSAGE_LEN}; #[cfg(test)] mod proptests; diff --git a/zebra-chain/src/serialization/error.rs b/zebra-chain/src/serialization/error.rs index 009732099..2c08d970a 100644 --- a/zebra-chain/src/serialization/error.rs +++ b/zebra-chain/src/serialization/error.rs @@ -1,4 +1,4 @@ -use std::io; +use std::{io, num::TryFromIntError}; use thiserror::Error; @@ -13,6 +13,9 @@ pub enum SerializationError { // XXX refine errors #[error("parse error: {0}")] Parse(&'static str), + /// The length of a vec is too large to convert to a usize (and thus, too large to allocate on this platform) + #[error("compactsize too large: {0}")] + TryFromIntError(#[from] TryFromIntError), /// An error caused when validating a zatoshi `Amount` #[error("input couldn't be parsed as a zatoshi `Amount`: {source}")] Amount { diff --git a/zebra-chain/src/serialization/zcash_deserialize.rs b/zebra-chain/src/serialization/zcash_deserialize.rs index ac9d5425a..70cfb8c99 100644 --- a/zebra-chain/src/serialization/zcash_deserialize.rs +++ b/zebra-chain/src/serialization/zcash_deserialize.rs @@ -1,6 +1,6 @@ -use std::io; +use std::{convert::TryInto, io}; -use super::{ReadZcashExt, SerializationError}; +use super::{ReadZcashExt, SerializationError, MAX_PROTOCOL_MESSAGE_LEN}; use byteorder::ReadBytesExt; /// Consensus-critical serialization for Zcash. @@ -18,15 +18,15 @@ pub trait ZcashDeserialize: Sized { fn zcash_deserialize(reader: R) -> Result; } -impl ZcashDeserialize for Vec { +impl ZcashDeserialize for Vec { fn zcash_deserialize(mut reader: R) -> Result { let len = reader.read_compactsize()?; - // We're given len, so we could preallocate. But blindly preallocating - // without a size bound can allow DOS attacks, and there's no way to - // pass a size bound in a ZcashDeserialize impl, so instead we allocate - // as we read from the reader. (The maximum block and transaction sizes - // limit the eventual size of these allocations.) 
- let mut vec = Vec::new(); + if len > T::max_allocation() { + return Err(SerializationError::Parse( + "Vector longer than max_allocation", + )); + } + let mut vec = Vec::with_capacity(len.try_into()?); for _ in 0..len { vec.push(T::zcash_deserialize(&mut reader)?); } @@ -65,3 +65,138 @@ impl ZcashDeserializeInto for R { T::zcash_deserialize(self) } } + +/// Blind preallocation of a Vec is based on a bounded length. This is in contrast +/// to blind preallocation of a generic Vec, which is a DOS vector. +/// +/// The max_allocation() function provides a loose upper bound on the size of the Vec +/// which can possibly be received from an honest peer. If this limit is too low, Zebra may reject valid messages. +/// In the worst case, setting the lower bound too low could cause Zebra to fall out of consensus by rejecting all messages containing a valid block. +pub trait TrustedPreallocate { + /// Provides a ***loose upper bound*** on the size of the Vec + /// which can possibly be received from an honest peer. + fn max_allocation() -> u64; +} + +/// The length of the longest valid `Vec` that can be received over the network +/// +/// It takes 5 bytes to encode a compactsize representing any number between 2^16 and (2^32 - 1) +/// MAX_PROTOCOL_MESSAGE_LEN is ~2^21, so the largest Vec that can be received from an honest peer is +/// (MAX_PROTOCOL_MESSAGE_LEN - 5); +const MAX_U8_ALLOCATION: usize = MAX_PROTOCOL_MESSAGE_LEN - 5; + +/// Implement ZcashDeserialize for Vec directly instead of using the blanket Vec implementation +/// +/// This allows us to optimize the inner loop into a single call to `read_exact()` +/// Note that we don't implement TrustedPreallocate for u8. +/// This allows the optimization without relying on specialization. 
+impl ZcashDeserialize for Vec { + fn zcash_deserialize(mut reader: R) -> Result { + let len = reader.read_compactsize()?.try_into()?; + if len > MAX_U8_ALLOCATION { + return Err(SerializationError::Parse( + "Vector longer than max_allocation", + )); + } + let mut vec = vec![0u8; len]; + reader.read_exact(&mut vec)?; + Ok(vec) + } +} + +#[cfg(test)] +mod test_u8_deserialize { + use super::MAX_U8_ALLOCATION; + use crate::serialization::MAX_PROTOCOL_MESSAGE_LEN; + use crate::serialization::{SerializationError, ZcashDeserialize, ZcashSerialize}; + use proptest::{collection::size_range, prelude::*}; + use std::matches; + + // Allow direct serialization of Vec for these tests. We don't usually allow this because some types have + // specific rules about serialization of their inner Vec. This method could be easily misused if it applied + // more generally. + impl ZcashSerialize for u8 { + fn zcash_serialize(&self, mut writer: W) -> Result<(), std::io::Error> { + writer.write_all(&[*self]) + } + } + + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(3))] + #[test] + /// Confirm that deserialize yields the expected result for any vec smaller than `MAX_U8_ALLOCATION` + fn u8_ser_deser_roundtrip(input in any_with::>(size_range(MAX_U8_ALLOCATION).lift()) ) { + let serialized = input.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + let cursor = std::io::Cursor::new(serialized); + let deserialized = >::zcash_deserialize(cursor).expect("deserialization from vec must succeed"); + prop_assert_eq!(deserialized, input) + } + } + + #[test] + /// Confirm that deserialize allows vectors with length up to and including `MAX_U8_ALLOCATION` + fn u8_deser_accepts_max_valid_input() { + let serialized = vec![0u8; MAX_U8_ALLOCATION] + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + let cursor = std::io::Cursor::new(serialized); + let deserialized = >::zcash_deserialize(cursor); + assert!(deserialized.is_ok()) + } + #[test] + /// Confirm that rejects vectors longer than `MAX_U8_ALLOCATION` + fn u8_deser_throws_when_input_too_large() { + let serialized = vec![0u8; MAX_U8_ALLOCATION + 1] + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + let cursor = std::io::Cursor::new(serialized); + let deserialized = >::zcash_deserialize(cursor); + + assert!(matches!( + deserialized, + Err(SerializationError::Parse( + "Vector longer than max_allocation" + )) + )) + } + + #[test] + /// Confirm that every u8 takes exactly 1 byte when serialized. + /// This verifies that our calculated `MAX_U8_ALLOCATION` is indeed an upper bound. + fn u8_size_is_correct() { + for byte in std::u8::MIN..=std::u8::MAX { + let serialized = byte + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + assert!(serialized.len() == 1) + } + } + + #[test] + /// Verify that... + /// 1. The smallest disallowed `Vec` is too big to include in a Zcash Wire Protocol message + /// 2. 
The largest allowed `Vec`is exactly the size of a maximal Zcash Wire Protocol message + fn u8_max_allocation_is_correct() { + let mut shortest_disallowed_vec = vec![0u8; MAX_U8_ALLOCATION + 1]; + let shortest_disallowed_serialized = shortest_disallowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + + // Confirm that shortest_disallowed_vec is only one item larger than the limit + assert_eq!((shortest_disallowed_vec.len() - 1), MAX_U8_ALLOCATION); + // Confirm that shortest_disallowed_vec is too large to be included in a valid zcash message + assert!(shortest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + shortest_disallowed_vec.pop(); + let longest_allowed_vec = shortest_disallowed_vec; + let longest_allowed_serialized = longest_allowed_vec + .zcash_serialize_to_vec() + .expect("serialization to vec must succed"); + + // Check that our largest_allowed_vec contains the maximum number of items + assert_eq!(longest_allowed_vec.len(), MAX_U8_ALLOCATION); + // Check that our largest_allowed_vec is the size of a maximal protocol message + assert_eq!(longest_allowed_serialized.len(), MAX_PROTOCOL_MESSAGE_LEN); + } +} diff --git a/zebra-chain/src/serialization/zcash_serialize.rs b/zebra-chain/src/serialization/zcash_serialize.rs index 1e66bb365..f12198411 100644 --- a/zebra-chain/src/serialization/zcash_serialize.rs +++ b/zebra-chain/src/serialization/zcash_serialize.rs @@ -38,3 +38,8 @@ impl ZcashSerialize for Vec { Ok(()) } } + +/// The maximum length of a Zcash message, in bytes. 
+/// +/// This value is used to calculate safe preallocation limits for some types +pub const MAX_PROTOCOL_MESSAGE_LEN: usize = 2 * 1024 * 1024; diff --git a/zebra-chain/src/sprout/joinsplit.rs b/zebra-chain/src/sprout/joinsplit.rs index 5262d2aa9..817e12080 100644 --- a/zebra-chain/src/sprout/joinsplit.rs +++ b/zebra-chain/src/sprout/joinsplit.rs @@ -4,10 +4,11 @@ use serde::{Deserialize, Serialize}; use crate::{ amount::{Amount, NonNegative}, - primitives::{x25519, ZkSnarkProof}, + block::MAX_BLOCK_BYTES, + primitives::{x25519, Bctv14Proof, Groth16Proof, ZkSnarkProof}, serialization::{ - ReadZcashExt, SerializationError, WriteZcashExt, ZcashDeserialize, ZcashDeserializeInto, - ZcashSerialize, + ReadZcashExt, SerializationError, TrustedPreallocate, WriteZcashExt, ZcashDeserialize, + ZcashDeserializeInto, ZcashSerialize, }, }; @@ -97,3 +98,133 @@ impl ZcashDeserialize for JoinSplit

{ }) } } + +/// The size of a joinsplit, excluding the ZkProof +/// +/// Excluding the ZkProof, a Joinsplit consists of an 8 byte vpub_old, an 8 byte vpub_new, a 32 byte anchor, +/// two 32 byte nullifiers, two 32 byte commitments, a 32 byte ephemeral key, a 32 byte random seed, +/// two 32 byte vmacs, and two 601 byte encrypted ciphertexts. +const JOINSPLIT_SIZE_WITHOUT_ZKPROOF: u64 = + 8 + 8 + 32 + (32 * 2) + (32 * 2) + 32 + 32 + (32 * 2) + (601 * 2); +/// The size of a joinsplit in a version 2 or 3 transaction, which uses a BCTV14 proof. +/// +/// A BCTV14 proof takes 296 bytes, per the Zcash [protocol specification §7.2][ps] +/// +/// [ps]: https://zips.z.cash/protocol/protocol.pdf#joinsplitencoding +const BCTV14_JOINSPLIT_SIZE: u64 = JOINSPLIT_SIZE_WITHOUT_ZKPROOF + 296; +/// The size of a joinsplit in a version 4+ transaction, which uses a Groth16 proof +/// +/// A Groth16 proof takes 192 bytes, per the Zcash [protocol specification §7.2][ps] +/// +/// [ps]: https://zips.z.cash/protocol/protocol.pdf#joinsplitencoding +const GROTH16_JOINSPLIT_SIZE: u64 = JOINSPLIT_SIZE_WITHOUT_ZKPROOF + 192; + +impl TrustedPreallocate for JoinSplit { + fn max_allocation() -> u64 { + // The longest Vec we receive from an honest peer must fit inside a valid block. + // Since encoding the length of the vec takes at least one byte + // (MAX_BLOCK_BYTES - 1) / BCTV14_JOINSPLIT_SIZE is a loose upper bound on the max allocation + (MAX_BLOCK_BYTES - 1) / BCTV14_JOINSPLIT_SIZE + } +} + +impl TrustedPreallocate for JoinSplit { + // The longest Vec we receive from an honest peer must fit inside a valid block. 
+ // Since encoding the length of the vec takes at least one byte + // (MAX_BLOCK_BYTES - 1) / GROTH16_JOINSPLIT_SIZE is a loose upper bound on the max allocation + fn max_allocation() -> u64 { + (MAX_BLOCK_BYTES - 1) / GROTH16_JOINSPLIT_SIZE + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use super::{ + Bctv14Proof, Groth16Proof, JoinSplit, BCTV14_JOINSPLIT_SIZE, GROTH16_JOINSPLIT_SIZE, + MAX_BLOCK_BYTES, + }; + use crate::serialization::{TrustedPreallocate, ZcashSerialize}; + use proptest::{prelude::*, proptest}; + use std::convert::TryInto; + + proptest! { + #![proptest_config(ProptestConfig::with_cases(1_000))] + #[test] + /// Confirm that each JoinSplit takes exactly BCTV14_JOINSPLIT_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + fn joinsplit_btcv14_size_is_correct(joinsplit in >::arbitrary_with(())) { + let serialized = joinsplit.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 == BCTV14_JOINSPLIT_SIZE) + } + + #[test] + /// Confirm that each JoinSplit takes exactly GROTH16_JOINSPLIT_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + fn joinsplit_groth16_size_is_correct(joinsplit in >::arbitrary_with(())) { + let serialized = joinsplit.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 == GROTH16_JOINSPLIT_SIZE) + } + } + proptest! { + #![proptest_config(ProptestConfig::with_cases(100))] + /// Verify that... + /// 1. The smallest disallowed vector of `JoinSplit`s is too large to fit in a Zcash block + /// 2. 
The largest allowed vector is small enough to fit in a legal Zcash block + #[test] + fn joinsplit_btcv14_max_allocation_is_correct(joinsplit in >::arbitrary_with(())) { + + let max_allocation: usize = >::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(>::max_allocation()+1) { + smallest_disallowed_vec.push(joinsplit.clone()); + } + let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == >::max_allocation()); + // Check that our smallest_disallowed_vec is too big to be included in a valid block + // Note that a serialized block always includes at least one byte for the number of transactions, + // so any serialized Vec<>> at least MAX_BLOCK_BYTES long is too large to fit in a block. + prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of > + prop_assert!((largest_allowed_vec.len() as u64) == >::max_allocation()); + // Check that our largest_allowed_vec is small enough to fit in a Zcash block. + prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES); + } + + /// Verify that... + /// 1. The smallest disallowed vector of `JoinSplit`s is too large to fit in a Zcash block + /// 2. 
The largest allowed vector is small enough to fit in a legal Zcash block + #[test] + fn joinsplit_groth16_max_allocation_is_correct(joinsplit in >::arbitrary_with(())) { + + let max_allocation: usize = >::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(>::max_allocation()+1) { + smallest_disallowed_vec.push(joinsplit.clone()); + } + let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == >::max_allocation()); + // Check that our smallest_disallowed_vec is too big to be included in a valid block + // Note that a serialized block always includes at least one byte for the number of transactions, + // so any serialized Vec<>> at least MAX_BLOCK_BYTES long is too large to fit in a block. + prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of > + prop_assert!((largest_allowed_vec.len() as u64) == >::max_allocation()); + // Check that our largest_allowed_vec is small enough to fit in a Zcash block. 
+ prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES); + } + } +} diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index b21788a23..d132217ef 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -6,11 +6,12 @@ use std::{io, sync::Arc}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crate::{ + block::MAX_BLOCK_BYTES, parameters::{OVERWINTER_VERSION_GROUP_ID, SAPLING_VERSION_GROUP_ID, TX_V5_VERSION_GROUP_ID}, primitives::ZkSnarkProof, serialization::{ - ReadZcashExt, SerializationError, WriteZcashExt, ZcashDeserialize, ZcashDeserializeInto, - ZcashSerialize, + ReadZcashExt, SerializationError, TrustedPreallocate, WriteZcashExt, ZcashDeserialize, + ZcashDeserializeInto, ZcashSerialize, }, sprout, }; @@ -353,3 +354,138 @@ where T::zcash_serialize(self, writer) } } + +/// A Tx Input must have an Outpoint (32 byte hash + 4 byte index), a 4 byte sequence number, +/// and a signature script, which always takes a min of 1 byte (for a length 0 script) +const MIN_TRANSPARENT_INPUT_SIZE: u64 = 32 + 4 + 4 + 1; +/// A Transparent output has an 8 byte value and script which takes a min of 1 byte +const MIN_TRANSPARENT_OUTPUT_SIZE: u64 = 8 + 1; +// All txs must have at least one input, a 4 byte locktime, and at least one output +const MIN_TRANSPARENT_TX_SIZE: u64 = MIN_TRANSPARENT_INPUT_SIZE + 4 + MIN_TRANSPARENT_OUTPUT_SIZE; + +/// No valid Zcash message contains more transactions than can fit in a single block +/// +/// `tx` messages contain a single transaction, and `block` messages are limited to the maximum +/// block size. +impl TrustedPreallocate for Arc { + fn max_allocation() -> u64 { + // A transparent transaction is the smallest transaction variant + MAX_BLOCK_BYTES / MIN_TRANSPARENT_TX_SIZE + } +} +/// The maximum number of inputs in a valid Zcash on-chain transaction. 
+/// +/// If a transaction contains more inputs than can fit in a maximally large block, it might be +/// valid on the network and in the mempool, but it can never be mined into a block. So +/// rejecting these large edge-case transactions can never break consensus. +impl TrustedPreallocate for transparent::Input { + fn max_allocation() -> u64 { + MAX_BLOCK_BYTES / MIN_TRANSPARENT_INPUT_SIZE + } +} +/// The maximum number of outputs in a valid Zcash on-chain transaction. +/// +/// If a transaction contains more outputs than can fit in a maximally large block, it might be +/// valid on the network and in the mempool, but it can never be mined into a block. So +/// rejecting these large edge-case transactions can never break consensus. +impl TrustedPreallocate for transparent::Output { + fn max_allocation() -> u64 { + MAX_BLOCK_BYTES / MIN_TRANSPARENT_OUTPUT_SIZE + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use super::{ + transparent::Input, transparent::Output, Transaction, MAX_BLOCK_BYTES, + MIN_TRANSPARENT_INPUT_SIZE, MIN_TRANSPARENT_OUTPUT_SIZE, MIN_TRANSPARENT_TX_SIZE, + }; + use crate::serialization::{TrustedPreallocate, ZcashSerialize}; + use proptest::prelude::*; + use std::{convert::TryInto, sync::Arc}; + proptest! { + #![proptest_config(ProptestConfig::with_cases(300))] + + /// Confirm that each transaction takes at least MIN_TRANSPARENT_TX_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + #[test] + fn tx_size_is_small_enough(tx in Transaction::arbitrary()) { + let serialized = tx.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_TX_SIZE) + } + + /// Confirm that each input takes at least MIN_TRANSPARENT_INPUT_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
+ #[test] + fn transparent_input_size_is_small_enough(input in Input::arbitrary()) { + let serialized = input.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_INPUT_SIZE) + } + + /// Confirm that each output takes at least MIN_TRANSPARENT_OUTPUT_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + #[test] + fn transparent_output_size_is_small_enough(output in Output::arbitrary()) { + let serialized = output.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_OUTPUT_SIZE) + } + + } + proptest! { + // This test is pretty slow, so only run a few + #![proptest_config(ProptestConfig::with_cases(7))] + #[test] + /// Verify the smallest disallowed vector of `Transaction`s is too large to fit in a Zcash block + fn tx_max_allocation_is_big_enough(tx in Transaction::arbitrary()) { + + let max_allocation: usize = <Arc<Transaction>>::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(<Arc<Transaction>>::max_allocation()+1) { + smallest_disallowed_vec.push(Arc::new(tx.clone())); + } + let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <Arc<Transaction>>::max_allocation()); + // Check that our smallest_disallowed_vec is too big to be included in a valid block + prop_assert!(serialized.len() as u64 > MAX_BLOCK_BYTES); + } + + #[test] + /// Verify the smallest disallowed vector of `Input`s is too large to fit in a Zcash block + fn input_max_allocation_is_big_enough(input in Input::arbitrary()) { + + let max_allocation: usize = Input::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec =
Vec::with_capacity(max_allocation + 1); + for _ in 0..(Input::max_allocation()+1) { + smallest_disallowed_vec.push(input.clone()); + } + let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Input::max_allocation()); + // Check that our smallest_disallowed_vec is too big to be included in a valid block + // Note that a serialized block always includes at least one byte for the number of transactions, + // so any serialized Vec at least MAX_BLOCK_BYTES long is too large to fit in a block. + prop_assert!(serialized.len() as u64 >= MAX_BLOCK_BYTES); + } + #[test] + /// Verify the smallest disallowed vector of `Output`s is too large to fit in a Zcash block + fn output_max_allocation_is_big_enough(output in Output::arbitrary()) { + + let max_allocation: usize = Output::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(Output::max_allocation()+1) { + smallest_disallowed_vec.push(output.clone()); + } + let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed"); + + // Check that our smallest_disallowed_vec is only one item larger than the limit + prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Output::max_allocation()); + // Check that our smallest_disallowed_vec is too big to be included in a valid block + // Note that a serialized block always includes at least one byte for the number of transactions, + // so any serialized Vec at least MAX_BLOCK_BYTES long is too large to fit in a block. 
+ prop_assert!(serialized.len() as u64 >= MAX_BLOCK_BYTES); + } + } +} diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index be239ff81..afd560852 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -10,10 +10,11 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use chrono::{DateTime, TimeZone, Utc}; use zebra_chain::serialization::{ - ReadZcashExt, SerializationError, WriteZcashExt, ZcashDeserialize, ZcashSerialize, + ReadZcashExt, SerializationError, TrustedPreallocate, WriteZcashExt, ZcashDeserialize, + ZcashSerialize, }; -use crate::protocol::types::PeerServices; +use crate::protocol::{external::MAX_PROTOCOL_MESSAGE_LEN, types::PeerServices}; use PeerAddrState::*; @@ -263,6 +264,76 @@ impl ZcashDeserialize for MetaAddr { Ok(MetaAddr::new_gossiped(&addr, &services, &last_seen)) } } +/// A serialized meta addr has a 4 byte time, 8 byte services, 16 byte IP addr, and 2 byte port +const META_ADDR_SIZE: usize = 4 + 8 + 16 + 2; +impl TrustedPreallocate for MetaAddr { + fn max_allocation() -> u64 { + // Since a maximal serialized Vec uses at least three bytes for its length (2MB messages / 30B MetaAddr implies the maximal length is much greater than 253) + // the max allocation can never exceed (MAX_PROTOCOL_MESSAGE_LEN - 3) / META_ADDR_SIZE + ((MAX_PROTOCOL_MESSAGE_LEN - 3) / META_ADDR_SIZE) as u64 + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use std::convert::TryInto; + + use super::{MetaAddr, MAX_PROTOCOL_MESSAGE_LEN, META_ADDR_SIZE}; + use super::{PeerAddrState, PeerServices}; + use chrono::{TimeZone, Utc}; + use zebra_chain::serialization::{TrustedPreallocate, ZcashSerialize}; + #[test] + /// Confirm that each MetaAddr takes exactly META_ADDR_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. 
+ fn meta_addr_size_is_correct() { + let addr = MetaAddr { + addr: ([192, 168, 0, 0], 8333).into(), + services: PeerServices::default(), + last_seen: Utc.timestamp(1_573_680_222, 0), + last_connection_state: PeerAddrState::Responded, + }; + let serialized = addr + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + assert!(serialized.len() == META_ADDR_SIZE) + } + #[test] + /// Verifies that... + /// 1. The smallest disallowed vector of `MetaAddrs`s is too large to fit in a legal Zcash message + /// 2. The largest allowed vector is small enough to fit in a legal Zcash message + fn meta_addr_max_allocation_is_correct() { + let addr = MetaAddr { + addr: ([192, 168, 0, 0], 8333).into(), + services: PeerServices::default(), + last_seen: Utc.timestamp(1_573_680_222, 0), + last_connection_state: PeerAddrState::Responded, + }; + let max_allocation: usize = MetaAddr::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(MetaAddr::max_allocation() + 1) { + smallest_disallowed_vec.push(addr); + } + let smallest_disallowed_serialized = smallest_disallowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + assert!(((smallest_disallowed_vec.len() - 1) as u64) == MetaAddr::max_allocation()); + // Check that our smallest_disallowed_vec is too big to send in a valid Zcash message + assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of MetaAddrs + 
assert!((largest_allowed_vec.len() as u64) == MetaAddr::max_allocation()); + // Check that our largest_allowed_vec is small enough to fit in a Zcash message. + assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN); + } +} #[cfg(test)] mod tests { diff --git a/zebra-network/src/protocol/external.rs b/zebra-network/src/protocol/external.rs index c9e96e6e6..872dd1446 100644 --- a/zebra-network/src/protocol/external.rs +++ b/zebra-network/src/protocol/external.rs @@ -7,6 +7,6 @@ mod message; /// Newtype wrappers for primitive types. pub mod types; -pub use codec::Codec; +pub use codec::{Codec, MAX_PROTOCOL_MESSAGE_LEN}; pub use inv::InventoryHash; pub use message::Message; diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index 519869f84..3ccc27a5c 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -32,7 +32,7 @@ use super::{ const HEADER_LEN: usize = 24usize; /// Maximum size of a protocol message body. -const MAX_PROTOCOL_MESSAGE_LEN: usize = 2 * 1024 * 1024; +pub use zebra_chain::serialization::MAX_PROTOCOL_MESSAGE_LEN; /// A codec which produces Bitcoin messages from byte streams and vice versa. pub struct Codec { diff --git a/zebra-network/src/protocol/external/inv.rs b/zebra-network/src/protocol/external/inv.rs index 9a558402d..cb12eb036 100644 --- a/zebra-network/src/protocol/external/inv.rs +++ b/zebra-network/src/protocol/external/inv.rs @@ -10,10 +10,14 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use zebra_chain::{ block, - serialization::{ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize}, + serialization::{ + ReadZcashExt, SerializationError, TrustedPreallocate, ZcashDeserialize, ZcashSerialize, + }, transaction, }; +use super::MAX_PROTOCOL_MESSAGE_LEN; + /// An inventory hash which refers to some advertised or requested data. 
/// /// Bitcoin calls this an "inventory vector" but it is just a typed hash, not a @@ -81,3 +85,85 @@ impl ZcashDeserialize for InventoryHash { } } } + +const INV_HASH_SIZE: usize = 36; +impl TrustedPreallocate for InventoryHash { + fn max_allocation() -> u64 { + // An Inventory hash takes 36 bytes, and we reserve at least one byte for the Vector length + // so we can never receive more than ((MAX_PROTOCOL_MESSAGE_LEN - 1) / 36) in a single message + ((MAX_PROTOCOL_MESSAGE_LEN - 1) / INV_HASH_SIZE) as u64 + } +} + +#[cfg(test)] +mod test_trusted_preallocate { + use std::convert::TryInto; + + use super::{InventoryHash, INV_HASH_SIZE, MAX_PROTOCOL_MESSAGE_LEN}; + use zebra_chain::{ + block, + serialization::{TrustedPreallocate, ZcashSerialize}, + transaction, + }; + #[test] + /// Confirm that each InventoryHash takes exactly INV_HASH_SIZE bytes when serialized. + /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound. + fn inv_hash_size_is_correct() { + let block_hash = block::Hash([1u8; 32]); + let tx_hash = transaction::Hash([1u8; 32]); + let inv_block = InventoryHash::Block(block_hash); + let serialized_inv_block = inv_block + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + assert!(serialized_inv_block.len() == INV_HASH_SIZE); + + let inv_filtered_block = InventoryHash::FilteredBlock(block_hash); + let serialized_inv_filtered = inv_filtered_block + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + assert!(serialized_inv_filtered.len() == INV_HASH_SIZE); + + let inv_tx = InventoryHash::Tx(tx_hash); + let serialized_inv_tx = inv_tx + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + assert!(serialized_inv_tx.len() == INV_HASH_SIZE); + + let inv_err = InventoryHash::Error; + let serializd_inv_err = inv_err + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + assert!(serializd_inv_err.len() == INV_HASH_SIZE) + } + 
#[test] + /// Verifies that... + /// 1. The smallest disallowed vector of `InventoryHash`s is too large to fit in a legal Zcash message + /// 2. The largest allowed vector is small enough to fit in a legal Zcash message + fn inv_hash_max_allocation_is_correct() { + let inv = InventoryHash::Error; + let max_allocation: usize = InventoryHash::max_allocation().try_into().unwrap(); + let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1); + for _ in 0..(InventoryHash::max_allocation() + 1) { + smallest_disallowed_vec.push(inv); + } + let smallest_disallowed_serialized = smallest_disallowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + // Check that our smallest_disallowed_vec is only one item larger than the limit + assert!(((smallest_disallowed_vec.len() - 1) as u64) == InventoryHash::max_allocation()); + // Check that our smallest_disallowed_vec is too big to fit in a Zcash message. + assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN); + + // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency) + smallest_disallowed_vec.pop(); + let largest_allowed_vec = smallest_disallowed_vec; + let largest_allowed_serialized = largest_allowed_vec + .zcash_serialize_to_vec() + .expect("Serialization to vec must succeed"); + + // Check that our largest_allowed_vec contains the maximum number of InventoryHashes + assert!((largest_allowed_vec.len() as u64) == InventoryHash::max_allocation()); + // Check that our largest_allowed_vec is small enough to fit in a Zcash message. + assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN); + } +}