Move the preallocate tests into their own files (#1977)

* Move the preallocate tests into their own files

And move the MetaAddr proptest into its own file.

Also do some minor formatting and cleanups.

Co-authored-by: Deirdre Connolly <durumcrustulum@gmail.com>
This commit is contained in:
teor 2021-04-07 12:32:27 +10:00 committed by GitHub
parent 05b60db993
commit 64662a758d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 677 additions and 665 deletions

28
Cargo.lock generated
View File

@ -914,20 +914,6 @@ dependencies = [
"syn 1.0.60",
]
[[package]]
name = "ed25519-zebra"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a128b76af6dd4b427e34a6fd43dc78dbfe73672ec41ff615a2414c1a0ad0409"
dependencies = [
"curve25519-dalek",
"hex",
"rand_core 0.5.1",
"serde",
"sha2",
"thiserror",
]
[[package]]
name = "ed25519-zebra"
version = "2.2.0"
@ -942,6 +928,20 @@ dependencies = [
"zeroize",
]
[[package]]
name = "ed25519-zebra"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a128b76af6dd4b427e34a6fd43dc78dbfe73672ec41ff615a2414c1a0ad0409"
dependencies = [
"curve25519-dalek",
"hex",
"rand_core 0.5.1",
"serde",
"sha2",
"thiserror",
]
[[package]]
name = "either"
version = "1.6.1"

View File

@ -91,8 +91,10 @@ impl<'a> From<&'a Block> for Hash {
(&block.header).into()
}
}
/// A serialized Block hash takes 32 bytes
const BLOCK_HASH_SIZE: u64 = 32;
/// The maximum number of hashes in a valid Zcash protocol message.
impl TrustedPreallocate for Hash {
fn max_allocation() -> u64 {
@ -101,53 +103,3 @@ impl TrustedPreallocate for Hash {
((MAX_PROTOCOL_MESSAGE_LEN - 1) as u64) / BLOCK_HASH_SIZE
}
}
/// Property tests for `TrustedPreallocate` on block `Hash`.
///
/// NOTE(review): this module is being deleted by this commit and replaced by
/// `block/tests/preallocate.rs` — verify the new file keeps the same coverage.
#[cfg(test)]
mod test_trusted_preallocate {
    use super::{Hash, BLOCK_HASH_SIZE, MAX_PROTOCOL_MESSAGE_LEN};
    use crate::serialization::{TrustedPreallocate, ZcashSerialize};
    use proptest::prelude::*;
    use std::convert::TryInto;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10_000))]
        /// Verify that the serialized size of a block hash used to calculate the allocation limit is correct
        #[test]
        fn block_hash_size_is_correct(hash in Hash::arbitrary()) {
            let serialized = hash.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            prop_assert!(serialized.len() as u64 == BLOCK_HASH_SIZE);
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(200))]
        /// Verify that...
        /// 1. The smallest disallowed vector of `Hash`s is too large to send via the Zcash Wire Protocol
        /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message
        #[test]
        fn block_hash_max_allocation(hash in Hash::arbitrary_with(())) {
            let max_allocation: usize = Hash::max_allocation().try_into().unwrap();
            let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
            for _ in 0..(Hash::max_allocation()+1) {
                smallest_disallowed_vec.push(hash);
            }
            let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            // Check that our smallest_disallowed_vec is only one item larger than the limit
            prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Hash::max_allocation());
            // Check that our smallest_disallowed_vec is too big to send as a protocol message
            prop_assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);
            // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
            smallest_disallowed_vec.pop();
            let largest_allowed_vec = smallest_disallowed_vec;
            let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            // Check that our largest_allowed_vec contains the maximum number of hashes
            prop_assert!((largest_allowed_vec.len() as u64) == Hash::max_allocation());
            // Check that our largest_allowed_vec is small enough to send as a protocol message
            prop_assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
        }
    }
}

View File

@ -135,7 +135,8 @@ const BLOCK_HEADER_LENGTH: usize =
/// The minimum size for a serialized CountedHeader.
///
/// A CountedHeader has BLOCK_HEADER_LENGTH bytes + 1 or more bytes for the transaction count
const MIN_COUNTED_HEADER_LEN: usize = BLOCK_HEADER_LENGTH + 1;
pub(crate) const MIN_COUNTED_HEADER_LEN: usize = BLOCK_HEADER_LENGTH + 1;
impl TrustedPreallocate for CountedHeader {
fn max_allocation() -> u64 {
// Every vector type requires a length field of at least one byte for de/serialization.
@ -143,61 +144,3 @@ impl TrustedPreallocate for CountedHeader {
((MAX_PROTOCOL_MESSAGE_LEN - 1) / MIN_COUNTED_HEADER_LEN) as u64
}
}
/// Property tests for `TrustedPreallocate` on `CountedHeader`.
///
/// NOTE(review): this module is being deleted by this commit and replaced by
/// `block/tests/preallocate.rs` — verify the new file keeps the same coverage.
#[cfg(test)]
mod test_trusted_preallocate {
    use super::{CountedHeader, Header, MAX_PROTOCOL_MESSAGE_LEN, MIN_COUNTED_HEADER_LEN};
    use crate::serialization::{TrustedPreallocate, ZcashSerialize};
    use proptest::prelude::*;
    use std::convert::TryInto;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10_000))]
        /// Confirm that each counted header takes at least MIN_COUNTED_HEADER_LEN bytes when serialized.
        /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
        #[test]
        fn counted_header_min_length(header in Header::arbitrary_with(()), transaction_count in (0..std::u32::MAX)) {
            let header = CountedHeader {
                header,
                transaction_count: transaction_count.try_into().expect("Must run test on platform with at least 32 bit address space"),
            };
            let serialized_header = header.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            prop_assert!(serialized_header.len() >= MIN_COUNTED_HEADER_LEN)
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        /// Verify that...
        /// 1. The smallest disallowed vector of `CountedHeaders`s is too large to send via the Zcash Wire Protocol
        /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message
        #[test]
        fn counted_header_max_allocation(header in Header::arbitrary_with(())) {
            let header = CountedHeader {
                header,
                transaction_count: 0,
            };
            let max_allocation: usize = CountedHeader::max_allocation().try_into().unwrap();
            let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
            for _ in 0..(CountedHeader::max_allocation()+1) {
                smallest_disallowed_vec.push(header.clone());
            }
            let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            // Check that our smallest_disallowed_vec is only one item larger than the limit
            prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == CountedHeader::max_allocation());
            // Check that our smallest_disallowed_vec is too big to send as a protocol message
            prop_assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);
            // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
            smallest_disallowed_vec.pop();
            let largest_allowed_vec = smallest_disallowed_vec;
            let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            // Check that our largest_allowed_vec contains the maximum number of CountedHeaders
            prop_assert!((largest_allowed_vec.len() as u64) == CountedHeader::max_allocation());
            // Check that our largest_allowed_vec is small enough to send as a protocol message
            prop_assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
        }
    }
}

View File

@ -1,4 +1,5 @@
// XXX this should be rewritten as strategies
// XXX generate should be rewritten as strategies
mod generate;
mod preallocate;
mod prop;
mod vectors;

View File

@ -0,0 +1,98 @@
//! Tests for trusted preallocation during deserialization.
use crate::{
block::{
header::MIN_COUNTED_HEADER_LEN, CountedHeader, Hash, Header, BLOCK_HASH_SIZE,
MAX_PROTOCOL_MESSAGE_LEN,
},
serialization::{TrustedPreallocate, ZcashSerialize},
};
use proptest::prelude::*;
use std::convert::TryInto;
proptest! {
    /// Verify that the serialized size of a block hash used to calculate the allocation limit is correct
    #[test]
    fn block_hash_size_is_correct(hash in Hash::arbitrary()) {
        let serialized = hash.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        prop_assert!(serialized.len() as u64 == BLOCK_HASH_SIZE);
    }

    /// Verify that...
    /// 1. The smallest disallowed vector of `Hash`s is too large to send via the Zcash Wire Protocol
    /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message
    #[test]
    fn block_hash_max_allocation(hash in Hash::arbitrary_with(())) {
        let max_allocation: usize = Hash::max_allocation().try_into().unwrap();
        let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
        for _ in 0..(Hash::max_allocation()+1) {
            smallest_disallowed_vec.push(hash);
        }
        let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        // Check that our smallest_disallowed_vec is only one item larger than the limit
        prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Hash::max_allocation());
        // Check that our smallest_disallowed_vec is too big to send as a protocol message
        prop_assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);
        // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
        smallest_disallowed_vec.pop();
        let largest_allowed_vec = smallest_disallowed_vec;
        let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        // Check that our largest_allowed_vec contains the maximum number of hashes
        prop_assert!((largest_allowed_vec.len() as u64) == Hash::max_allocation());
        // Check that our largest_allowed_vec is small enough to send as a protocol message
        prop_assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
    }

    /// Confirm that each counted header takes at least MIN_COUNTED_HEADER_LEN bytes when serialized.
    /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
    #[test]
    fn counted_header_min_length(header in Header::arbitrary_with(()), transaction_count in (0..std::u32::MAX)) {
        let header = CountedHeader {
            header,
            transaction_count: transaction_count.try_into().expect("Must run test on platform with at least 32 bit address space"),
        };
        let serialized_header = header.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        prop_assert!(serialized_header.len() >= MIN_COUNTED_HEADER_LEN)
    }
}
proptest! {
    #![proptest_config(ProptestConfig::with_cases(128))]
    /// Verify that...
    /// 1. The smallest disallowed vector of `CountedHeaders`s is too large to send via the Zcash Wire Protocol
    /// 2. The largest allowed vector is small enough to fit in a legal Zcash Wire Protocol message
    #[test]
    fn counted_header_max_allocation(header in Header::arbitrary_with(())) {
        let header = CountedHeader {
            header,
            transaction_count: 0,
        };

        // Build a vector holding exactly one header more than the limit allows.
        let limit: usize = CountedHeader::max_allocation().try_into().unwrap();
        let mut oversized = Vec::with_capacity(limit + 1);
        for _ in 0..=limit {
            oversized.push(header.clone());
        }
        let oversized_bytes = oversized.zcash_serialize_to_vec().expect("Serialization to vec must succeed");

        // The oversized vector is exactly one element past `max_allocation()`...
        prop_assert!(oversized.len() - 1 == limit);
        // ...and its serialization is too big to send as a protocol message.
        prop_assert!(oversized_bytes.len() > MAX_PROTOCOL_MESSAGE_LEN);

        // Pop one element to get the largest allowed vector, reusing the
        // existing buffer instead of copying it.
        oversized.pop();
        let max_sized = oversized;
        let max_sized_bytes = max_sized.zcash_serialize_to_vec().expect("Serialization to vec must succeed");

        // The largest allowed vector holds exactly `max_allocation()` headers...
        prop_assert!(max_sized.len() == limit);
        // ...and its serialization fits in a legal protocol message.
        prop_assert!(max_sized_bytes.len() <= MAX_PROTOCOL_MESSAGE_LEN);
    }
}

View File

@ -78,10 +78,11 @@ impl ZcashDeserialize for Output {
})
}
}
/// An output contains: a 32 byte cv, a 32 byte cmu, a 32 byte ephemeral key
/// a 580 byte encCiphertext, an 80 byte outCiphertext, and a 192 byte zkproof
/// [ps]: https://zips.z.cash/protocol/protocol.pdf#outputencoding
const OUTPUT_SIZE: u64 = 32 + 32 + 32 + 580 + 80 + 192;
pub(crate) const OUTPUT_SIZE: u64 = 32 + 32 + 32 + 580 + 80 + 192;
/// The maximum number of outputs in a valid Zcash on-chain transaction.
///
@ -95,56 +96,3 @@ impl TrustedPreallocate for Output {
(MAX_BLOCK_BYTES - 1) / OUTPUT_SIZE
}
}
/// Property tests for `TrustedPreallocate` on sapling `Output`.
///
/// NOTE(review): this module is being deleted by this commit and replaced by
/// `sapling/shielded_data/tests/preallocate.rs` — verify the new file keeps
/// the same coverage.
#[cfg(test)]
mod test_trusted_preallocate {
    use super::{Output, MAX_BLOCK_BYTES, OUTPUT_SIZE};
    use crate::serialization::{TrustedPreallocate, ZcashSerialize};
    use proptest::prelude::*;
    use std::convert::TryInto;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10_000))]
        /// Confirm that each output takes exactly OUTPUT_SIZE bytes when serialized.
        /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
        #[test]
        fn output_size_is_small_enough(output in Output::arbitrary_with(())) {
            let serialized = output.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            prop_assert!(serialized.len() as u64 == OUTPUT_SIZE)
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        /// Verify that...
        /// 1. The smallest disallowed vector of `Outputs`s is too large to fit in a Zcash block
        /// 2. The largest allowed vector is small enough to fit in a legal Zcash block
        #[test]
        fn output_max_allocation_is_big_enough(output in Output::arbitrary_with(())) {
            let max_allocation: usize = Output::max_allocation().try_into().unwrap();
            let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
            for _ in 0..(Output::max_allocation()+1) {
                smallest_disallowed_vec.push(output.clone());
            }
            let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            // Check that our smallest_disallowed_vec is only one item larger than the limit
            prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Output::max_allocation());
            // Check that our smallest_disallowed_vec is too big to be included in a valid block
            // Note that a serialized block always includes at least one byte for the number of transactions,
            // so any serialized Vec<Output> at least MAX_BLOCK_BYTES long is too large to fit in a block.
            prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES);
            // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
            smallest_disallowed_vec.pop();
            let largest_allowed_vec = smallest_disallowed_vec;
            let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            // Check that our largest_allowed_vec contains the maximum number of Outputs
            prop_assert!((largest_allowed_vec.len() as u64) == Output::max_allocation());
            // Check that our largest_allowed_vec is small enough to fit in a Zcash block.
            prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES);
        }
    }
}

View File

@ -145,15 +145,15 @@ impl ZcashSerialize for Spend<SharedAnchor> {
/// in V5), and a 64 byte spendAuthSig (serialized separately in V5).
///
/// [ps]: https://zips.z.cash/protocol/protocol.pdf#spendencoding
const SHARED_ANCHOR_SPEND_FULL_SIZE: u64 = SHARED_ANCHOR_SPEND_INITIAL_SIZE + 192 + 64;
pub(crate) const SHARED_ANCHOR_SPEND_FULL_SIZE: u64 = SHARED_ANCHOR_SPEND_INITIAL_SIZE + 192 + 64;
/// The size of a spend with a shared anchor, without associated fields.
///
/// This is the size of spends in the initial array, there are another
/// 2 arrays of zkproofs and spend_auth_sigs required in the transaction format.
const SHARED_ANCHOR_SPEND_INITIAL_SIZE: u64 = 32 + 32 + 32;
pub(crate) const SHARED_ANCHOR_SPEND_INITIAL_SIZE: u64 = 32 + 32 + 32;
/// The size of a spend with a per-spend anchor.
const ANCHOR_PER_SPEND_SIZE: u64 = SHARED_ANCHOR_SPEND_FULL_SIZE + 32;
pub(crate) const ANCHOR_PER_SPEND_SIZE: u64 = SHARED_ANCHOR_SPEND_FULL_SIZE + 32;
/// The maximum number of spends in a valid Zcash on-chain transaction V5.
///
@ -175,121 +175,3 @@ impl TrustedPreallocate for Spend<PerSpendAnchor> {
(MAX_BLOCK_BYTES - 1) / ANCHOR_PER_SPEND_SIZE
}
}
/// Property tests for `TrustedPreallocate` on sapling `Spend`.
///
/// NOTE(review): this module is being deleted by this commit and replaced by
/// `sapling/shielded_data/tests/preallocate.rs` — verify the new file keeps
/// the same coverage.
#[cfg(test)]
mod test_trusted_preallocate {
    use super::{
        Spend, ANCHOR_PER_SPEND_SIZE, MAX_BLOCK_BYTES, SHARED_ANCHOR_SPEND_FULL_SIZE,
        SHARED_ANCHOR_SPEND_INITIAL_SIZE,
    };
    use crate::{
        sapling::{AnchorVariant, PerSpendAnchor, SharedAnchor},
        serialization::{TrustedPreallocate, ZcashSerialize},
    };
    use proptest::prelude::*;
    use std::convert::TryInto;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10_000))]
        /// Confirm that each spend takes exactly ANCHOR_PER_SPEND_SIZE bytes when serialized.
        /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
        #[test]
        fn anchor_per_spend_size_is_small_enough(spend in Spend::<PerSpendAnchor>::arbitrary_with(())) {
            let serialized = spend.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
            prop_assert!(serialized.len() as u64 == ANCHOR_PER_SPEND_SIZE)
        }

        /// Confirm that each spend takes exactly SHARED_ANCHOR_SPEND_FULL_SIZE bytes when
        /// serialized (initial fields plus the separately-serialized zkproof and spend_auth_sig).
        #[test]
        fn shared_anchor_spend_size_is_small_enough(spend in Spend::<SharedAnchor>::arbitrary_with(())) {
            let mut serialized_len = spend.zcash_serialize_to_vec().expect("Serialization to vec must succeed").len();
            serialized_len += spend.zkproof.zcash_serialize_to_vec().expect("Serialization to vec must succeed").len();
            serialized_len += &<[u8; 64]>::from(spend.spend_auth_sig).len();
            prop_assert!(serialized_len as u64 == SHARED_ANCHOR_SPEND_FULL_SIZE)
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        /// Verify that...
        /// 1. The smallest disallowed vector of `Spend`s is too large to fit in a Zcash block
        /// 2. The largest allowed vector is small enough to fit in a legal Zcash block
        #[test]
        fn anchor_per_spend_max_allocation_is_big_enough(spend in Spend::<PerSpendAnchor>::arbitrary_with(())) {
            let (
                smallest_disallowed_vec_len,
                smallest_disallowed_serialized_len,
                largest_allowed_vec_len,
                largest_allowed_serialized_len,
            ) = spend_max_allocation_is_big_enough(spend);
            // Check that our smallest_disallowed_vec is only one item larger than the limit
            prop_assert!(((smallest_disallowed_vec_len - 1) as u64) == Spend::<PerSpendAnchor>::max_allocation());
            // Check that our smallest_disallowed_vec is too big to send as a protocol message
            // Note that a serialized block always includes at least one byte for the number of transactions,
            // so any serialized Vec<Spend> at least MAX_BLOCK_BYTES long is too large to fit in a block.
            prop_assert!((smallest_disallowed_serialized_len as u64) >= MAX_BLOCK_BYTES);
            // Check that our largest_allowed_vec contains the maximum number of spends
            prop_assert!((largest_allowed_vec_len as u64) == Spend::<PerSpendAnchor>::max_allocation());
            // Check that our largest_allowed_vec is small enough to send as a protocol message
            prop_assert!((largest_allowed_serialized_len as u64) <= MAX_BLOCK_BYTES);
        }

        /// Verify trusted preallocation for `Spend<SharedAnchor>`
        #[test]
        fn shared_spend_max_allocation_is_big_enough(spend in Spend::<SharedAnchor>::arbitrary_with(())) {
            let (
                smallest_disallowed_vec_len,
                smallest_disallowed_serialized_len,
                largest_allowed_vec_len,
                largest_allowed_serialized_len,
            ) = spend_max_allocation_is_big_enough(spend);
            prop_assert!(((smallest_disallowed_vec_len - 1) as u64) == Spend::<SharedAnchor>::max_allocation());
            // Calculate the actual size of all required Spend fields
            //
            // TODO: modify the test to serialize the associated zkproof and
            // spend_auth_sig fields
            prop_assert!((smallest_disallowed_serialized_len as u64)/SHARED_ANCHOR_SPEND_INITIAL_SIZE*SHARED_ANCHOR_SPEND_FULL_SIZE >= MAX_BLOCK_BYTES);
            prop_assert!((largest_allowed_vec_len as u64) == Spend::<SharedAnchor>::max_allocation());
            prop_assert!((largest_allowed_serialized_len as u64) <= MAX_BLOCK_BYTES);
        }
    }

    /// Return the serialized lengths used by the max-allocation tests:
    /// `(smallest_disallowed_vec_len, smallest_disallowed_serialized_len,
    /// largest_allowed_vec_len, largest_allowed_serialized_len)`, built from
    /// `max_allocation() + 1` and `max_allocation()` copies of `spend`.
    fn spend_max_allocation_is_big_enough<AnchorV>(
        spend: Spend<AnchorV>,
    ) -> (usize, usize, usize, usize)
    where
        AnchorV: AnchorVariant,
        Spend<AnchorV>: TrustedPreallocate + ZcashSerialize + Clone,
    {
        let max_allocation: usize = Spend::max_allocation().try_into().unwrap();
        let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
        for _ in 0..(Spend::max_allocation() + 1) {
            smallest_disallowed_vec.push(spend.clone());
        }
        let smallest_disallowed_serialized = smallest_disallowed_vec
            .zcash_serialize_to_vec()
            .expect("Serialization to vec must succeed");
        let smallest_disallowed_vec_len = smallest_disallowed_vec.len();
        // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
        smallest_disallowed_vec.pop();
        let largest_allowed_vec = smallest_disallowed_vec;
        let largest_allowed_serialized = largest_allowed_vec
            .zcash_serialize_to_vec()
            .expect("Serialization to vec must succeed");
        (
            smallest_disallowed_vec_len,
            smallest_disallowed_serialized.len(),
            largest_allowed_vec.len(),
            largest_allowed_serialized.len(),
        )
    }
}

View File

@ -1 +1,2 @@
mod preallocate;
mod prop;

View File

@ -0,0 +1,165 @@
//! Tests for trusted preallocation during deserialization.
use super::super::{
output::{Output, OUTPUT_SIZE},
spend::{
Spend, ANCHOR_PER_SPEND_SIZE, SHARED_ANCHOR_SPEND_FULL_SIZE,
SHARED_ANCHOR_SPEND_INITIAL_SIZE,
},
};
use crate::{
block::MAX_BLOCK_BYTES,
sapling::{AnchorVariant, PerSpendAnchor, SharedAnchor},
serialization::{TrustedPreallocate, ZcashSerialize},
};
use proptest::prelude::*;
use std::convert::TryInto;
proptest! {
    /// Confirm that each spend takes exactly ANCHOR_PER_SPEND_SIZE bytes when serialized.
    /// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
    #[test]
    fn anchor_per_spend_size_is_small_enough(spend in Spend::<PerSpendAnchor>::arbitrary_with(())) {
        let serialized = spend.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        prop_assert!(serialized.len() as u64 == ANCHOR_PER_SPEND_SIZE)
    }

    /// Confirm that each spend takes exactly SHARED_ANCHOR_SPEND_FULL_SIZE bytes when
    /// serialized: the initial spend fields, plus the separately-serialized
    /// zkproof and spend_auth_sig fields.
    ///
    /// (The comment previously named a nonexistent `SHARED_SPEND_SIZE` constant.)
    #[test]
    fn shared_anchor_spend_size_is_small_enough(spend in Spend::<SharedAnchor>::arbitrary_with(())) {
        let mut serialized_len = spend.zcash_serialize_to_vec().expect("Serialization to vec must succeed").len();
        serialized_len += spend.zkproof.zcash_serialize_to_vec().expect("Serialization to vec must succeed").len();
        // The old `+= &...len()` took a needless reference (clippy::op_ref);
        // add the usize directly.
        serialized_len += <[u8; 64]>::from(spend.spend_auth_sig).len();
        prop_assert!(serialized_len as u64 == SHARED_ANCHOR_SPEND_FULL_SIZE)
    }
}
proptest! {
    #![proptest_config(ProptestConfig::with_cases(128))]
    /// Verify that...
    /// 1. The smallest disallowed vector of `Spend`s is too large to fit in a Zcash block
    /// 2. The largest allowed vector is small enough to fit in a legal Zcash block
    #[test]
    fn anchor_per_spend_max_allocation_is_big_enough(spend in Spend::<PerSpendAnchor>::arbitrary_with(())) {
        let (
            smallest_disallowed_vec_len,
            smallest_disallowed_serialized_len,
            largest_allowed_vec_len,
            largest_allowed_serialized_len,
        ) = spend_max_allocation_is_big_enough(spend);
        // Check that our smallest_disallowed_vec is only one item larger than the limit
        prop_assert!(((smallest_disallowed_vec_len - 1) as u64) == Spend::<PerSpendAnchor>::max_allocation());
        // Check that our smallest_disallowed_vec is too big to send as a protocol message
        // Note that a serialized block always includes at least one byte for the number of transactions,
        // so any serialized Vec<Spend> at least MAX_BLOCK_BYTES long is too large to fit in a block.
        prop_assert!((smallest_disallowed_serialized_len as u64) >= MAX_BLOCK_BYTES);
        // Check that our largest_allowed_vec contains the maximum number of spends
        prop_assert!((largest_allowed_vec_len as u64) == Spend::<PerSpendAnchor>::max_allocation());
        // Check that our largest_allowed_vec is small enough to send as a protocol message
        prop_assert!((largest_allowed_serialized_len as u64) <= MAX_BLOCK_BYTES);
    }

    /// Verify trusted preallocation for `Spend<SharedAnchor>`
    #[test]
    fn shared_spend_max_allocation_is_big_enough(spend in Spend::<SharedAnchor>::arbitrary_with(())) {
        let (
            smallest_disallowed_vec_len,
            smallest_disallowed_serialized_len,
            largest_allowed_vec_len,
            largest_allowed_serialized_len,
        ) = spend_max_allocation_is_big_enough(spend);
        // Check that our smallest_disallowed_vec is only one item larger than the limit
        prop_assert!(((smallest_disallowed_vec_len - 1) as u64) == Spend::<SharedAnchor>::max_allocation());
        // Calculate the actual size of all required Spend fields
        // (the serialized vector only contains the initial fields, so scale it
        // up by the full-size/initial-size ratio before comparing)
        //
        // TODO: modify the test to serialize the associated zkproof and
        // spend_auth_sig fields
        prop_assert!((smallest_disallowed_serialized_len as u64)/SHARED_ANCHOR_SPEND_INITIAL_SIZE*SHARED_ANCHOR_SPEND_FULL_SIZE >= MAX_BLOCK_BYTES);
        // Check that our largest_allowed_vec contains the maximum number of spends
        prop_assert!((largest_allowed_vec_len as u64) == Spend::<SharedAnchor>::max_allocation());
        // Check that our largest_allowed_vec is small enough to fit in a block
        prop_assert!((largest_allowed_serialized_len as u64) <= MAX_BLOCK_BYTES);
    }
}
/// Return the lengths used by the spend max-allocation tests.
///
/// Builds the smallest disallowed vector (`max_allocation() + 1` clones of
/// `spend`) and the largest allowed vector (one element fewer, reusing the
/// same buffer), serializes both, and returns
/// `(smallest_disallowed_vec_len, smallest_disallowed_serialized_len,
/// largest_allowed_vec_len, largest_allowed_serialized_len)`.
fn spend_max_allocation_is_big_enough<AnchorV>(
    spend: Spend<AnchorV>,
) -> (usize, usize, usize, usize)
where
    AnchorV: AnchorVariant,
    Spend<AnchorV>: TrustedPreallocate + ZcashSerialize + Clone,
{
    let max_allocation: usize = Spend::max_allocation().try_into().unwrap();
    let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
    for _ in 0..(Spend::max_allocation() + 1) {
        smallest_disallowed_vec.push(spend.clone());
    }
    let smallest_disallowed_serialized = smallest_disallowed_vec
        .zcash_serialize_to_vec()
        .expect("Serialization to vec must succeed");
    // Record the length before popping, so both lengths can be returned.
    let smallest_disallowed_vec_len = smallest_disallowed_vec.len();
    // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
    smallest_disallowed_vec.pop();
    let largest_allowed_vec = smallest_disallowed_vec;
    let largest_allowed_serialized = largest_allowed_vec
        .zcash_serialize_to_vec()
        .expect("Serialization to vec must succeed");
    (
        smallest_disallowed_vec_len,
        smallest_disallowed_serialized.len(),
        largest_allowed_vec.len(),
        largest_allowed_serialized.len(),
    )
}
proptest! {
    /// Every `Output` must serialize to exactly OUTPUT_SIZE bytes.
    ///
    /// This confirms that `TrustedPreallocate::max_allocation()`, which is
    /// derived from OUTPUT_SIZE, is indeed an upper bound.
    #[test]
    fn output_size_is_small_enough(output in Output::arbitrary_with(())) {
        let bytes = output
            .zcash_serialize_to_vec()
            .expect("Serialization to vec must succeed");
        prop_assert_eq!(bytes.len() as u64, OUTPUT_SIZE);
    }
}
proptest! {
    #![proptest_config(ProptestConfig::with_cases(128))]
    /// Verify that...
    /// 1. The smallest disallowed vector of `Outputs`s is too large to fit in a Zcash block
    /// 2. The largest allowed vector is small enough to fit in a legal Zcash block
    #[test]
    fn output_max_allocation_is_big_enough(output in Output::arbitrary_with(())) {
        let max_allocation: usize = Output::max_allocation().try_into().unwrap();
        let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
        for _ in 0..(Output::max_allocation()+1) {
            smallest_disallowed_vec.push(output.clone());
        }
        let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        // Check that our smallest_disallowed_vec is only one item larger than the limit
        prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Output::max_allocation());
        // Check that our smallest_disallowed_vec is too big to be included in a valid block
        // Note that a serialized block always includes at least one byte for the number of transactions,
        // so any serialized Vec<Output> at least MAX_BLOCK_BYTES long is too large to fit in a block.
        prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES);
        // Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
        smallest_disallowed_vec.pop();
        let largest_allowed_vec = smallest_disallowed_vec;
        let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
        // Check that our largest_allowed_vec contains the maximum number of Outputs
        prop_assert!((largest_allowed_vec.len() as u64) == Output::max_allocation());
        // Check that our largest_allowed_vec is small enough to fit in a Zcash block.
        prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES);
    }
}

View File

@ -3,6 +3,8 @@
#[cfg(any(test, feature = "proptest-impl"))]
mod arbitrary;
mod joinsplit;
#[cfg(test)]
mod tests;
// XXX clean up these modules

View File

@ -111,13 +111,13 @@ const JOINSPLIT_SIZE_WITHOUT_ZKPROOF: u64 =
/// A BCTV14 proof takes 296 bytes, per the Zcash [protocol specification §7.2][ps]
///
/// [ps]: https://zips.z.cash/protocol/protocol.pdf#joinsplitencoding
const BCTV14_JOINSPLIT_SIZE: u64 = JOINSPLIT_SIZE_WITHOUT_ZKPROOF + 296;
pub(crate) const BCTV14_JOINSPLIT_SIZE: u64 = JOINSPLIT_SIZE_WITHOUT_ZKPROOF + 296;
/// The size of a version 4+ joinsplit transaction, which uses a Groth16 proof
///
/// A Groth16 proof takes 192 bytes, per the Zcash [protocol specification §7.2][ps]
///
/// [ps]: https://zips.z.cash/protocol/protocol.pdf#joinsplitencoding
const GROTH16_JOINSPLIT_SIZE: u64 = JOINSPLIT_SIZE_WITHOUT_ZKPROOF + 192;
pub(crate) const GROTH16_JOINSPLIT_SIZE: u64 = JOINSPLIT_SIZE_WITHOUT_ZKPROOF + 192;
impl TrustedPreallocate for JoinSplit<Bctv14Proof> {
fn max_allocation() -> u64 {
@ -136,95 +136,3 @@ impl TrustedPreallocate for JoinSplit<Groth16Proof> {
(MAX_BLOCK_BYTES - 1) / GROTH16_JOINSPLIT_SIZE
}
}
#[cfg(test)]
mod test_trusted_preallocate {
use super::{
Bctv14Proof, Groth16Proof, JoinSplit, BCTV14_JOINSPLIT_SIZE, GROTH16_JOINSPLIT_SIZE,
MAX_BLOCK_BYTES,
};
use crate::serialization::{TrustedPreallocate, ZcashSerialize};
use proptest::{prelude::*, proptest};
use std::convert::TryInto;
proptest! {
#![proptest_config(ProptestConfig::with_cases(1_000))]
#[test]
/// Confirm that each JoinSplit<Bctv14Proof> takes exactly BCTV14_JOINSPLIT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
fn joinsplit_btcv14_size_is_correct(joinsplit in <JoinSplit<Bctv14Proof>>::arbitrary_with(())) {
let serialized = joinsplit.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 == BCTV14_JOINSPLIT_SIZE)
}
#[test]
/// Confirm that each JoinSplit<Groth16Proof> takes exactly GROTH16_JOINSPLIT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
fn joinsplit_groth16_size_is_correct(joinsplit in <JoinSplit<Groth16Proof>>::arbitrary_with(())) {
let serialized = joinsplit.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 == GROTH16_JOINSPLIT_SIZE)
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(100))]
/// Verify that...
/// 1. The smallest disallowed vector of `JoinSplit<Bctv14Proof>`s is too large to fit in a Zcash block
/// 2. The largest allowed vector is small enough to fit in a legal Zcash block
#[test]
fn joinsplit_btcv14_max_allocation_is_correct(joinsplit in <JoinSplit<Bctv14Proof>>::arbitrary_with(())) {
let max_allocation: usize = <JoinSplit<Bctv14Proof>>::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(<JoinSplit<Bctv14Proof>>::max_allocation()+1) {
smallest_disallowed_vec.push(joinsplit.clone());
}
let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <JoinSplit<Bctv14Proof>>::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized `Vec<JoinSplit<Bctv14Proof>>` at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of `JoinSplit<Bctv14Proof>`s
prop_assert!((largest_allowed_vec.len() as u64) == <JoinSplit<Bctv14Proof>>::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash block.
prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES);
}
/// Verify that...
/// 1. The smallest disallowed vector of `JoinSplit<Groth16Proof>`s is too large to fit in a Zcash block
/// 2. The largest allowed vector is small enough to fit in a legal Zcash block
#[test]
fn joinsplit_groth16_max_allocation_is_correct(joinsplit in <JoinSplit<Groth16Proof>>::arbitrary_with(())) {
let max_allocation: usize = <JoinSplit<Groth16Proof>>::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(<JoinSplit<Groth16Proof>>::max_allocation()+1) {
smallest_disallowed_vec.push(joinsplit.clone());
}
let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <JoinSplit<Groth16Proof>>::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized `Vec<JoinSplit<Groth16Proof>>` at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of `JoinSplit<Groth16Proof>`s
prop_assert!((largest_allowed_vec.len() as u64) == <JoinSplit<Groth16Proof>>::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash block.
prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES);
}
}
}

View File

@ -1 +1 @@
mod arbitrary;
mod preallocate;

View File

@ -0,0 +1,94 @@
//! Tests for trusted preallocation during deserialization.
use super::super::joinsplit::{JoinSplit, BCTV14_JOINSPLIT_SIZE, GROTH16_JOINSPLIT_SIZE};
use crate::{
block::MAX_BLOCK_BYTES,
primitives::{Bctv14Proof, Groth16Proof},
serialization::{TrustedPreallocate, ZcashSerialize},
};
use proptest::{prelude::*, proptest};
use std::convert::TryInto;
proptest! {
/// Confirm that each `JoinSplit<Bctv14Proof>` takes exactly BCTV14_JOINSPLIT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn joinsplit_btcv14_size_is_correct(joinsplit in <JoinSplit<Bctv14Proof>>::arbitrary_with(())) {
let serialized = joinsplit.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 == BCTV14_JOINSPLIT_SIZE)
}
/// Confirm that each `JoinSplit<Groth16Proof>` takes exactly GROTH16_JOINSPLIT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn joinsplit_groth16_size_is_correct(joinsplit in <JoinSplit<Groth16Proof>>::arbitrary_with(())) {
let serialized = joinsplit.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 == GROTH16_JOINSPLIT_SIZE)
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(128))]
/// Verify that...
/// 1. The smallest disallowed vector of `JoinSplit<Bctv14Proof>`s is too large to fit in a Zcash block
/// 2. The largest allowed vector is small enough to fit in a legal Zcash block
#[test]
fn joinsplit_btcv14_max_allocation_is_correct(joinsplit in <JoinSplit<Bctv14Proof>>::arbitrary_with(())) {
let max_allocation: usize = <JoinSplit<Bctv14Proof>>::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(<JoinSplit<Bctv14Proof>>::max_allocation()+1) {
smallest_disallowed_vec.push(joinsplit.clone());
}
let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <JoinSplit<Bctv14Proof>>::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized `Vec<JoinSplit<Bctv14Proof>>` at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of `JoinSplit<Bctv14Proof>`s
prop_assert!((largest_allowed_vec.len() as u64) == <JoinSplit<Bctv14Proof>>::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash block.
prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES);
}
/// Verify that...
/// 1. The smallest disallowed vector of `JoinSplit<Groth16Proof>`s is too large to fit in a Zcash block
/// 2. The largest allowed vector is small enough to fit in a legal Zcash block
#[test]
fn joinsplit_groth16_max_allocation_is_correct(joinsplit in <JoinSplit<Groth16Proof>>::arbitrary_with(())) {
let max_allocation: usize = <JoinSplit<Groth16Proof>>::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(<JoinSplit<Groth16Proof>>::max_allocation()+1) {
smallest_disallowed_vec.push(joinsplit.clone());
}
let smallest_disallowed_serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <JoinSplit<Groth16Proof>>::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized `Vec<JoinSplit<Groth16Proof>>` at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!((smallest_disallowed_serialized.len() as u64) >= MAX_BLOCK_BYTES);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of `JoinSplit<Groth16Proof>`s
prop_assert!((largest_allowed_vec.len() as u64) == <JoinSplit<Groth16Proof>>::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash block.
prop_assert!((largest_allowed_serialized.len() as u64) < MAX_BLOCK_BYTES);
}
}

View File

@ -357,11 +357,12 @@ where
/// A Tx Input must have an Outpoint (32 byte hash + 4 byte index), a 4 byte sequence number,
/// and a signature script, which always takes a min of 1 byte (for a length 0 script)
const MIN_TRANSPARENT_INPUT_SIZE: u64 = 32 + 4 + 4 + 1;
pub(crate) const MIN_TRANSPARENT_INPUT_SIZE: u64 = 32 + 4 + 4 + 1;
/// A Transparent output has an 8 byte value and script which takes a min of 1 byte
const MIN_TRANSPARENT_OUTPUT_SIZE: u64 = 8 + 1;
// All txs must have at least one input, a 4 byte locktime, and at least one output
const MIN_TRANSPARENT_TX_SIZE: u64 = MIN_TRANSPARENT_INPUT_SIZE + 4 + MIN_TRANSPARENT_OUTPUT_SIZE;
pub(crate) const MIN_TRANSPARENT_OUTPUT_SIZE: u64 = 8 + 1;
/// All txs must have at least one input, a 4 byte locktime, and at least one output
pub(crate) const MIN_TRANSPARENT_TX_SIZE: u64 =
MIN_TRANSPARENT_INPUT_SIZE + 4 + MIN_TRANSPARENT_OUTPUT_SIZE;
/// No valid Zcash message contains more transactions than can fit in a single block
///
@ -373,6 +374,7 @@ impl TrustedPreallocate for Arc<Transaction> {
MAX_BLOCK_BYTES / MIN_TRANSPARENT_TX_SIZE
}
}
/// The maximum number of inputs in a valid Zcash on-chain transaction.
///
/// If a transaction contains more inputs than can fit in maximally large block, it might be
@ -383,6 +385,7 @@ impl TrustedPreallocate for transparent::Input {
MAX_BLOCK_BYTES / MIN_TRANSPARENT_INPUT_SIZE
}
}
/// The maximum number of outputs in a valid Zcash on-chain transaction.
///
/// If a transaction contains more outputs than can fit in maximally large block, it might be
@ -393,99 +396,3 @@ impl TrustedPreallocate for transparent::Output {
MAX_BLOCK_BYTES / MIN_TRANSPARENT_OUTPUT_SIZE
}
}
#[cfg(test)]
mod test_trusted_preallocate {
use super::{
transparent::Input, transparent::Output, Transaction, MAX_BLOCK_BYTES,
MIN_TRANSPARENT_INPUT_SIZE, MIN_TRANSPARENT_OUTPUT_SIZE, MIN_TRANSPARENT_TX_SIZE,
};
use crate::serialization::{TrustedPreallocate, ZcashSerialize};
use proptest::prelude::*;
use std::{convert::TryInto, sync::Arc};
proptest! {
#![proptest_config(ProptestConfig::with_cases(300))]
/// Confirm that each transaction takes at least MIN_TRANSPARENT_TX_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn tx_size_is_small_enough(tx in Transaction::arbitrary()) {
let serialized = tx.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_TX_SIZE)
}
/// Confirm that each transparent input takes at least MIN_TRANSPARENT_INPUT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn transparent_input_size_is_small_enough(input in Input::arbitrary()) {
let serialized = input.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_INPUT_SIZE)
}
/// Confirm that each transparent output takes at least MIN_TRANSPARENT_OUTPUT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn transparent_output_size_is_small_enough(output in Output::arbitrary()) {
let serialized = output.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_OUTPUT_SIZE)
}
}
proptest! {
// This test is pretty slow, so only run a few
#![proptest_config(ProptestConfig::with_cases(7))]
#[test]
/// Verify the smallest disallowed vector of `Transaction`s is too large to fit in a Zcash block
fn tx_max_allocation_is_big_enough(tx in Transaction::arbitrary()) {
let max_allocation: usize = <Arc<Transaction>>::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(<Arc<Transaction>>::max_allocation()+1) {
smallest_disallowed_vec.push(Arc::new(tx.clone()));
}
let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <Arc<Transaction>>::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
prop_assert!(serialized.len() as u64 > MAX_BLOCK_BYTES);
}
#[test]
/// Verify the smallest disallowed vector of `Input`s is too large to fit in a Zcash block
fn input_max_allocation_is_big_enough(input in Input::arbitrary()) {
let max_allocation: usize = Input::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(Input::max_allocation()+1) {
smallest_disallowed_vec.push(input.clone());
}
let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Input::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized Vec<Input> at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!(serialized.len() as u64 >= MAX_BLOCK_BYTES);
}
#[test]
/// Verify the smallest disallowed vector of `Output`s is too large to fit in a Zcash block
fn output_max_allocation_is_big_enough(output in Output::arbitrary()) {
let max_allocation: usize = Output::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(Output::max_allocation()+1) {
smallest_disallowed_vec.push(output.clone());
}
let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Output::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized Vec<Output> at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!(serialized.len() as u64 >= MAX_BLOCK_BYTES);
}
}
}

View File

@ -1,2 +1,3 @@
mod preallocate;
mod prop;
mod vectors;

View File

@ -0,0 +1,103 @@
//! Tests for trusted preallocation during deserialization.
use super::super::{
serialize::{MIN_TRANSPARENT_INPUT_SIZE, MIN_TRANSPARENT_OUTPUT_SIZE, MIN_TRANSPARENT_TX_SIZE},
transparent::Input,
transparent::Output,
Transaction,
};
use crate::{
block::MAX_BLOCK_BYTES,
serialization::{TrustedPreallocate, ZcashSerialize},
};
use proptest::prelude::*;
use std::{convert::TryInto, sync::Arc};
proptest! {
/// Confirm that each transaction takes at least MIN_TRANSPARENT_TX_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn tx_size_is_small_enough(tx in Transaction::arbitrary()) {
let serialized = tx.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_TX_SIZE)
}
/// Confirm that each transparent input takes at least MIN_TRANSPARENT_INPUT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn transparent_input_size_is_small_enough(input in Input::arbitrary()) {
let serialized = input.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_INPUT_SIZE)
}
/// Confirm that each transparent output takes at least MIN_TRANSPARENT_OUTPUT_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn transparent_output_size_is_small_enough(output in Output::arbitrary()) {
let serialized = output.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
prop_assert!(serialized.len() as u64 >= MIN_TRANSPARENT_OUTPUT_SIZE)
}
}
proptest! {
// This test is pretty slow, so only run a few cases
#![proptest_config(ProptestConfig::with_cases(8))]
/// Verify the smallest disallowed vector of `Transaction`s is too large to fit in a Zcash block
#[test]
fn tx_max_allocation_is_big_enough(tx in Transaction::arbitrary()) {
let max_allocation: usize = <Arc<Transaction>>::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(<Arc<Transaction>>::max_allocation()+1) {
smallest_disallowed_vec.push(Arc::new(tx.clone()));
}
let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == <Arc<Transaction>>::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
prop_assert!(serialized.len() as u64 > MAX_BLOCK_BYTES);
}
/// Verify the smallest disallowed vector of `Input`s is too large to fit in a Zcash block
#[test]
fn input_max_allocation_is_big_enough(input in Input::arbitrary()) {
let max_allocation: usize = Input::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(Input::max_allocation()+1) {
smallest_disallowed_vec.push(input.clone());
}
let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Input::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized Vec<Input> at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!(serialized.len() as u64 >= MAX_BLOCK_BYTES);
}
/// Verify the smallest disallowed vector of `Output`s is too large to fit in a Zcash block
#[test]
fn output_max_allocation_is_big_enough(output in Output::arbitrary()) {
let max_allocation: usize = Output::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(Output::max_allocation()+1) {
smallest_disallowed_vec.push(output.clone());
}
let serialized = smallest_disallowed_vec.zcash_serialize_to_vec().expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
prop_assert!(((smallest_disallowed_vec.len() - 1) as u64) == Output::max_allocation());
// Check that our smallest_disallowed_vec is too big to be included in a valid block
// Note that a serialized block always includes at least one byte for the number of transactions,
// so any serialized Vec<Output> at least MAX_BLOCK_BYTES long is too large to fit in a block.
prop_assert!(serialized.len() as u64 >= MAX_BLOCK_BYTES);
}
}

View File

@ -18,6 +18,9 @@ use crate::protocol::{external::MAX_PROTOCOL_MESSAGE_LEN, types::PeerServices};
use PeerAddrState::*;
#[cfg(test)]
mod tests;
/// Peer connection state, based on our interactions with the peer.
///
/// Zebra also tracks how recently a peer has sent us messages, and derives peer
@ -264,8 +267,10 @@ impl ZcashDeserialize for MetaAddr {
Ok(MetaAddr::new_gossiped(&addr, &services, &last_seen))
}
}
/// A serialized meta addr has a 4 byte time, 8 byte services, 16 byte IP addr, and 2 byte port
const META_ADDR_SIZE: usize = 4 + 8 + 16 + 2;
impl TrustedPreallocate for MetaAddr {
fn max_allocation() -> u64 {
// Since a maximal serialized Vec<MetAddr> uses at least three bytes for its length (2MB messages / 30B MetaAddr implies the maximal length is much greater than 253)
@ -273,96 +278,3 @@ impl TrustedPreallocate for MetaAddr {
((MAX_PROTOCOL_MESSAGE_LEN - 3) / META_ADDR_SIZE) as u64
}
}
#[cfg(test)]
mod test_trusted_preallocate {
use std::convert::TryInto;
use super::{MetaAddr, MAX_PROTOCOL_MESSAGE_LEN, META_ADDR_SIZE};
use super::{PeerAddrState, PeerServices};
use chrono::{TimeZone, Utc};
use zebra_chain::serialization::{TrustedPreallocate, ZcashSerialize};
#[test]
/// Confirm that each MetaAddr takes exactly META_ADDR_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
fn meta_addr_size_is_correct() {
let addr = MetaAddr {
addr: ([192, 168, 0, 0], 8333).into(),
services: PeerServices::default(),
last_seen: Utc.timestamp(1_573_680_222, 0),
last_connection_state: PeerAddrState::Responded,
};
let serialized = addr
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
assert!(serialized.len() == META_ADDR_SIZE)
}
#[test]
/// Verifies that...
/// 1. The smallest disallowed vector of `MetaAddr`s is too large to fit in a legal Zcash message
/// 2. The largest allowed vector is small enough to fit in a legal Zcash message
fn meta_addr_max_allocation_is_correct() {
let addr = MetaAddr {
addr: ([192, 168, 0, 0], 8333).into(),
services: PeerServices::default(),
last_seen: Utc.timestamp(1_573_680_222, 0),
last_connection_state: PeerAddrState::Responded,
};
let max_allocation: usize = MetaAddr::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(MetaAddr::max_allocation() + 1) {
smallest_disallowed_vec.push(addr);
}
let smallest_disallowed_serialized = smallest_disallowed_vec
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
assert!(((smallest_disallowed_vec.len() - 1) as u64) == MetaAddr::max_allocation());
// Check that our smallest_disallowed_vec is too big to send in a valid Zcash message
assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of MetaAddrs
assert!((largest_allowed_vec.len() as u64) == MetaAddr::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash message.
assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
}
}
#[cfg(test)]
mod tests {
use super::*;
// XXX remove this test and replace it with a proptest instance.
#[test]
/// Check that `sanitize` truncates the last-seen time, resets the connection
/// state to the default, and leaves the other fields unmodified.
fn sanitize_truncates_timestamps() {
zebra_test::init();
let services = PeerServices::default();
let addr = "127.0.0.1:8233".parse().unwrap();
let entry = MetaAddr {
services,
addr,
last_seen: Utc.timestamp(1_573_680_222, 0),
last_connection_state: Responded,
}
.sanitize();
// We want the sanitized timestamp to be a multiple of the truncation interval.
assert_eq!(
entry.get_last_seen().timestamp() % crate::constants::TIMESTAMP_TRUNCATION_SECONDS,
0
);
// We want the state to be the default
assert_eq!(entry.last_connection_state, Default::default());
// We want the other fields to be unmodified
assert_eq!(entry.addr, addr);
assert_eq!(entry.services, services);
}
}

View File

@ -0,0 +1,2 @@
mod preallocate;
mod prop;

View File

@ -0,0 +1,61 @@
//! Tests for trusted preallocation during deserialization.
use super::super::{MetaAddr, PeerAddrState, PeerServices, META_ADDR_SIZE};
use zebra_chain::serialization::{TrustedPreallocate, ZcashSerialize, MAX_PROTOCOL_MESSAGE_LEN};
use chrono::{TimeZone, Utc};
use std::convert::TryInto;
/// Confirm that a serialized `MetaAddr` takes exactly META_ADDR_SIZE bytes.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
#[test]
fn meta_addr_size_is_correct() {
// Any concrete MetaAddr will do: the serialized form is fixed-width.
let meta_addr = MetaAddr {
addr: ([192, 168, 0, 0], 8333).into(),
services: PeerServices::default(),
last_seen: Utc.timestamp(1_573_680_222, 0),
last_connection_state: PeerAddrState::Responded,
};
let bytes = meta_addr
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
assert!(bytes.len() == META_ADDR_SIZE)
}
/// Verifies that...
/// 1. The smallest disallowed vector of `MetaAddr`s is too large to fit in a legal Zcash message
/// 2. The largest allowed vector is small enough to fit in a legal Zcash message
#[test]
fn meta_addr_max_allocation_is_correct() {
let addr = MetaAddr {
addr: ([192, 168, 0, 0], 8333).into(),
services: PeerServices::default(),
last_seen: Utc.timestamp(1_573_680_222, 0),
last_connection_state: PeerAddrState::Responded,
};
let max_allocation: usize = MetaAddr::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(MetaAddr::max_allocation() + 1) {
smallest_disallowed_vec.push(addr);
}
let smallest_disallowed_serialized = smallest_disallowed_vec
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
assert!(((smallest_disallowed_vec.len() - 1) as u64) == MetaAddr::max_allocation());
// Check that our smallest_disallowed_vec is too big to send in a valid Zcash message
assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of MetaAddrs
assert!((largest_allowed_vec.len() as u64) == MetaAddr::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash message.
assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
}

View File

@ -0,0 +1,29 @@
use super::super::*;
// XXX remove this test and replace it with a proptest instance.
/// Check that `sanitize` leaves the last-seen time a multiple of the truncation
/// interval, resets the connection state to the default, and does not touch the
/// other fields.
#[test]
fn sanitize_truncates_timestamps() {
zebra_test::init();
let original_services = PeerServices::default();
let original_addr = "127.0.0.1:8233".parse().unwrap();
let original = MetaAddr {
services: original_services,
addr: original_addr,
last_seen: Utc.timestamp(1_573_680_222, 0),
last_connection_state: Responded,
};
let sanitized = original.sanitize();
// The sanitized timestamp must be a multiple of the truncation interval.
assert_eq!(
sanitized.get_last_seen().timestamp() % crate::constants::TIMESTAMP_TRUNCATION_SECONDS,
0
);
// The connection state must be reset to the default.
assert_eq!(sanitized.last_connection_state, Default::default());
// All other fields must be unmodified.
assert_eq!(sanitized.addr, original_addr);
assert_eq!(sanitized.services, original_services);
}

View File

@ -7,6 +7,9 @@ mod message;
/// Newtype wrappers for primitive types.
pub mod types;
#[cfg(test)]
mod tests;
pub use codec::{Codec, MAX_PROTOCOL_MESSAGE_LEN};
pub use inv::InventoryHash;
pub use message::Message;

View File

@ -1,9 +1,5 @@
//! Inventory items for the Bitcoin protocol.
// XXX the exact optimal arrangement of all of these parts is a little unclear
// until we have more pieces in place the optimal global arrangement of items is
// a little unclear.
use std::io::{Read, Write};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
@ -86,7 +82,9 @@ impl ZcashDeserialize for InventoryHash {
}
}
const INV_HASH_SIZE: usize = 36;
/// The serialized size of an [`InventoryHash`].
pub(crate) const INV_HASH_SIZE: usize = 36;
impl TrustedPreallocate for InventoryHash {
fn max_allocation() -> u64 {
// An Inventory hash takes 36 bytes, and we reserve at least one byte for the Vector length
@ -94,76 +92,3 @@ impl TrustedPreallocate for InventoryHash {
((MAX_PROTOCOL_MESSAGE_LEN - 1) / INV_HASH_SIZE) as u64
}
}
#[cfg(test)]
mod test_trusted_preallocate {
use std::convert::TryInto;
use super::{InventoryHash, INV_HASH_SIZE, MAX_PROTOCOL_MESSAGE_LEN};
use zebra_chain::{
block,
serialization::{TrustedPreallocate, ZcashSerialize},
transaction,
};
#[test]
/// Confirm that each InventoryHash takes exactly INV_HASH_SIZE bytes when serialized.
/// This verifies that our calculated `TrustedPreallocate::max_allocation()` is indeed an upper bound.
fn inv_hash_size_is_correct() {
let block_hash = block::Hash([1u8; 32]);
let tx_hash = transaction::Hash([1u8; 32]);
let inv_block = InventoryHash::Block(block_hash);
let serialized_inv_block = inv_block
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
assert!(serialized_inv_block.len() == INV_HASH_SIZE);
let inv_filtered_block = InventoryHash::FilteredBlock(block_hash);
let serialized_inv_filtered = inv_filtered_block
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
assert!(serialized_inv_filtered.len() == INV_HASH_SIZE);
let inv_tx = InventoryHash::Tx(tx_hash);
let serialized_inv_tx = inv_tx
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
assert!(serialized_inv_tx.len() == INV_HASH_SIZE);
let inv_err = InventoryHash::Error;
let serializd_inv_err = inv_err
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
assert!(serializd_inv_err.len() == INV_HASH_SIZE)
}
#[test]
/// Verifies that...
/// 1. The smallest disallowed vector of `InventoryHash`s is too large to fit in a legal Zcash message
/// 2. The largest allowed vector is small enough to fit in a legal Zcash message
// Note: despite the name, this test covers `InventoryHash`, not `MetaAddr` (copy-paste misnomer).
fn meta_addr_max_allocation_is_correct() {
let inv = InventoryHash::Error;
let max_allocation: usize = InventoryHash::max_allocation().try_into().unwrap();
// Build a vector with exactly one element more than the trusted preallocation limit
let mut smallest_disallowed_vec = Vec::with_capacity(max_allocation + 1);
for _ in 0..(InventoryHash::max_allocation() + 1) {
smallest_disallowed_vec.push(inv);
}
let smallest_disallowed_serialized = smallest_disallowed_vec
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
// Check that our smallest_disallowed_vec is only one item larger than the limit
assert!(((smallest_disallowed_vec.len() - 1) as u64) == InventoryHash::max_allocation());
// Check that our smallest_disallowed_vec is too big to fit in a Zcash message.
assert!(smallest_disallowed_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);
// Create largest_allowed_vec by removing one element from smallest_disallowed_vec without copying (for efficiency)
smallest_disallowed_vec.pop();
let largest_allowed_vec = smallest_disallowed_vec;
let largest_allowed_serialized = largest_allowed_vec
.zcash_serialize_to_vec()
.expect("Serialization to vec must succeed");
// Check that our largest_allowed_vec contains the maximum number of InventoryHashes
assert!((largest_allowed_vec.len() as u64) == InventoryHash::max_allocation());
// Check that our largest_allowed_vec is small enough to fit in a Zcash message.
assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
}
}

View File

@ -0,0 +1 @@
mod preallocate;

View File

@ -0,0 +1,74 @@
//! Tests for trusted preallocation during deserialization.
use super::super::inv::{InventoryHash, INV_HASH_SIZE};
use zebra_chain::{
block,
serialization::{TrustedPreallocate, ZcashSerialize, MAX_PROTOCOL_MESSAGE_LEN},
transaction,
};
use std::convert::TryInto;
/// Confirm that each `InventoryHash` variant takes exactly `INV_HASH_SIZE`
/// bytes when serialized.
///
/// This verifies that our calculated `TrustedPreallocate::max_allocation()`
/// is indeed an upper bound.
#[test]
fn inv_hash_size_is_correct() {
    let block_hash = block::Hash([1u8; 32]);
    let tx_hash = transaction::Hash([1u8; 32]);

    // Every variant must serialize to the same fixed size, so check them all
    // in one loop rather than repeating the serialize-and-assert boilerplate.
    // (This also fixes the typo'd `serializd_inv_err` local in the old code.)
    let inventory_hashes = [
        InventoryHash::Block(block_hash),
        InventoryHash::FilteredBlock(block_hash),
        InventoryHash::Tx(tx_hash),
        InventoryHash::Error,
    ];

    for inv in &inventory_hashes {
        let serialized = inv
            .zcash_serialize_to_vec()
            .expect("Serialization to vec must succeed");
        // `assert_eq!` reports both values on failure, unlike `assert!(a == b)`.
        assert_eq!(serialized.len(), INV_HASH_SIZE);
    }
}
/// Verifies that...
/// 1. The smallest disallowed vector of `InventoryHash`s is too large to fit in a legal Zcash message
/// 2. The largest allowed vector is small enough to fit in a legal Zcash message
#[test]
fn inv_hash_max_allocation_is_correct() {
    let max_allocation: usize = InventoryHash::max_allocation().try_into().unwrap();

    // One entry more than the limit: the smallest vector that is disallowed.
    let mut entries = vec![InventoryHash::Error; max_allocation + 1];

    let oversized_serialized = entries
        .zcash_serialize_to_vec()
        .expect("Serialization to vec must succeed");
    // The vector is exactly one item over the limit...
    assert!(entries.len() as u64 - 1 == InventoryHash::max_allocation());
    // ...and its serialization does not fit in a Zcash message.
    assert!(oversized_serialized.len() > MAX_PROTOCOL_MESSAGE_LEN);

    // Dropping the final element (in place, no copy) yields the largest
    // allowed vector.
    entries.pop();
    let largest_allowed_serialized = entries
        .zcash_serialize_to_vec()
        .expect("Serialization to vec must succeed");
    // It holds the maximum number of InventoryHashes...
    assert!(entries.len() as u64 == InventoryHash::max_allocation());
    // ...and its serialization fits in a Zcash message.
    assert!(largest_allowed_serialized.len() <= MAX_PROTOCOL_MESSAGE_LEN);
}