Implement ZcashSerialize and ZcashDeserialize for BlockHeaderHash; use the general read_list helper when parsing getheaders/headers messages

This commit is contained in:
Deirdre Connolly 2019-10-08 19:39:35 -04:00 committed by Deirdre Connolly
parent eed69063f6
commit dd1b9166b0
2 changed files with 18 additions and 23 deletions

View File

@ -5,7 +5,7 @@ use std::io;
use crate::merkle_tree::MerkleTreeRootHash;
use crate::note_commitment_tree::SaplingNoteTreeRootHash;
use crate::serialization::{SerializationError, ZcashDeserialize, ZcashSerialize};
use crate::serialization::{ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize};
use crate::sha256d_writer::Sha256dWriter;
use crate::transaction::Transaction;
@ -34,6 +34,20 @@ impl From<BlockHeader> for BlockHeaderHash {
}
}
/// Serialize a [`BlockHeaderHash`] in the Zcash wire format.
impl ZcashSerialize for BlockHeaderHash {
    fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), SerializationError> {
        // The inner 32 bytes are already in wire order, so emit them verbatim.
        Ok(writer.write_all(&self.0)?)
    }
}
/// Deserialize a [`BlockHeaderHash`] from the Zcash wire format.
impl ZcashDeserialize for BlockHeaderHash {
    fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
        // A header hash is exactly 32 raw bytes on the wire; wrap them directly.
        Ok(BlockHeaderHash(reader.read_32_bytes()?))
    }
}
/// Block header.
///
/// How are blocks chained together? They are chained together via the

View File

@ -405,13 +405,9 @@ impl Codec {
fn read_getblocks<R: Read>(&self, mut reader: R) -> Result<Message, Error> {
let version = Version(reader.read_u32::<LittleEndian>()?);
let count = reader.read_compactsize()? as usize;
let max_count = self.builder.max_len / 32;
let mut block_locator_hashes = Vec::with_capacity(std::cmp::min(count, max_count));
for _ in 0..count {
block_locator_hashes.push(BlockHeaderHash(reader.read_32_bytes()?));
}
let block_locator_hashes: Vec<BlockHeaderHash> = reader.read_list(max_count)?;
let hash_stop = BlockHeaderHash(reader.read_32_bytes()?);
@ -428,21 +424,10 @@ impl Codec {
///
/// [Zcash block header](https://zips.z.cash/protocol/protocol.pdf#page=84)
fn read_headers<R: Read>(&self, mut reader: R) -> Result<Message, Error> {
let count = reader.read_compactsize()? as usize;
// Preallocate a buffer, performing a single allocation in the honest
// case. Although the size of the received data buffer is bounded by the
// codec's max_len field, it's still possible for someone to send a
// short message with a large count field, so if we naively trust
// the count field we could be tricked into preallocating a large
// buffer. Instead, calculate the maximum count for a valid message from
// the codec's max_len using ENCODED_HEADER_SIZE.
const ENCODED_HEADER_SIZE: usize = 4 + 32 + 32 + 32 + 4 + 4 + 32 + 3 + 1344;
let max_count = self.builder.max_len / ENCODED_HEADER_SIZE;
let mut headers = Vec::with_capacity(std::cmp::min(count, max_count));
for _ in 0..count {
headers.push(BlockHeader::zcash_deserialize(&mut reader)?);
}
let headers: Vec<BlockHeader> = reader.read_list(max_count)?;
Ok(Message::Headers(headers))
}
@ -451,13 +436,9 @@ impl Codec {
fn read_getheaders<R: Read>(&self, mut reader: R) -> Result<Message, Error> {
let version = Version(reader.read_u32::<LittleEndian>()?);
let count = reader.read_compactsize()? as usize;
let max_count = self.builder.max_len / 32;
let mut block_locator_hashes = Vec::with_capacity(std::cmp::min(count, max_count));
for _ in 0..count {
block_locator_hashes.push(BlockHeaderHash(reader.read_32_bytes()?));
}
let block_locator_hashes: Vec<BlockHeaderHash> = reader.read_list(max_count)?;
let hash_stop = BlockHeaderHash(reader.read_32_bytes()?);