4. change(db): stop storing redundant transparent output fields in the database (#3992)

* Add Utxo constructors from output locations

* Store transparent outputs rather than Utxo structs

* Update raw data snapshots

* Increment the state version
This commit is contained in:
teor 2022-04-12 13:10:23 +10:00 committed by GitHub
parent 6d2c4fbb5f
commit caac71a9d8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 70 additions and 82 deletions

View File

@ -54,20 +54,53 @@ pub struct OrderedUtxo {
pub tx_index_in_block: usize,
}
impl Utxo {
/// Create a new UTXO from its fields.
pub fn new(output: transparent::Output, height: block::Height, from_coinbase: bool) -> Utxo {
Utxo {
output,
height,
from_coinbase,
}
}
/// Create a new UTXO from an output and its transaction location.
pub fn from_location(
output: transparent::Output,
height: block::Height,
tx_index_in_block: usize,
) -> Utxo {
// Coinbase transactions are always the first transaction in their block,
// we check the other consensus rules separately.
let from_coinbase = tx_index_in_block == 0;
Utxo {
output,
height,
from_coinbase,
}
}
}
impl OrderedUtxo {
/// Create a new ordered UTXO from its fields.
///
/// The coinbase flag is derived from `tx_index_in_block`, because the
/// database no longer stores that redundant field.
pub fn new(
    output: transparent::Output,
    height: block::Height,
    tx_index_in_block: usize,
) -> OrderedUtxo {
    // Coinbase transactions are always the first transaction in their block;
    // we check the other consensus rules separately.
    let from_coinbase = tx_index_in_block == 0;

    OrderedUtxo {
        utxo: Utxo::new(output, height, from_coinbase),
        tx_index_in_block,
    }
}
/// Create a new ordered UTXO from a UTXO and transaction index.
pub fn from_utxo(utxo: Utxo, tx_index_in_block: usize) -> OrderedUtxo {
OrderedUtxo {
utxo,
tx_index_in_block,
@ -194,17 +227,17 @@ pub fn new_transaction_ordered_outputs(
) -> HashMap<transparent::OutPoint, OrderedUtxo> {
let mut new_ordered_outputs = HashMap::new();
let from_coinbase = transaction.has_valid_coinbase_transaction_inputs();
for (output_index_in_transaction, output) in transaction.outputs().iter().cloned().enumerate() {
let output_index_in_transaction = output_index_in_transaction
.try_into()
.expect("unexpectedly large number of outputs");
new_ordered_outputs.insert(
transparent::OutPoint {
hash,
index: output_index_in_transaction,
},
OrderedUtxo::new(output, height, from_coinbase, tx_index_in_block),
OrderedUtxo::new(output, height, tx_index_in_block),
);
}

View File

@ -1823,8 +1823,7 @@ fn mock_transparent_transfer(
lock_script,
};
let previous_utxo =
transparent::OrderedUtxo::new(previous_output, previous_utxo_height, false, 1);
let previous_utxo = transparent::OrderedUtxo::new(previous_output, previous_utxo_height, 1);
// Use the `previous_outpoint` as input
let input = transparent::Input::PrevOut {

View File

@ -18,7 +18,7 @@ pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY;
pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1;
/// The database format version, incremented each time the database format changes.
//
// Version 19: transparent outputs are stored without the redundant height and
// coinbase fields, which are now recovered from the output's location.
pub const DATABASE_FORMAT_VERSION: u32 = 19;
/// The maximum number of blocks to check for NU5 transactions,
/// before we assume we are on a pre-NU5 legacy chain.

View File

@ -87,7 +87,6 @@ impl TransactionIndex {
}
/// Returns this index as a `usize`
#[allow(dead_code)]
pub fn as_usize(&self) -> usize {
self.0
.try_into()

View File

@ -158,18 +158,6 @@ fn roundtrip_address_balance_location() {
);
}
#[test]
fn roundtrip_unspent_transparent_output() {
    zebra_test::init();

    // Property test: any `Utxo` value must survive a disk-format round-trip,
    // after clamping its height to the maximum height storable on disk.
    proptest!(
        |(mut val in any::<transparent::Utxo>())| {
            // Heights above `MAX_ON_DISK_HEIGHT` cannot be serialized, so clamp first.
            val.height = val.height.clamp(Height(0), MAX_ON_DISK_HEIGHT);
            assert_value_properties(val)
        }
    );
}
#[test]
fn roundtrip_transparent_output() {
zebra_test::init();

View File

@ -5,10 +5,10 @@ expression: cf_data
[
KV(
k: "0000010000000000",
v: "0000010150c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac",
v: "50c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac",
),
KV(
k: "0000010000000001",
v: "00000101d43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a87",
v: "d43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a87",
),
]

View File

@ -5,18 +5,18 @@ expression: cf_data
[
KV(
k: "0000010000000000",
v: "0000010150c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac",
v: "50c30000000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac",
),
KV(
k: "0000010000000001",
v: "00000101d43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a87",
v: "d43000000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a87",
),
KV(
k: "0000020000000000",
v: "00000201a0860100000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac",
v: "a0860100000000002321027a46eb513588b01b37ea24303f4b628afd12cc20df789fede0921e43cad3e875ac",
),
KV(
k: "0000020000000001",
v: "00000201a86100000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a87",
v: "a86100000000000017a9147d46a730d31f97b1930d3368a967c309bd4d136a87",
),
]

View File

@ -5,10 +5,10 @@ expression: cf_data
[
KV(
k: "0000010000000000",
v: "0000010150c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac",
v: "50c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac",
),
KV(
k: "0000010000000001",
v: "00000101d43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c64287",
v: "d43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c64287",
),
]

View File

@ -5,18 +5,18 @@ expression: cf_data
[
KV(
k: "0000010000000000",
v: "0000010150c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac",
v: "50c30000000000002321025229e1240a21004cf8338db05679fa34753706e84f6aebba086ba04317fd8f99ac",
),
KV(
k: "0000010000000001",
v: "00000101d43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c64287",
v: "d43000000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c64287",
),
KV(
k: "0000020000000000",
v: "00000201a086010000000000232102acce9f6c16986c525fd34759d851ef5b4b85b5019a57bd59747be0ef1ba62523ac",
v: "a086010000000000232102acce9f6c16986c525fd34759d851ef5b4b85b5019a57bd59747be0ef1ba62523ac",
),
KV(
k: "0000020000000001",
v: "00000201a86100000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c64287",
v: "a86100000000000017a914ef775f1f997f122a062fff1a2d7443abd1f9c64287",
),
]

View File

@ -18,9 +18,7 @@ use zebra_chain::{
};
use crate::service::finalized_state::disk_format::{
    block::{
        TransactionIndex, TransactionLocation, HEIGHT_DISK_BYTES, TRANSACTION_LOCATION_DISK_BYTES,
    },
    expand_zero_be_bytes, truncate_zero_be_bytes, FromDisk, IntoDisk,
};
@ -153,13 +151,11 @@ impl OutputLocation {
}
/// Returns the block [`Height`] of this [`transparent::Output`]'s
/// transaction location.
#[allow(dead_code)]
pub fn height(&self) -> Height {
    self.transaction_location.height
}
/// Returns the index of this [`transparent::Output`]'s transaction
/// within its block.
#[allow(dead_code)]
pub fn transaction_index(&self) -> TransactionIndex {
    self.transaction_location.index
}
@ -404,38 +400,3 @@ impl FromDisk for transparent::Output {
bytes.as_ref().zcash_deserialize_into().unwrap()
}
}
// TODO: delete UTXO serialization (#3953)
impl IntoDisk for transparent::Utxo {
    type Bytes = Vec<u8>;

    /// Serializes as: the height bytes, then a single coinbase-flag byte,
    /// then the consensus-serialized output.
    fn as_bytes(&self) -> Self::Bytes {
        let mut bytes = self.height.as_bytes().to_vec();
        bytes.push(self.from_coinbase as u8);

        bytes.extend(
            self.output
                .zcash_serialize_to_vec()
                .expect("serialization to vec doesn't fail"),
        );

        bytes
    }
}
impl FromDisk for transparent::Utxo {
    fn from_bytes(bytes: impl AsRef<[u8]>) -> Self {
        // On-disk layout: height bytes, one coinbase-flag byte, then the
        // consensus-serialized output.
        let (height_bytes, rest_bytes) = bytes.as_ref().split_at(HEIGHT_DISK_BYTES);
        let (coinbase_flag_bytes, output_bytes) = rest_bytes.split_at(1);

        Self {
            output: output_bytes
                .zcash_deserialize_into()
                .expect("db has valid serialized data"),
            height: Height::from_bytes(height_bytes),
            from_coinbase: coinbase_flag_bytes[0] == 1u8,
        }
    }
}

View File

@ -15,7 +15,7 @@ use std::collections::{BTreeMap, HashMap};
use zebra_chain::{
    amount::{Amount, NonNegative},
    transparent::{self, Utxo},
};
use crate::{
@ -82,7 +82,14 @@ impl ZebraDb {
/// if it is unspent in the finalized state.
pub fn utxo_by_location(&self, output_location: OutputLocation) -> Option<transparent::Utxo> {
    let utxo_by_out_loc = self.db.cf_handle("utxo_by_outpoint").unwrap();

    // The database stores only the `transparent::Output`; the height and
    // coinbase flag are recovered from the output's location.
    let output = self.db.zs_get(&utxo_by_out_loc, &output_location)?;

    Some(Utxo::from_location(
        output,
        output_location.height(),
        output_location.transaction_index().as_usize(),
    ))
}
}
@ -109,7 +116,8 @@ impl DiskWriteBatch {
// Index all new transparent outputs, before deleting any we've spent
for (output_location, utxo) in new_outputs_by_out_loc {
let receiving_address = utxo.output.address(self.network());
let output = utxo.output;
let receiving_address = output.address(self.network());
// Update the address balance by adding this UTXO's value
if let Some(receiving_address) = receiving_address {
@ -118,13 +126,13 @@ impl DiskWriteBatch {
.or_insert_with(|| AddressBalanceLocation::new(output_location))
.balance_mut();
let new_address_balance = (*address_balance + utxo.output.value())
.expect("balance overflow already checked");
let new_address_balance =
(*address_balance + output.value()).expect("balance overflow already checked");
*address_balance = new_address_balance;
}
self.zs_insert(&utxo_by_outpoint, output_location, utxo);
self.zs_insert(&utxo_by_outpoint, output_location, output);
}
// Mark all transparent inputs as spent.