Squashed commit of the following:

commit 3dca82d62e10252555fcfe498e63c41a5ca967af
Author: Hanh <hanh425@gmail.com>
Date:   Sun Sep 18 01:30:56 2022 +0800

    WIP

commit 32013d4eea0bff3321e4bb82a4c878aa4feaea7b
Author: Hanh <hanh425@gmail.com>
Date:   Sat Sep 17 19:48:32 2022 +0800

    WIP

commit 0f4b16d1b3874f9377b7144591a602aa97e6747d
Author: Hanh <hanh425@gmail.com>
Date:   Sat Sep 17 12:45:41 2022 +0800

    WIP

commit 90cf116c230b2845d43bdc9b81057f8b761b6773
Author: Hanh <hanh425@gmail.com>
Date:   Fri Sep 16 21:05:52 2022 +0800

    WIP

commit d8a8db0a29564c98b3f7dc331d5d37f4b7a87c18
Author: Hanh <hanh425@gmail.com>
Date:   Fri Sep 16 18:03:56 2022 +0800

    WIP

commit cb467ea2cd7bada9a9cbf9fbc59b265bb3be4968
Author: Hanh <hanh425@gmail.com>
Date:   Fri Sep 16 17:17:51 2022 +0800

    WIP

commit ba3b4de96e19329a317cc4164cf69442e9b1aa8a
Author: Hanh <hanh425@gmail.com>
Date:   Fri Sep 16 14:41:07 2022 +0800

    Sapling Pedersen hash

commit 3e9be116a68342c22da147dba011e2d6a9e68cbc
Author: Hanh <hanh425@gmail.com>
Date:   Thu Sep 15 10:16:54 2022 +0800

    WIP

commit 94e0e8b0d5601ed48227bae89ea3a6c1bb093abc
Author: Hanh <hanh425@gmail.com>
Date:   Wed Sep 14 01:22:22 2022 +0800

    WIP

commit 95708029ab4b94d85f9d565a16505d767bb4598b
Author: Hanh <hanh425@gmail.com>
Date:   Tue Sep 13 21:27:51 2022 +0800

    Db API for Orchard

commit 170a31fd7cf79fba74f710fcd1bf7404235c8e51
Author: Hanh <hanh425@gmail.com>
Date:   Tue Sep 13 20:57:38 2022 +0800

    Add support for orchard to db schema
This commit is contained in:
Hanh 2022-10-20 14:32:11 +08:00
parent 267192c42c
commit d5f06cc7eb
22 changed files with 1590 additions and 90 deletions

2
.gitignore vendored
View File

@ -1,5 +1,5 @@
/target
.env
../../.env
.idea/
docs/_site/
*.db

View File

@ -7,6 +7,7 @@ edition = "2021"
[[bench]]
name = "scan_all"
harness = false
required-features = ["dotenv"]
[[bin]]
name = "warp-rpc"
@ -18,6 +19,11 @@ name = "wallet"
path = "src/main/wallet.rs"
required-features = ["dotenv"]
[[bin]]
name = "tests"
path = "src/main/tests.rs"
required-features = ["dotenv"]
#[[bin]]
#name = "ledger"
#path = "src/main/ledger.rs"

View File

@ -1,5 +1,4 @@
use criterion::{criterion_group, criterion_main, Criterion};
use warp_api_ffi::scan_all;
use tokio::runtime::Runtime;
use zcash_client_backend::encoding::decode_extended_full_viewing_key;
use zcash_primitives::consensus::{Network, Parameters};

View File

@ -581,7 +581,7 @@ pub unsafe extern "C" fn split_data(id: u32, data: *mut c_char) -> CResult<*mut
pub unsafe extern "C" fn merge_data(drop: *mut c_char) -> CResult<*mut c_char> {
from_c_str!(drop);
let res = || {
let res = crate::fountain::put_drop(&*drop)?
let res = crate::fountain::RaptorQDrops::put_drop(&*drop)?
.map(|d| base64::encode(&d))
.unwrap_or(String::new());
Ok::<_, anyhow::Error>(res)

View File

@ -141,7 +141,7 @@ async fn fetch_and_store_tree_state(
.into_inner();
let tree = CTree::read(&*hex::decode(&tree_state.sapling_tree)?)?;
let db = c.db()?;
DbAdapter::store_block(&db.connection, height, &block.hash, block.time, &tree)?;
DbAdapter::store_block(&db.connection, height, &block.hash, block.time, &tree, None)?;
Ok(())
}

View File

@ -9,6 +9,20 @@ use rayon::prelude::IntoParallelIterator;
use rayon::prelude::*;
use zcash_primitives::sapling::Node;
/// Shielded pools this hasher abstraction is meant to cover.
pub enum ProvingSystem {
Sapling,
Orchard
}
/// Abstraction over a pool-specific Merkle node hash, including batched
/// point-normalization variants.
/// NOTE(review): WIP — `batch_combine` currently has the same signature as
/// `combine_pair`; confirm the intended batched shape before implementing.
pub trait ProvingSystemHasher {
/// Curve point type used while accumulating hashes.
type ExtendedPoint;
/// Normalized (affine) point type.
type AffinePoint;
/// Hashes a sibling pair at the given tree depth.
fn combine_pair(depth: usize, left: &Node, right: &Node) -> Node;
/// Batched variant of `combine_pair` (see NOTE above).
fn batch_combine(depth: usize, left: &Node, right: &Node) -> Node;
/// Normalizes a batch of extended points into affine form.
fn batch_normalize(hash_extended: &[Self::ExtendedPoint], hash_affine: &mut [Self::AffinePoint]);
}
#[inline(always)]
fn batch_node_combine1(depth: usize, left: &Node, right: &Node) -> ExtendedPoint {
// Node::new(pedersen_hash(depth as u8, &left.repr, &right.repr))

186
src/db.rs
View File

@ -9,18 +9,26 @@ use rusqlite::{params, Connection, OptionalExtension, Transaction};
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use std::collections::HashMap;
use std::convert::TryInto;
use zcash_client_backend::encoding::decode_extended_full_viewing_key;
use zcash_params::coin::{get_coin_chain, get_coin_id, CoinType};
use zcash_params::coin::{CoinType, get_coin_chain, get_coin_id};
use zcash_primitives::consensus::{Network, NetworkUpgrade, Parameters};
use zcash_primitives::merkle_tree::IncrementalWitness;
use zcash_primitives::sapling::{Diversifier, Node, Note, Rseed, SaplingIvk};
use zcash_primitives::zip32::{DiversifierIndex, ExtendedFullViewingKey};
use crate::sync;
mod migration;
#[allow(dead_code)]
pub const DEFAULT_DB_PATH: &str = "zec.db";
/// Cheap, cloneable recipe for opening a `DbAdapter`: carries the coin type
/// and database path so callers can open fresh connections on demand
/// instead of sharing one `Connection`.
#[derive(Clone)]
pub struct DbAdapterBuilder {
/// Coin this database belongs to.
pub coin_type: CoinType,
/// Path to the SQLite database file.
pub db_path: String,
}
pub struct DbAdapter {
pub coin_type: CoinType,
pub connection: Connection,
@ -35,9 +43,17 @@ pub struct ReceivedNote {
pub value: u64,
pub rcm: Vec<u8>,
pub nf: Vec<u8>,
pub rho: Option<Vec<u8>>,
pub spent: Option<u32>,
}
/// Minimal projection of a received note — just what nullifier tracking
/// needs (see `get_unspent_nullifiers`).
pub struct ReceivedNoteShort {
/// Primary key (`id_note`) in the `received_notes` table.
pub id: u32,
/// Owning account id.
pub account: u32,
/// Note nullifier (32 bytes).
pub nf: Nf,
/// Note value.
pub value: u64,
}
#[derive(Clone)]
pub struct SpendableNote {
pub id: u32,
@ -82,6 +98,15 @@ pub fn wrap_query_no_rows(name: &'static str) -> impl Fn(rusqlite::Error) -> any
}
}
impl DbAdapterBuilder {
    /// Opens a new `DbAdapter` (and its database connection) from this recipe.
    pub fn build(&self) -> anyhow::Result<DbAdapter> {
        DbAdapter::new(self.coin_type, &self.db_path)
    }
}
impl DbAdapter {
pub fn new(coin_type: CoinType, db_path: &str) -> anyhow::Result<DbAdapter> {
let connection = Connection::open(db_path)?;
@ -226,6 +251,10 @@ impl DbAdapter {
"DELETE FROM sapling_witnesses WHERE height > ?1",
params![height],
)?;
tx.execute(
"DELETE FROM orchard_witnesses WHERE height > ?1",
params![height],
)?;
tx.execute(
"DELETE FROM received_notes WHERE height > ?1",
params![height],
@ -265,16 +294,48 @@ impl DbAdapter {
height: u32,
hash: &[u8],
timestamp: u32,
tree: &CTree,
sapling_tree: &CTree,
orchard_tree: Option<&CTree>,
) -> anyhow::Result<()> {
log::debug!("+block");
let mut bb: Vec<u8> = vec![];
tree.write(&mut bb)?;
let mut sapling_bb: Vec<u8> = vec![];
sapling_tree.write(&mut sapling_bb)?;
let orchard_bb = orchard_tree.map(|tree| {
let mut bb: Vec<u8> = vec![];
tree.write(&mut bb).unwrap();
bb
});
connection.execute(
"INSERT INTO blocks(height, hash, timestamp, sapling_tree)
VALUES (?1, ?2, ?3, ?4)
"INSERT INTO blocks(height, hash, timestamp, sapling_tree, orchard_tree)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT DO NOTHING",
params![height, hash, timestamp, &bb],
params![height, hash, timestamp, &sapling_bb, orchard_bb],
)?;
log::debug!("-block");
Ok(())
}
/// Inserts a block row (header data plus the serialized sapling tree and,
/// when present, the orchard tree) using the `sync` tree types.
///
/// Near-duplicate of `store_block`, which takes `crate::commitment::CTree`.
/// NOTE(review): consider unifying the two once the sync migration lands.
/// Existing rows are left untouched (`ON CONFLICT DO NOTHING`).
pub fn store_block2(
height: u32,
hash: &[u8],
timestamp: u32,
sapling_tree: &sync::CTree,
orchard_tree: Option<&sync::CTree>,
connection: &Connection,
) -> anyhow::Result<()> {
log::debug!("+block");
// Serialize the sapling tree; the orchard tree only when present.
let mut sapling_bb: Vec<u8> = vec![];
sapling_tree.write(&mut sapling_bb)?;
let orchard_bb = orchard_tree.map(|tree| {
let mut bb: Vec<u8> = vec![];
tree.write(&mut bb).unwrap();
bb
});
connection.execute(
"INSERT INTO blocks(height, hash, timestamp, sapling_tree, orchard_tree)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT DO NOTHING",
params![height, hash, timestamp, &sapling_bb, orchard_bb],
)?;
log::debug!("-block");
Ok(())
@ -313,9 +374,9 @@ impl DbAdapter {
db_tx: &Transaction,
) -> anyhow::Result<u32> {
log::debug!("+received_note {}", id_tx);
db_tx.execute("INSERT INTO received_notes(account, tx, height, position, output_index, diversifier, value, rcm, nf, spent)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)
ON CONFLICT DO NOTHING", params![note.account, id_tx, note.height, position as u32, note.output_index, note.diversifier, note.value as i64, note.rcm, note.nf, note.spent])?;
db_tx.execute("INSERT INTO received_notes(account, tx, height, position, output_index, diversifier, value, rcm, rho, nf, spent)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)
ON CONFLICT DO NOTHING", params![note.account, id_tx, note.height, position as u32, note.output_index, note.diversifier, note.value as i64, note.rcm, note.rho, note.nf, note.spent])?;
let id_note: u32 = db_tx
.query_row(
"SELECT id_note FROM received_notes WHERE tx = ?1 AND output_index = ?2",
@ -327,6 +388,7 @@ impl DbAdapter {
Ok(id_note)
}
// TODO: Depends on the type of witness
pub fn store_witnesses(
connection: &Connection,
witness: &Witness,
@ -345,6 +407,25 @@ impl DbAdapter {
Ok(())
}
/// Inserts a witness snapshot for `id_note` at `height` into the
/// pool-specific witness table (`{shielded_pool}_witnesses`).
///
/// `shielded_pool` is spliced into the SQL with `format!`, so it must be a
/// trusted, internal table-name prefix (e.g. "sapling" or "orchard") and
/// never come from user input. Existing rows are kept
/// (`ON CONFLICT DO NOTHING`).
pub fn store_witness(
witness: &sync::Witness,
height: u32,
id_note: u32,
connection: &Connection,
shielded_pool: &str
) -> anyhow::Result<()> {
log::debug!("+store_witness");
// Serialize the witness to its binary representation.
let mut bb: Vec<u8> = vec![];
witness.write(&mut bb)?;
connection.execute(
&format!("INSERT INTO {}_witnesses(note, height, witness) VALUES (?1, ?2, ?3)
ON CONFLICT DO NOTHING", shielded_pool),
params![id_note, height, bb],
)?;
log::debug!("-store_witness");
Ok(())
}
pub fn store_tx_metadata(&self, id_tx: u32, tx_info: &TransactionInfo) -> anyhow::Result<()> {
self.connection.execute(
"UPDATE transactions SET address = ?1, memo = ?2 WHERE id_tx = ?3",
@ -420,8 +501,12 @@ impl DbAdapter {
}
pub fn get_tree(&self) -> anyhow::Result<(CTree, Vec<Witness>)> {
todo!()
}
pub fn get_tree_by_name(&self, shielded_pool: &str) -> anyhow::Result<(sync::CTree, Vec<sync::Witness>)> {
let res = self.connection.query_row(
"SELECT height, sapling_tree FROM blocks WHERE height = (SELECT MAX(height) FROM blocks)",
&format!("SELECT height, {}_tree FROM blocks WHERE height = (SELECT MAX(height) FROM blocks)", shielded_pool),
[], |row| {
let height: u32 = row.get(0)?;
let tree: Vec<u8> = row.get(1)?;
@ -429,21 +514,21 @@ impl DbAdapter {
}).optional()?;
Ok(match res {
Some((height, tree)) => {
let tree = CTree::read(&*tree)?;
let tree = sync::CTree::read(&*tree)?;
let mut statement = self.connection.prepare(
"SELECT id_note, witness FROM sapling_witnesses w, received_notes n WHERE w.height = ?1 AND w.note = n.id_note AND (n.spent IS NULL OR n.spent = 0)")?;
&format!("SELECT id_note, witness FROM {}_witnesses w, received_notes n WHERE w.height = ?1 AND w.note = n.id_note AND (n.spent IS NULL OR n.spent = 0)", shielded_pool))?;
let ws = statement.query_map(params![height], |row| {
let id_note: u32 = row.get(0)?;
let witness: Vec<u8> = row.get(1)?;
Ok(Witness::read(id_note, &*witness).unwrap())
Ok(sync::Witness::read(id_note, &*witness).unwrap())
})?;
let mut witnesses: Vec<Witness> = vec![];
let mut witnesses = vec![];
for w in ws {
witnesses.push(w?);
}
(tree, witnesses)
}
None => (CTree::new(), vec![]),
None => (sync::CTree::new(), vec![]),
})
}
@ -493,6 +578,33 @@ impl DbAdapter {
Ok(nfs)
}
/// Returns the id, nullifier and value of every unspent note belonging to
/// `account`.
///
/// A note counts as unspent when its `spent` column is NULL or 0
/// (0 presumably marks a pending/unconfirmed spend — TODO confirm against
/// the writers of this column).
///
/// # Errors
/// Propagates SQL preparation/row errors.
///
/// # Panics
/// Panics if a stored nullifier is not exactly 32 bytes (a database
/// invariant violation).
pub fn get_unspent_nullifiers(
    &self,
    account: u32,
) -> anyhow::Result<Vec<ReceivedNoteShort>> {
    let sql = "SELECT id_note, nf, value FROM received_notes WHERE account = ?1 AND (spent IS NULL OR spent = 0)";
    let mut statement = self.connection.prepare(sql)?;
    let rows = statement.query_map(params![account], |row| {
        let id: u32 = row.get(0)?;
        let nf: Vec<u8> = row.get(1)?;
        let value: i64 = row.get(2)?;
        // Nullifiers are stored as exactly 32 bytes.
        let nf = Nf(nf.try_into().expect("nullifier must be 32 bytes"));
        Ok(ReceivedNoteShort {
            id,
            account,
            nf,
            value: value as u64,
        })
    })?;
    // Collect fallibly: short-circuits on the first row error instead of
    // accumulating through a manual loop.
    let nfs = rows.collect::<Result<Vec<_>, _>>()?;
    Ok(nfs)
}
pub fn get_nullifiers_raw(&self) -> anyhow::Result<Vec<(u32, u64, Vec<u8>)>> {
let mut statement = self
.connection
@ -510,6 +622,7 @@ impl DbAdapter {
Ok(v)
}
// TODO: Depends on the type of witness - Should it returned any spendable note? sapling or orchard
pub fn get_spendable_notes(
&self,
account: u32,
@ -590,6 +703,10 @@ impl DbAdapter {
"DELETE FROM sapling_witnesses WHERE height < ?1",
params![min_height],
)?;
transaction.execute(
"DELETE FROM orchard_witnesses WHERE height < ?1",
params![min_height],
)?;
transaction.execute("DELETE FROM blocks WHERE height < ?1", params![min_height])?;
transaction.commit()?;
}
@ -634,6 +751,7 @@ impl DbAdapter {
Ok(contacts)
}
// TODO: Orchard diversifiers have a different space
pub fn get_diversifier(&self, account: u32) -> anyhow::Result<DiversifierIndex> {
let diversifier_index = self
.connection
@ -652,6 +770,21 @@ impl DbAdapter {
Ok(DiversifierIndex(diversifier_index))
}
// TODO: See get_diversifier
/// Persists the latest diversifier index for `account` (insert-or-update).
pub fn store_diversifier(
    &self,
    account: u32,
    diversifier_index: &DiversifierIndex,
) -> anyhow::Result<()> {
    // The index is stored as its raw byte blob.
    let index_blob = diversifier_index.0.to_vec();
    self.connection.execute(
        "INSERT INTO diversifiers(account, diversifier_index) VALUES (?1, ?2) ON CONFLICT \
        (account) DO UPDATE SET diversifier_index = excluded.diversifier_index",
        params![account, index_blob],
    )?;
    Ok(())
}
pub fn get_account_info(&self, account: u32) -> anyhow::Result<AccountData> {
let account_data = self
.connection
@ -679,20 +812,6 @@ impl DbAdapter {
Ok(account_data)
}
pub fn store_diversifier(
&self,
account: u32,
diversifier_index: &DiversifierIndex,
) -> anyhow::Result<()> {
let diversifier_bytes = diversifier_index.0.to_vec();
self.connection.execute(
"INSERT INTO diversifiers(account, diversifier_index) VALUES (?1, ?2) ON CONFLICT \
(account) DO UPDATE SET diversifier_index = excluded.diversifier_index",
params![account, diversifier_bytes],
)?;
Ok(())
}
pub fn get_taddr(&self, account: u32) -> anyhow::Result<Option<String>> {
let address = self
.connection
@ -813,6 +932,8 @@ impl DbAdapter {
self.connection.execute("DELETE FROM received_notes", [])?;
self.connection
.execute("DELETE FROM sapling_witnesses", [])?;
self.connection
.execute("DELETE FROM orchard_witnesses", [])?;
self.connection.execute("DELETE FROM transactions", [])?;
self.connection.execute("DELETE FROM messages", [])?;
Ok(())
@ -1147,7 +1268,7 @@ pub struct AccountData {
#[cfg(test)]
mod tests {
use crate::db::{DbAdapter, ReceivedNote, DEFAULT_DB_PATH};
use crate::db::{DbAdapter, DEFAULT_DB_PATH, ReceivedNote};
use crate::commitment::{CTree, Witness};
use zcash_params::coin::CoinType;
@ -1158,7 +1279,7 @@ mod tests {
db.trim_to_height(0).unwrap();
let db_tx = db.begin_transaction().unwrap();
DbAdapter::store_block(&db_tx, 1, &[0u8; 32], 0, &CTree::new()).unwrap();
DbAdapter::store_block(&db_tx, 1, &[0u8; 32], 0, &CTree::new(), None).unwrap();
let id_tx = DbAdapter::store_transaction(&[0; 32], 1, 1, 0, 20, &db_tx).unwrap();
DbAdapter::store_received_note(
&ReceivedNote {
@ -1169,6 +1290,7 @@ mod tests {
value: 0,
rcm: vec![],
nf: vec![],
rho: None,
spent: None,
},
id_tx,

View File

@ -177,8 +177,26 @@ pub fn init_db(connection: &Connection) -> anyhow::Result<()> {
connection.execute("ALTER TABLE messages ADD id_tx INTEGER", [])?;
}
if version != 4 {
update_schema_version(connection, 4)?;
if version < 5 {
connection.execute("ALTER TABLE blocks ADD orchard_tree BLOB", [])?;
connection.execute("ALTER TABLE received_notes ADD rho BLOB", [])?;
connection.execute(
"CREATE TABLE IF NOT EXISTS orchard_witnesses (
id_witness INTEGER PRIMARY KEY,
note INTEGER NOT NULL,
height INTEGER NOT NULL,
witness BLOB NOT NULL,
CONSTRAINT witness_height UNIQUE (note, height))",
[],
)?;
connection.execute(
"CREATE INDEX IF NOT EXISTS i_orchard_witness ON orchard_witnesses(height)",
[],
)?;
}
if version != 5 {
update_schema_version(connection, 5)?;
log::info!("Database migrated");
}

View File

@ -96,7 +96,9 @@ impl FountainCodes {
}
}
pub fn put_drop(drop: &str) -> anyhow::Result<Option<Vec<u8>>> {
let mut fc = RAPTORQ.lock().unwrap();
fc.put_drop(drop)
impl RaptorQDrops {
pub fn put_drop(drop: &str) -> anyhow::Result<Option<Vec<u8>>> {
let mut fc = RAPTORQ.lock().unwrap();
fc.put_drop(drop)
}
}

View File

@ -410,9 +410,9 @@ pub mod compact_tx_streamer_client {
&mut self,
request: impl tonic::IntoRequest<super::BlockRange>,
) -> Result<
tonic::Response<tonic::codec::Streaming<super::CompactBlock>>,
tonic::Status,
> {
tonic::Response<tonic::codec::Streaming<super::CompactBlock>>,
tonic::Status,
> {
self.inner
.ready()
.await
@ -473,9 +473,9 @@ pub mod compact_tx_streamer_client {
&mut self,
request: impl tonic::IntoRequest<super::TransparentAddressBlockFilter>,
) -> Result<
tonic::Response<tonic::codec::Streaming<super::RawTransaction>>,
tonic::Status,
> {
tonic::Response<tonic::codec::Streaming<super::RawTransaction>>,
tonic::Status,
> {
self.inner
.ready()
.await
@ -544,9 +544,9 @@ pub mod compact_tx_streamer_client {
&mut self,
request: impl tonic::IntoRequest<super::Exclude>,
) -> Result<
tonic::Response<tonic::codec::Streaming<super::CompactTx>>,
tonic::Status,
> {
tonic::Response<tonic::codec::Streaming<super::CompactTx>>,
tonic::Status,
> {
self.inner
.ready()
.await
@ -568,9 +568,9 @@ pub mod compact_tx_streamer_client {
&mut self,
request: impl tonic::IntoRequest<super::Empty>,
) -> Result<
tonic::Response<tonic::codec::Streaming<super::RawTransaction>>,
tonic::Status,
> {
tonic::Response<tonic::codec::Streaming<super::RawTransaction>>,
tonic::Status,
> {
self.inner
.ready()
.await
@ -632,9 +632,9 @@ pub mod compact_tx_streamer_client {
&mut self,
request: impl tonic::IntoRequest<super::GetAddressUtxosArg>,
) -> Result<
tonic::Response<tonic::codec::Streaming<super::GetAddressUtxosReply>>,
tonic::Status,
> {
tonic::Response<tonic::codec::Streaming<super::GetAddressUtxosReply>>,
tonic::Status,
> {
self.inner
.ready()
.await

View File

@ -6,6 +6,7 @@ use std::io::Read;
use std::ops::AddAssign;
use zcash_params::GENERATORS;
use zcash_primitives::constants::PEDERSEN_HASH_CHUNKS_PER_GENERATOR;
use crate::Hash;
lazy_static! {
pub static ref GENERATORS_EXP: Vec<ExtendedNielsPoint> = read_generators_bin();
@ -31,6 +32,7 @@ fn read_generators_bin() -> Vec<ExtendedNielsPoint> {
macro_rules! accumulate_scalar {
($acc: ident, $cur: ident, $x: expr) => {
// println!("accumulate_scalar {}", $x);
let mut tmp = $cur;
if $x & 1 != 0 {
tmp.add_assign(&$cur);
@ -47,8 +49,6 @@ macro_rules! accumulate_scalar {
};
}
pub type Hash = [u8; 32];
pub fn pedersen_hash(depth: u8, left: &Hash, right: &Hash) -> Hash {
let p = pedersen_hash_inner(depth, left, right);
@ -68,11 +68,9 @@ pub fn pedersen_hash_inner(depth: u8, left: &Hash, right: &Hash) -> ExtendedPoin
let b = depth >> 3;
accumulate_scalar!(acc, cur, a);
cur = cur.double().double().double();
// println!("{}", hex::encode(acc.to_bytes()));
accumulate_scalar!(acc, cur, b);
cur = cur.double().double().double();
// println!("{}", hex::encode(acc.to_bytes()));
let mut i_generator = 0;
let mut chunks_remaining = PEDERSEN_HASH_CHUNKS_PER_GENERATOR - 3;
@ -104,8 +102,6 @@ pub fn pedersen_hash_inner(depth: u8, left: &Hash, right: &Hash) -> ExtendedPoin
chunks_remaining -= 1;
if chunks_remaining == 0 {
// println!("++ {}", hex::encode(acc.to_bytes()));
result += generator_multiplication(&acc, &GENERATORS_EXP, i_generator);
i_generator += 1;

View File

@ -72,6 +72,8 @@ const LWD_URL: &str = "https://mainnet.lightwalletd.com:9067";
// YCash
// pub const LWD_URL: &str = "https://lite.ycash.xyz:9067";
pub type Hash = [u8; 32];
mod builder;
mod chain;
mod coinconfig;
@ -80,6 +82,8 @@ mod contact;
mod db;
mod fountain;
mod hash;
pub mod sync;
pub mod sapling;
mod key;
mod key2;
mod mempool;
@ -115,11 +119,11 @@ pub use crate::chain::{
ChainError,
};
pub use crate::coinconfig::{
init_coin, set_active, set_active_account, set_coin_lwd_url, CoinConfig,
CoinConfig, init_coin, set_active, set_active_account, set_coin_lwd_url,
COIN_CONFIG,
};
pub use crate::db::{AccountData, AccountInfo, AccountRec, DbAdapter, TxRec};
// pub use crate::fountain::FountainCodes;
pub use crate::hash::Hash;
pub use crate::db::{AccountData, AccountInfo, AccountRec, DbAdapter, TxRec, DbAdapterBuilder};
pub use crate::fountain::{FountainCodes, RaptorQDrops};
pub use crate::key::KeyHelpers;
pub use crate::lw_rpc::compact_tx_streamer_client::CompactTxStreamerClient;
pub use crate::lw_rpc::*;

View File

@ -15,10 +15,7 @@ use std::sync::Mutex;
use thiserror::Error;
use warp_api_ffi::api::payment::{Recipient, RecipientMemo};
use warp_api_ffi::api::payment_uri::PaymentURI;
use warp_api_ffi::{
derive_zip32, get_best_server, AccountData, AccountInfo, AccountRec, CoinConfig, KeyPack,
RaptorQDrops, Tx, TxRec,
};
use warp_api_ffi::{get_best_server, AccountData, AccountInfo, AccountRec, CoinConfig, KeyPack, Tx, TxRec, RaptorQDrops};
lazy_static! {
static ref SYNC_CANCELED: Mutex<bool> = Mutex::new(false);
@ -102,7 +99,6 @@ async fn main() -> anyhow::Result<()> {
merge_data,
derive_keys,
instant_sync,
trial_decrypt,
],
)
.attach(AdHoc::config::<Config>())
@ -310,7 +306,7 @@ pub fn split_data(id: u32, data: String) -> Result<Json<RaptorQDrops>, Error> {
#[post("/merge?<data>")]
pub fn merge_data(data: String) -> Result<String, Error> {
let result = warp_api_ffi::put_drop(&data)?
let result = warp_api_ffi::RaptorQDrops::put_drop(&data)?
.map(|data| hex::encode(&data))
.unwrap_or(String::new());
Ok(result)
@ -333,21 +329,6 @@ pub fn derive_keys(
Ok(Json(result))
}
#[post("/trial_decrypt?<height>&<cmu>&<epk>&<ciphertext>")]
pub async fn trial_decrypt(
height: u32,
cmu: String,
epk: String,
ciphertext: String,
) -> Result<String, Error> {
let epk = hex::decode(&epk)?;
let cmu = hex::decode(&cmu)?;
let ciphertext = hex::decode(&ciphertext)?;
let note = warp_api_ffi::api::sync::trial_decrypt(height, &cmu, &epk, &ciphertext)?;
log::info!("{:?}", note);
Ok(note.is_some().to_string())
}
#[post("/instant_sync")]
pub async fn instant_sync() -> Result<(), Error> {
let c = CoinConfig::get_active();

116
src/main/tests.rs Normal file
View File

@ -0,0 +1,116 @@
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Write};
use byteorder::{LE, ReadBytesExt, WriteBytesExt};
use tonic::Request;
use prost::Message;
use zcash_client_backend::encoding::{decode_extended_full_viewing_key, decode_extended_spending_key, encode_extended_full_viewing_key, encode_payment_address};
use zcash_primitives::consensus::{Network, NetworkUpgrade, Parameters};
use zcash_primitives::sapling::note_encryption::SaplingDomain;
use zcash_primitives::zip32::ExtendedFullViewingKey;
use warp_api_ffi::{BlockId, BlockRange, ChainSpec, COIN_CONFIG, CoinConfig, CompactBlock, connect_lightwalletd, DbAdapter, DbAdapterBuilder, derive_zip32, init_coin};
use warp_api_ffi::sapling::{DecryptedSaplingNote, SaplingDecrypter, SaplingHasher, SaplingViewKey};
use warp_api_ffi::sync::{WarpProcessor, Synchronizer, CTree};
/// `Synchronizer` specialized for the Sapling pool on the configured network.
type SaplingSynchronizer = Synchronizer<Network, SaplingDomain<Network>, SaplingViewKey, DecryptedSaplingNote,
SaplingDecrypter<Network>, SaplingHasher>;
#[allow(dead_code)]
/// Downloads every compact block of the YCash chain — from Sapling
/// activation + 1 up to the server's current tip — and writes them to a
/// local `ycash.bin` file for later offline replay by `read_block_file`.
///
/// File format: for each block, a little-endian `u32` length prefix
/// followed by the protobuf-encoded `CompactBlock` bytes.
async fn write_block_file() {
    // Coin index 1 is YCash; this also initializes the coin configuration.
    init_coin(1, "yec-new.db").unwrap();
    let coin = COIN_CONFIG[1].lock().unwrap();
    let mut client = connect_lightwalletd("https://lite.ycash.xyz:9067").await.unwrap();
    let network = coin.chain.network();
    // First block after Sapling activation.
    let start = u32::from(network.activation_height(NetworkUpgrade::Sapling).unwrap()) + 1;
    let end = client.get_latest_block(Request::new(ChainSpec {})).await.unwrap().into_inner();
    let end = end.height as u32;
    let mut blocks = client.get_block_range(Request::new(BlockRange {
        start: Some(BlockId { height: start as u64, hash: vec![] }),
        end: Some(BlockId { height: end as u64, hash: vec![] }),
        spam_filter_threshold: 0
    })).await.unwrap().into_inner();
    let file = File::create("ycash.bin").unwrap();
    let mut writer = BufWriter::new(file);
    while let Some(block) = blocks.message().await.unwrap() {
        println!("{}", block.height);
        // Encode to protobuf and write one length-prefixed record.
        let mut buf = prost::bytes::BytesMut::new();
        block.encode(&mut buf).unwrap();
        writer.write_u32::<LE>(buf.len() as u32).unwrap();
        writer.write_all(&buf).unwrap();
    }
}
/// Replays compact blocks from a previously captured block file
/// (see `write_block_file`) through a Sapling `Synchronizer`, then stores
/// the final note-commitment tree of the last block seen.
///
/// NOTE(review): the input path is hard-coded to `/home/hanh/ycash.bin` —
/// fine for a scratch test binary, but should be a parameter eventually.
fn read_block_file(coin: &CoinConfig, fvk: ExtendedFullViewingKey) {
    let network = coin.chain.network();
    let file = File::open("/home/hanh/ycash.bin").unwrap();
    let mut reader = BufReader::new(file);
    let db_builder = DbAdapterBuilder { coin_type: coin.coin_type, db_path: coin.db_path.as_ref().unwrap().to_owned() };
    // Fresh synchronizer scanning for a single view key (account 1).
    let mut synchronizer = SaplingSynchronizer {
        decrypter: SaplingDecrypter::new(*network),
        warper: WarpProcessor::new(SaplingHasher::default()),
        vks: vec![SaplingViewKey {
            account: 1,
            fvk: fvk.clone(),
            ivk: fvk.fvk.vk.ivk()
        }],
        tree: CTree::new(),
        witnesses: vec![],
        db: db_builder.clone(),
        shielded_pool: "sapling".to_string(),
        note_position: 0,
        nullifiers: HashMap::new(),
        _phantom: Default::default()
    };
    synchronizer.initialize().unwrap();
    let mut blocks = vec![];
    // Track header data of the last decoded block for store_block2 below.
    let mut height = 0;
    let mut hash = [0u8; 32];
    let mut time = 0;
    // Each record is a LE u32 length followed by a protobuf CompactBlock.
    while let Ok(len) = reader.read_u32::<LE>() {
        let mut buf = vec![0u8; len as usize];
        reader.read_exact(&mut buf).unwrap();
        let cb: CompactBlock = CompactBlock::decode(&*buf).unwrap();
        height = cb.height;
        hash.copy_from_slice(&cb.hash);
        time = cb.time;
        blocks.push(cb);
        // Process in batches to bound memory; flushes at round heights.
        if height % 100_000 == 0 {
            synchronizer.process(blocks).unwrap();
            blocks = vec![];
        }
    }
    // Flush the final partial batch.
    synchronizer.process(blocks).unwrap();
    let db = db_builder.build().unwrap();
    DbAdapter::store_block2(height as u32, &hash, time, &synchronizer.tree, None, &db.connection).unwrap();
}
/// Scratch test driver: derives a Sapling account from the `SEED`
/// environment variable (via dotenv), registers it in the database, then
/// replays a captured block file through the synchronizer.
#[tokio::main]
async fn main() {
    env_logger::init();
    // Coin index 1 is YCash.
    init_coin(1, "yec-new.db").unwrap();
    let coin = COIN_CONFIG[1].lock().unwrap();
    let network = coin.chain.network();
    let _ = dotenv::dotenv();
    let seed_str = dotenv::var("SEED").unwrap();
    // Derive the account's spending key at zip32 path (0, 0).
    let kp = derive_zip32(&network, &seed_str, 0, 0, None).unwrap();
    let zk = kp.z_key.clone();
    let sk = decode_extended_spending_key(network.hrp_sapling_extended_spending_key(), &zk).unwrap().unwrap();
    let fvk = ExtendedFullViewingKey::from(&sk);
    let fvk_str = encode_extended_full_viewing_key(network.hrp_sapling_extended_full_viewing_key(), &fvk);
    let (_, pa) = fvk.default_address();
    let address = encode_payment_address(network.hrp_sapling_payment_address(), &pa);
    let db_builder = DbAdapterBuilder { coin_type: coin.coin_type, db_path: coin.db_path.as_ref().unwrap().to_owned() };
    let db = db_builder.build().unwrap();
    db.store_account("test", Some(&seed_str), 0, Some(&zk), &fvk_str, &address).unwrap();
    // Uncomment to (re)capture the block file from lightwalletd first:
    // write_block_file().await;
    read_block_file(&coin, fvk);
}

34
src/sapling.rs Normal file
View File

@ -0,0 +1,34 @@
use std::io::Read;
use group::GroupEncoding;
use jubjub::{ExtendedNielsPoint, ExtendedPoint, SubgroupPoint};
use lazy_static::lazy_static;
use zcash_params::GENERATORS;
lazy_static! {
pub static ref GENERATORS_EXP: Vec<ExtendedNielsPoint> = read_generators_bin();
}
mod hash;
mod note;
pub use note::{SaplingDecrypter, SaplingViewKey, DecryptedSaplingNote};
pub use hash::SaplingHasher;
/// Deserializes the precomputed Pedersen-hash generator table embedded in
/// `zcash_params::GENERATORS`.
///
/// The blob holds 3 * 32 * 256 curve points (generator segment x scalar
/// byte index x byte value), each serialized as 32 bytes. They are returned
/// as `ExtendedNielsPoint`s, ready for fast mixed addition.
///
/// # Panics
/// Panics if the embedded blob is truncated or contains an invalid point
/// encoding — either would mean the build parameters are corrupted.
fn read_generators_bin() -> Vec<ExtendedNielsPoint> {
    const N_POINTS: usize = 3 * 32 * 256;
    let mut generators_bin = GENERATORS;
    // Allocate the exact final size up front instead of vec![] + reserve.
    let mut gens: Vec<ExtendedNielsPoint> = Vec::with_capacity(N_POINTS);
    for _ in 0..N_POINTS {
        let mut bb = [0u8; 32];
        generators_bin.read_exact(&mut bb).unwrap();
        let p = ExtendedPoint::from(SubgroupPoint::from_bytes_unchecked(&bb).unwrap())
            .to_niels();
        gens.push(p);
    }
    gens
}

165
src/sapling/hash.rs Normal file
View File

@ -0,0 +1,165 @@
use ff::PrimeField;
use group::Curve;
use jubjub::{ExtendedPoint, Fr};
use zcash_primitives::constants::PEDERSEN_HASH_CHUNKS_PER_GENERATOR;
use crate::sync::{Hasher, Node};
use super::GENERATORS_EXP;
/// Folds one 3-bit chunk `x` into the running Pedersen-hash scalar.
///
/// `cur` holds the current chunk base; per the Zcash Pedersen-hash chunk
/// encoding the chunk contributes `(1 + b0 + 2*b1) * (-1)^b2 * cur` to
/// `acc`. `cur` is doubled once here; the caller performs the remaining
/// doublings to advance to the next chunk.
#[inline(always)]
fn accumulate_scalar(acc: &mut Fr, cur: &mut Fr, x: u8) {
    let mut term = *cur;
    if x & 1 != 0 {
        term += *cur;
    }
    *cur = cur.double();
    if x & 2 != 0 {
        term += *cur;
    }
    if x & 4 != 0 {
        term = -term;
    }
    *acc += term;
}
/// Multiplies generator segment `idx_generator` by the accumulated scalar
/// `acc` using the precomputed table: one lookup plus one mixed addition
/// per scalar byte.
fn accumulate_generator(acc: &Fr, idx_generator: u32) -> ExtendedPoint {
    let repr = acc.to_repr();
    // Table layout: [generator][scalar byte index][byte value] -> point.
    let base = idx_generator as usize * 32 * 256;
    let mut sum = ExtendedPoint::identity();
    for (i, &byte) in repr.iter().enumerate() {
        sum += GENERATORS_EXP[base + i * 256 + byte as usize];
    }
    sum
}
/// Pedersen-hash combine for the Sapling Merkle tree: hashes `depth`
/// (6 bits of personalization) followed by the 255-bit values `left` and
/// `right`, returning the u-coordinate of the resulting Jubjub point.
///
/// Optimized equivalent of `pedersen_hash`: the scalar is absorbed in
/// 3-bit chunks and each full segment of
/// `PEDERSEN_HASH_CHUNKS_PER_GENERATOR` chunks is multiplied by its
/// generator via the precomputed `GENERATORS_EXP` table.
pub fn hash_combine(depth: u8, left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut hash = ExtendedPoint::identity();
    let mut acc = Fr::zero();
    let mut cur = Fr::one();
    // Personalization: depth as two 3-bit chunks, low bits first.
    let a = depth & 7;
    let b = depth >> 3;
    accumulate_scalar(&mut acc, &mut cur, a);
    cur = cur.double().double().double();
    accumulate_scalar(&mut acc, &mut cur, b);
    cur = cur.double().double().double();

    // Shift right by 1 bit and overwrite the 256th bit of left
    let mut left = *left;
    let mut right = *right;

    // move by 1 bit to fill the missing 256th bit of left
    let mut carry = 0;
    for i in (0..32).rev() {
        let c = right[i] & 1;
        right[i] = right[i] >> 1 | carry << 7;
        carry = c;
    }
    left[31] &= 0x7F;
    left[31] |= carry << 7; // move the first bit of right into 256th of left

    // we have 255*2/3 = 170 chunks
    let mut bit_offset = 0;
    let mut byte_offset = 0;
    let mut idx_generator = 0;
    for i in 0..170 {
        // Read up to 16 bits straddling the current byte boundary of the
        // concatenated left||right bit stream, then keep 3 of them.
        let mut v = if byte_offset < 31 {
            left[byte_offset] as u16 | (left[byte_offset + 1] as u16) << 8
        } else if byte_offset == 31 {
            left[31] as u16 | (right[0] as u16) << 8
        } else if byte_offset < 63 {
            right[byte_offset - 32] as u16 | (right[byte_offset - 31] as u16) << 8
        } else {
            right[byte_offset - 32] as u16
        };
        v = v >> bit_offset & 0x07; // keep 3 bits
        accumulate_scalar(&mut acc, &mut cur, v as u8);
        // (i + 3) = total chunks absorbed so far (2 depth chunks + i + 1):
        // close the segment whenever a full generator's worth is absorbed.
        if (i+3) % PEDERSEN_HASH_CHUNKS_PER_GENERATOR as u32 == 0 {
            hash += accumulate_generator(&acc, idx_generator);
            idx_generator += 1;
            acc = Fr::zero();
            cur = Fr::one();
        }
        else {
            cur = cur.double().double().double(); // 2^4 * cur
        }
        bit_offset += 3;
        if bit_offset >= 8 {
            byte_offset += bit_offset / 8;
            bit_offset %= 8;
        }
    }
    // Flush the final (partial) segment.
    hash += accumulate_generator(&acc, idx_generator);
    let hash = hash
        .to_affine()
        .get_u()
        .to_repr();
    hash
}
/// Stateless `Hasher` implementation for the Sapling commitment tree,
/// backed by the optimized Pedersen hash above.
#[derive(Clone, Default)]
pub struct SaplingHasher {}

impl Hasher for SaplingHasher {
    /// Hash of an uncommitted (empty) leaf: all-zero bytes.
    fn uncommited_node() -> Node {
        [0u8; 32]
    }

    /// Combines two sibling nodes at `depth` into their parent node.
    fn node_combine(&self, depth: u8, left: &Node, right: &Node) -> Node {
        hash_combine(depth, left, right)
    }
}
#[cfg(test)]
mod tests {
    use std::convert::TryInto;
    use rand::RngCore;
    use rand::rngs::OsRng;
    use crate::pedersen_hash;
    use crate::sapling::hash::hash_combine;

    /// Fixed-vector check: the optimized `hash_combine` must agree with the
    /// reference `pedersen_hash` on a known input pair.
    #[test]
    fn test_hash1() {
        let depth = 8;
        let sa = "767a9a7e989289efdfa69c4c8e985c31f3c2c0353f20a80f572854206f077f86";
        let sb = "944c46945a9e7a0a753850bd90f69d44ac884b60244a9f8eacf3a2aeddd08d6e";
        let a: [u8; 32] = hex::decode(sa).unwrap().try_into().unwrap();
        let b: [u8; 32] = hex::decode(sb).unwrap().try_into().unwrap();
        println!("A: {}", hex::encode(a));
        println!("B: {}", hex::encode(b));
        let hash = pedersen_hash(depth, &a, &b);
        let hash2 = hash_combine(depth, &a, &b);
        println!("Reference Hash: {}", hex::encode(hash));
        println!("This Hash: {}", hex::encode(hash2));
        // Both implementations return the canonical 32-byte repr.
        assert_eq!(hash, hash2);
    }

    /// Randomized agreement test: 1000 random (depth, left, right) inputs
    /// against the reference implementation.
    #[test]
    fn test_hash_random() {
        let mut rng = OsRng;
        for _ in 0..1000 {
            // Depths 0..50 cover the full Sapling tree height (32) and more.
            let depth = (rng.next_u32() % 50) as u8;
            let mut a = [0u8; 32];
            let mut b = [0u8; 32];
            rng.fill_bytes(&mut a);
            rng.fill_bytes(&mut b);
            println!("A: {}", hex::encode(a));
            println!("B: {}", hex::encode(b));
            let hash = pedersen_hash(depth, &a, &b);
            let hash2 = hash_combine(depth, &a, &b);
            println!("Reference Hash: {}", hex::encode(hash));
            println!("This Hash: {}", hex::encode(hash2));
            // Both implementations return the canonical 32-byte repr.
            assert_eq!(hash, hash2);
        }
    }
}

100
src/sapling/note.rs Normal file
View File

@ -0,0 +1,100 @@
use std::convert::TryInto;
use ff::PrimeField;
use zcash_note_encryption::Domain;
use zcash_primitives::consensus::{BlockHeight, Parameters};
use zcash_primitives::sapling::note_encryption::SaplingDomain;
use zcash_primitives::sapling::{PaymentAddress, SaplingIvk};
use zcash_primitives::zip32::ExtendedFullViewingKey;
use crate::chain::Nf;
use crate::CompactTx;
use crate::db::ReceivedNote;
use crate::sync::Node;
use crate::sync::{CompactOutputBytes, DecryptedNote, OutputPosition, TrialDecrypter, ViewKey};
/// A Sapling viewing key for one account: the full viewing key plus the
/// derived incoming viewing key used for trial decryption.
#[derive(Clone)]
pub struct SaplingViewKey {
    /// Account id this key belongs to.
    pub account: u32,
    /// Extended full viewing key (also yields the nullifier key).
    pub fvk: ExtendedFullViewingKey,
    /// Incoming viewing key derived from `fvk`.
    pub ivk: SaplingIvk,
}

impl <P: Parameters> ViewKey<SaplingDomain<P>> for SaplingViewKey {
    /// Account id this key scans for.
    fn account(&self) -> u32 { self.account }

    /// IVK consumed by the note-encryption trial decrypter.
    fn ivk(&self) -> <SaplingDomain<P> as Domain>::IncomingViewingKey {
        self.ivk.clone()
    }
}
/// A Sapling note successfully trial-decrypted from a compact block,
/// together with the key that decrypted it and where it sits in the chain.
pub struct DecryptedSaplingNote {
    /// The view key that decrypted this note.
    pub vk: SaplingViewKey,
    /// The decrypted note itself.
    pub note: zcash_primitives::sapling::Note,
    /// Recipient payment address recovered during decryption.
    pub pa: PaymentAddress,
    /// Location of the output (height / tx / index within block).
    pub output_position: OutputPosition,
    /// Note commitment (cmu) as a tree node.
    pub cmx: Node,
}

impl <P: Parameters> DecryptedNote<SaplingDomain<P>, SaplingViewKey> for DecryptedSaplingNote {
    /// Bundles the decryption results into a `DecryptedSaplingNote`.
    fn from_parts(vk: SaplingViewKey, note: zcash_primitives::sapling::Note, pa: PaymentAddress, output_position: OutputPosition, cmx: Node) -> Self {
        DecryptedSaplingNote {
            vk,
            note,
            pa,
            output_position,
            cmx,
        }
    }

    /// Absolute position = offset of the block's first output + position
    /// of this output within the block.
    fn position(&self, block_offset: usize) -> usize {
        block_offset + self.output_position.position_in_block
    }

    /// Note commitment used to update the commitment tree.
    fn cmx(&self) -> Node {
        self.cmx
    }

    /// Converts into the database row shape, deriving the nullifier from
    /// the viewing key and the note's absolute `position`.
    fn to_received_note(&self, position: u64) -> ReceivedNote {
        let viewing_key = &self.vk.fvk.fvk.vk;
        ReceivedNote {
            account: self.vk.account,
            height: self.output_position.height,
            output_index: self.output_position.output_index as u32,
            diversifier: self.pa.diversifier().0.to_vec(),
            value: self.note.value,
            rcm: self.note.rcm().to_repr().to_vec(),
            nf: self.note.nf(viewing_key, position).to_vec(),
            // rho is only used by Orchard notes; always None for Sapling.
            rho: None,
            spent: None
        }
    }
}
/// Trial decrypter for the sapling shielded pool.
#[derive(Clone)]
pub struct SaplingDecrypter<N> {
/// Network (consensus parameters).
pub network: N,
}
impl <N> SaplingDecrypter<N> {
pub fn new(network: N) -> Self {
SaplingDecrypter {
network,
}
}
}
impl <N: Parameters> TrialDecrypter<N, SaplingDomain<N>, SaplingViewKey, DecryptedSaplingNote> for SaplingDecrypter<N> {
fn domain(&self, height: BlockHeight) -> SaplingDomain<N> {
SaplingDomain::<N>::for_height(self.network.clone(), height)
}
fn spends(&self, vtx: &CompactTx) -> Vec<Nf> {
vtx.spends.iter().map(|co| {
let nf: [u8; 32] = co.nf.clone().try_into().unwrap();
Nf(nf)
}).collect()
}
fn outputs(&self, vtx: &CompactTx) -> Vec<CompactOutputBytes> {
vtx.outputs.iter().map(|co| co.into()).collect()
}
}

View File

@ -210,6 +210,7 @@ pub async fn sync_async(
diversifier: n.pa.diversifier().0.to_vec(),
value: note.value,
rcm: rcm.to_vec(),
rho: None,
nf: nf.0.to_vec(),
spent: None,
},
@ -324,6 +325,7 @@ pub async fn sync_async(
&block.hash,
block.time,
&tree,
None,
)?;
db_transaction.commit()?;
// db_transaction is dropped here

165
src/sync.rs Normal file
View File

@ -0,0 +1,165 @@
use std::collections::HashMap;
use std::convert::TryInto;
use std::marker::PhantomData;
use anyhow::Result;
use rayon::prelude::*;
use zcash_note_encryption::BatchDomain;
use zcash_primitives::consensus::Parameters;
use crate::{CompactBlock, DbAdapter};
use crate::chain::Nf;
use crate::db::{DbAdapterBuilder, ReceivedNote, ReceivedNoteShort};
mod tree;
mod trial_decrypt;
pub use trial_decrypt::{ViewKey, DecryptedNote, TrialDecrypter, CompactOutputBytes, OutputPosition};
pub use tree::{Hasher, Node, WarpProcessor, Witness, CTree};
/// Generic shielded-pool synchronizer: trial-decrypts compact blocks with the
/// registered view keys, records received and spent notes in the database and
/// keeps the note commitment tree and per-note witnesses up to date.
pub struct Synchronizer<N: Parameters, D: BatchDomain<ExtractedCommitmentBytes = [u8; 32]>, VK: ViewKey<D>, DN: DecryptedNote<D, VK>,
TD: TrialDecrypter<N, D, VK, DN>, H: Hasher> {
/// Pool-specific trial decrypter.
pub decrypter: TD,
/// Incremental commitment tree / witness updater (warp sync).
pub warper: WarpProcessor<H>,
/// View keys of all accounts being synced.
pub vks: Vec<VK>,
/// Builder for database connections.
pub db: DbAdapterBuilder,
/// Pool identifier used to select the tree/witness rows in the db.
pub shielded_pool: String,
/// Absolute position of the next note commitment to be appended.
pub note_position: usize,
/// Our unspent received notes, indexed by nullifier, for spend detection.
pub nullifiers: HashMap<Nf, ReceivedNoteShort>,
/// Current commitment tree frontier.
pub tree: CTree,
/// Witnesses of our unspent notes.
pub witnesses: Vec<Witness>,
pub _phantom: PhantomData<(N, D, DN)>,
}
impl <N: Parameters + Sync,
D: BatchDomain<ExtractedCommitmentBytes = [u8; 32]> + Sync + Send,
VK: ViewKey<D> + Sync + Send,
DN: DecryptedNote<D, VK> + Sync,
TD: TrialDecrypter<N, D, VK, DN> + Sync,
H: Hasher> Synchronizer<N, D, VK, DN, TD, H> {
/// Loads the stored tree state and the unspent nullifiers of every
/// registered account from the database.
pub fn initialize(&mut self) -> Result<()> {
let db = self.db.build()?;
let (tree, witnesses) = db.get_tree_by_name(&self.shielded_pool)?;
self.tree = tree;
self.witnesses = witnesses;
for vk in self.vks.iter() {
let account = vk.account();
let nfs = db.get_unspent_nullifiers(account)?;
for rn in nfs.into_iter() {
self.nullifiers.insert(rn.nf.clone(), rn);
}
}
Ok(())
}
/// Processes a batch of compact blocks inside a single db transaction:
/// trial-decrypts all outputs, stores the received notes, detects spends by
/// nullifier and advances the commitment tree and witnesses.
pub fn process(&mut self, blocks: Vec<CompactBlock>) -> Result<()> {
if blocks.is_empty() { return Ok(()) }
let decrypter = self.decrypter.clone();
// trial decryption is CPU bound: decrypt the blocks in parallel
let decrypted_blocks: Vec<_> = blocks
.par_iter()
.map(|b| decrypter.decrypt_notes(b, &self.vks))
.collect();
let mut db = self.db.build()?;
self.warper.initialize(&self.tree, &self.witnesses);
let db_tx = db.begin_transaction()?;
// Detect new received notes
let mut new_witnesses = vec![];
for decb in decrypted_blocks.iter() {
for dectx in decb.txs.iter() {
let id_tx = DbAdapter::store_transaction(&dectx.tx_id, dectx.account, dectx.height, dectx.timestamp, dectx.tx_index as u32, &db_tx)?;
let mut balance: i64 = 0;
for decn in dectx.notes.iter() {
// absolute position = notes seen so far + position within this batch
let position = decn.position(self.note_position);
let rn: ReceivedNote = decn.to_received_note(position as u64);
let id_note = DbAdapter::store_received_note(&rn, id_tx, position, &db_tx)?;
let nf = Nf(rn.nf.try_into().unwrap());
// track the nullifier so a later spend can be matched to this note
self.nullifiers.insert(nf, ReceivedNoteShort {
id: id_note,
account: rn.account,
nf,
value: rn.value
});
// start a witness for the newly received note
let witness = Witness::new(position, id_note, &decn.cmx());
log::info!("Witness {} {} {}", witness.position, witness.id_note, hex::encode(witness.cmx));
new_witnesses.push(witness);
balance += rn.value as i64;
}
DbAdapter::add_value(id_tx, balance, &db_tx)?;
}
// advance past every output of the block, decrypted or not
self.note_position += decb.count_outputs as usize;
}
// Detect spends and collect note commitments
let mut new_cmx = vec![];
let mut height = 0;
for b in blocks.iter() {
for (tx_index, tx) in b.vtx.iter().enumerate() {
for sp in self.decrypter.spends(tx).iter() {
// a spend of one of our unspent notes: debit it and mark it spent
if let Some(rn) = self.nullifiers.get(sp) {
let id_tx = DbAdapter::store_transaction(&tx.hash, rn.account, b.height as u32, b.time, tx_index as u32, &db_tx)?;
DbAdapter::add_value(id_tx, -(rn.value as i64), &db_tx)?;
DbAdapter::mark_spent(rn.id, b.height as u32, &db_tx)?;
self.nullifiers.remove(sp);
}
}
new_cmx.extend(self.decrypter.outputs(tx).into_iter().map(|cob| cob.cmx));
}
// height of the last block of the batch
height = b.height as u32;
}
// Run blocks through warp sync
self.warper.add_nodes(&mut new_cmx, &new_witnesses);
let (updated_tree, updated_witnesses) = self.warper.finalize();
// Store witnesses
for w in updated_witnesses.iter() {
DbAdapter::store_witness(w, height, w.id_note, &db_tx, &self.shielded_pool)?;
}
self.tree = updated_tree;
self.witnesses = updated_witnesses;
db_tx.commit()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use zcash_primitives::consensus::Network;
    use zcash_primitives::sapling::note_encryption::SaplingDomain;
    use crate::coinconfig::COIN_CONFIG;
    use crate::db::DbAdapterBuilder;
    use crate::init_coin;
    use crate::sapling::{DecryptedSaplingNote, SaplingDecrypter, SaplingHasher, SaplingViewKey};
    use crate::sync::CTree;
    use crate::sync::tree::WarpProcessor;
    use super::Synchronizer;

    /// Concrete synchronizer for the sapling pool.
    type SaplingSynchronizer = Synchronizer<Network, SaplingDomain<Network>, SaplingViewKey, DecryptedSaplingNote,
    SaplingDecrypter<Network>, SaplingHasher>;

    /// Smoke test: build a sapling synchronizer, load its state from the db
    /// and run it over an empty batch of blocks.
    #[test]
    fn test() {
        init_coin(0, "zec.db").unwrap();
        let coin = COIN_CONFIG[0].lock().unwrap();
        let network = coin.chain.network();
        let db = DbAdapterBuilder {
            coin_type: coin.coin_type,
            db_path: coin.db_path.as_ref().unwrap().to_owned(),
        };
        let mut sync = SaplingSynchronizer {
            decrypter: SaplingDecrypter::new(*network),
            warper: WarpProcessor::new(SaplingHasher::default()),
            vks: vec![],
            db,
            shielded_pool: "sapling".to_string(),
            note_position: 0,
            nullifiers: HashMap::new(),
            tree: CTree::new(),
            witnesses: vec![],
            _phantom: Default::default(),
        };
        sync.initialize().unwrap();
        sync.process(vec![]).unwrap();
    }
}

603
src/sync/tree.rs Normal file
View File

@ -0,0 +1,603 @@
use rayon::prelude::*;
use std::io::{Read, Write};
use std::marker::PhantomData;
use byteorder::WriteBytesExt;
use zcash_encoding::{Optional, Vector};
/// A tree node: a 32-byte hash.
pub type Node = [u8; 32];

/// Hash functions needed to operate on a shielded pool's note commitment tree.
pub trait Hasher: Clone + Sync {
/// Hash value used for an uncommitted (empty) leaf.
fn uncommited_node() -> Node;
/// Combines two sibling hashes at tree level `depth` into their parent hash.
fn node_combine(&self, depth: u8, left: &Node, right: &Node) -> Node;
}
/// Compact incremental Merkle tree "frontier": only the rightmost path is
/// kept (in the same spirit as zcash's incremental CommitmentTree).
#[derive(Clone)]
pub struct CTree {
/// Left leaf of the last leaf pair, if any leaf was appended.
pub left: Option<Node>,
/// Right leaf of the last pair, when present.
pub right: Option<Node>,
/// One optional "ommer" per level above the leaves
/// (Some = a left sibling is carried at that level).
pub parents: Vec<Option<Node>>,
}
impl CTree {
/// Empty tree.
pub fn new() -> Self {
CTree {
left: None,
right: None,
parents: vec![],
}
}
/// Number of leaves appended so far, reconstructed from which nodes
/// of the rightmost path are present.
pub fn get_position(&self) -> usize {
let mut p = 0usize;
// walk from the top level down, doubling the count at each level
for parent in self.parents.iter().rev() {
if parent.is_some() {
p += 1;
}
p *= 2;
}
if self.left.is_some() {
p += 1;
}
if self.right.is_some() {
p += 1;
}
p
}
/// Clone of this tree keeping only the first `depth` parent levels.
pub fn clone_trimmed(&self, depth: usize) -> Self {
let mut tree = self.clone();
tree.parents.truncate(depth);
if let Some(None) = tree.parents.last() {
// Remove trailing None
tree.parents.truncate(depth - 1);
}
tree
}
/// Root hash of the tree at the given `height`, padding absent siblings
/// with the precomputed roots of empty subtrees.
pub fn root<H: Hasher>(&self, height: usize, empty_roots: &[Node], hasher: &H) -> Node {
if self.left.is_none() {
// no leaf at all: the tree is entirely empty
return empty_roots[height];
}
// merge the leaves
let left = self.left.unwrap_or(H::uncommited_node());
let right = self.right.unwrap_or(H::uncommited_node());
let mut cur = hasher.node_combine(0, &left, &right);
// merge the parents
let mut depth = 1u8;
for p in self.parents.iter() {
if let Some(ref left) = p {
// the stored ommer is our left sibling
cur = hasher.node_combine(depth, left, &cur);
} else {
// no ommer: we are the left child, the right sibling is empty
cur = hasher.node_combine(depth, &cur, &empty_roots[depth as usize]);
}
depth += 1;
}
// fill in the missing levels
for d in depth as usize..height {
cur = hasher.node_combine(d as u8, &cur, &empty_roots[d]);
}
cur
}
/// Deserializes a tree; layout mirrors `write`.
pub fn read<R: Read>(mut reader: R) -> std::io::Result<Self> {
let left = Optional::read(&mut reader, node_read)?;
let right = Optional::read(&mut reader, node_read)?;
let parents = Vector::read(&mut reader, |r| Optional::read(r, node_read))?;
Ok(CTree {
left,
right,
parents,
})
}
/// Serializes as: optional left leaf, optional right leaf,
/// then the vector of optional parents.
pub fn write<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
Optional::write(&mut writer, self.left, |w, n| node_write(&n, w))?;
Optional::write(&mut writer, self.right, |w, n| node_write(&n, w))?;
Vector::write(&mut writer, &self.parents, |w, e| {
Optional::write(w, *e, |w, n| node_write(&n, w))
})?;
Ok(())
}
/// Deserializes a tree from a byte slice.
pub fn from_bytes(bytes: &[u8]) -> std::io::Result<Self> {
Self::read(bytes)
}
}
/// Reads a 32-byte tree node from `r`.
fn node_read<R: Read>(mut r: R) -> std::io::Result<Node> {
    let mut hash = [0u8; 32];
    // read_exact: a short read is reported as an error. The previous `read`
    // call could return fewer than 32 bytes and leave the node partially
    // filled with zeros without any indication.
    r.read_exact(&mut hash)?;
    Ok(hash)
}
/// Writes a 32-byte tree node to `w`.
fn node_write<W: Write>(node: &Node, mut w: W) -> std::io::Result<()> {
w.write_all(node)
}
/// Incremental Merkle witness for one received note commitment.
#[derive(Clone)]
pub struct Witness {
/// Absolute position of the witnessed note commitment in the tree.
pub position: usize,
pub tree: CTree, // commitment tree at the moment the witness is created: immutable
pub filled: Vec<Node>, // as more nodes are added, levels get filled up: won't change anymore
pub cursor: CTree, // partial tree which still updates when nodes are added
// not used for decryption but identifies the witness
pub id_note: u32,
/// Note commitment this witness belongs to.
pub cmx: [u8; 32],
}
impl Witness {
    /// Creates a fresh witness for the note commitment `cmx` inserted at
    /// `position`; `id_note` ties the witness to its database row.
    pub fn new(position: usize, id_note: u32, cmx: &[u8; 32]) -> Witness {
        Witness {
            position,
            id_note,
            tree: CTree::new(),
            filled: vec![],
            cursor: CTree::new(),
            cmx: *cmx, // [u8; 32] is Copy; no clone needed
        }
    }

    /// Builds the Merkle authentication path up to `height`. Missing siblings
    /// are supplied in order by the pre-computed `filled` nodes, then (once)
    /// the root of the `cursor` tree, and finally empty-subtree roots.
    pub fn auth_path<H: Hasher>(&self, height: usize, empty_roots: &[Node], hasher: H) -> Vec<Node> {
        let mut filled_iter = self.filled.iter();
        let mut cursor_used = false;
        // Supplies the sibling for a level our own tree does not carry.
        let mut next_filler = move |depth: usize| {
            if let Some(f) = filled_iter.next() {
                *f
            } else if !cursor_used {
                cursor_used = true;
                self.cursor.root(depth, empty_roots, &hasher)
            } else {
                empty_roots[depth]
            }
        };
        let mut auth_path = vec![];
        if let Some(left) = self.tree.left {
            if self.tree.right.is_some() {
                // our leaf is the right child: its sibling is the left leaf
                auth_path.push(left);
            } else {
                // our leaf is the left child: sibling comes from the filler
                auth_path.push(next_filler(0));
            }
        }
        for i in 1..height {
            let p = if i - 1 < self.tree.parents.len() {
                self.tree.parents[i - 1]
            } else {
                None
            };
            if let Some(node) = p {
                auth_path.push(node);
            } else {
                auth_path.push(next_filler(i));
            }
        }
        auth_path
    }

    /// Deserializes a witness. `id_note` is not part of the serialized form
    /// and must be supplied by the caller.
    pub fn read<R: Read>(id_note: u32, mut reader: R) -> std::io::Result<Self> {
        let tree = CTree::read(&mut reader)?;
        let filled = Vector::read(&mut reader, |r| node_read(r))?;
        let cursor = Optional::read(&mut reader, |r| CTree::read(r))?;
        let mut cmx = [0u8; 32];
        // read_exact: a short read must fail instead of leaving cmx
        // partially filled (the previous `read` ignored short reads)
        reader.read_exact(&mut cmx)?;
        let mut witness = Witness {
            position: 0,
            id_note,
            tree,
            filled,
            cursor: cursor.unwrap_or_else(CTree::new),
            cmx,
        };
        // The position is implied by the stored tree, which always contains
        // the witnessed leaf itself, so get_position() is at least 1.
        witness.position = witness.tree.get_position() - 1;
        Ok(witness)
    }

    /// Serializes the witness: tree, filled nodes, optional cursor
    /// (prefixed by a 0/1 marker byte), then the note commitment.
    pub fn write<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
        self.tree.write(&mut writer)?;
        Vector::write(&mut writer, &self.filled, |w, n| node_write(&n, w))?;
        if self.cursor.left.is_none() && self.cursor.right.is_none() {
            // empty cursor: marker byte only
            writer.write_u8(0)?;
        } else {
            writer.write_u8(1)?;
            self.cursor.write(&mut writer)?;
        };
        writer.write_all(&self.cmx)?;
        Ok(())
    }

    /// Deserializes a witness from a byte slice.
    pub fn from_bytes(id_note: u32, bytes: &[u8]) -> std::io::Result<Self> {
        Self::read(id_note, bytes)
    }
}
/// One bottom-up pass over a batch of note commitments. The driver calls
/// `collect` to consume nodes at the current level, then `up` to move one
/// level towards the root, until `finished`.
trait Builder {
type Context;
type Output;
/// Consumes `commitments` at the current depth; returns how many nodes
/// remain to be pair-combined into the next level (always even).
fn collect(&mut self, commitments: &[Node], context: &Self::Context) -> usize;
/// Moves the builder one level up the tree.
fn up(&mut self);
/// True when there is nothing left to process.
fn finished(&self) -> bool;
/// Produces the final output once the pass is complete.
fn finalize(self, context: &Self::Context) -> Self::Output;
}
/// Builds the new tree frontier while a batch of commitments is appended.
struct CTreeBuilder<H: Hasher> {
/// Dangling left node of the previous frontier at the current depth.
left: Option<Node>,
/// Dangling right node at the current depth.
right: Option<Node>,
/// Frontier before this batch.
prev_tree: CTree,
/// Frontier after this batch, filled in level by level.
next_tree: CTree,
/// Index of the first appended node at the current depth.
start: usize,
/// Number of appended commitments (0 = finalization pass).
total_len: usize,
/// Current level; 0 = leaves.
depth: u8,
/// Left-over node carried into the current level, logically in
/// front of `commitments` (slot `start - 1`).
offset: Option<Node>,
/// True while processing the first batch of a sync pass.
first_block: bool,
hasher: H,
}
impl <H: Hasher> Builder for CTreeBuilder<H> {
type Context = ();
type Output = CTree;
fn collect(&mut self, commitments: &[Node], _context: &()) -> usize {
assert!(self.right.is_none() || self.left.is_some()); // R can't be set without L
// A dangling left node from the previous frontier pairs with the incoming
// commitments: treat it as a virtual element in front of the slice.
let offset: Option<Node>;
let m: usize;
if self.left.is_some() && self.right.is_none() {
offset = self.left;
m = commitments.len() + 1;
} else {
offset = None;
m = commitments.len();
};
let n = if self.total_len > 0 {
if self.depth == 0 {
// leaf level: the last one or two nodes become the new frontier leaves
if m % 2 == 0 {
self.next_tree.left = Some(*Self::get(commitments, m - 2, &offset));
self.next_tree.right = Some(*Self::get(commitments, m - 1, &offset));
m - 2
} else {
self.next_tree.left = Some(*Self::get(commitments, m - 1, &offset));
self.next_tree.right = None;
m - 1
}
} else if m % 2 == 0 {
// even count: no dangling node carried at this level
self.next_tree.parents.push(None);
m
} else {
// odd count: the unpaired last node becomes this level's ommer
let last_node = Self::get(commitments, m - 1, &offset);
self.next_tree.parents.push(Some(*last_node));
m - 1
}
} else {
0
};
// n nodes will be pair-combined by combine_level
assert_eq!(n % 2, 0);
self.offset = offset;
n
}
fn up(&mut self) {
// Combine the dangling pair (when complete) into a node of the next level
let h = if self.left.is_some() && self.right.is_some() {
Some(self.hasher.node_combine(
self.depth,
&self.left.unwrap(),
&self.right.unwrap(),
))
} else {
None
};
// Merge with the previous frontier's ommer at the next depth, if any
let (l, r) = match self.prev_tree.parents.get(self.depth as usize) {
Some(Some(p)) => (Some(*p), h),
Some(None) => (h, None),
None => (h, None),
};
self.left = l;
self.right = r;
assert!(self.start % 2 == 0 || self.offset.is_some());
self.start /= 2;
self.depth += 1;
}
fn finished(&self) -> bool {
// Done once past every stored parent with no dangling nodes left
self.depth as usize >= self.prev_tree.parents.len() && self.left.is_none() && self.right.is_none()
}
fn finalize(self, _context: &()) -> CTree {
// With no commitments added, the frontier is unchanged
if self.total_len > 0 {
self.next_tree
} else {
self.prev_tree
}
}
}
impl<H: Hasher> CTreeBuilder<H> {
    /// Starts a builder from the previous frontier. `len` is the number of
    /// commitments about to be appended; `first_block` marks the first batch
    /// of this sync pass.
    fn new(prev_tree: &CTree, len: usize, first_block: bool, hasher: H) -> Self {
        CTreeBuilder {
            left: prev_tree.left,
            right: prev_tree.right,
            prev_tree: prev_tree.clone(),
            next_tree: CTree::new(),
            start: prev_tree.get_position(),
            total_len: len,
            depth: 0,
            offset: None,
            first_block,
            hasher,
        }
    }

    /// Node at `index`, where the optional carried-over `offset` node
    /// logically occupies index 0, shifting the slice by one.
    #[inline(always)]
    fn get_opt<'a>(
        commitments: &'a [Node],
        index: usize,
        offset: &'a Option<Node>,
    ) -> Option<&'a Node> {
        match offset {
            Some(node) if index == 0 => Some(node),
            Some(_) => commitments.get(index - 1),
            None => commitments.get(index),
        }
    }

    /// Infallible [`Self::get_opt`]; panics when `index` is out of range.
    #[inline(always)]
    fn get<'a>(
        commitments: &'a [Node],
        index: usize,
        offset: &'a Option<Node>,
    ) -> &'a Node {
        Self::get_opt(commitments, index, offset).unwrap()
    }

    /// Start position shifted down by one when a carried-over node
    /// occupies the first slot.
    fn adjusted_start(&self, prev: &Option<Node>) -> usize {
        match prev {
            Some(_) => self.start - 1,
            None => self.start,
        }
    }
}
/// Hashes consecutive pairs among the first `n` (offset-adjusted) nodes of
/// `commitments` in parallel and writes the `n / 2` parent nodes back to the
/// front of the slice. Returns the length of the next level.
fn combine_level<H: Hasher>(
commitments: &mut [Node],
offset: Option<Node>,
n: usize,
depth: u8,
hasher: &H,
) -> usize {
assert_eq!(n % 2, 0);
// TODO: Support batch hash combine
let nn = n / 2;
let next_level: Vec<_> = (0..nn)
.into_par_iter()
.map(|i| {
hasher.node_combine(
depth,
CTreeBuilder::<H>::get(commitments, 2 * i, &offset),
CTreeBuilder::<H>::get(commitments, 2 * i + 1, &offset),
)
})
.collect();
// parents overwrite the front of the slice; the caller shrinks it to nn
commitments[0..nn].copy_from_slice(&next_level);
nn
}
/// Updates one witness while a batch of commitments is appended.
struct WitnessBuilder<H: Hasher> {
/// Witness being updated (clone of the previous state).
witness: Witness,
/// Position of the witnessed node, halved at each level.
p: usize,
/// True when the witnessed note is one of this batch's commitments.
inside: bool,
_phantom: PhantomData<H>
}
impl<H: Hasher> WitnessBuilder<H> {
    /// Prepares to update `prev_witness` while `count` new commitments are
    /// appended starting at the tree builder's current position.
    fn new(tree_builder: &CTreeBuilder<H>, prev_witness: &Witness, count: usize) -> Self {
        let p = prev_witness.position;
        let start = tree_builder.start;
        WitnessBuilder {
            witness: prev_witness.clone(),
            p,
            // is the witnessed note part of this batch?
            inside: (start..start + count).contains(&p),
            _phantom: PhantomData,
        }
    }
}
impl <H: Hasher> Builder for WitnessBuilder<H> {
type Context = CTreeBuilder<H>;
type Output = Witness;
fn collect(&mut self, commitments: &[Node], context: &CTreeBuilder<H>) -> usize {
let offset = context.offset;
let depth = context.depth;
let tree = &mut self.witness.tree;
if self.inside {
// Our note is part of this batch: record the frontier nodes
// of the witness' own tree at this level.
let rp = self.p - context.adjusted_start(&offset);
if depth == 0 {
if self.p % 2 == 1 {
// right child: keep both leaves
tree.left = Some(*CTreeBuilder::<H>::get(commitments, rp - 1, &offset));
tree.right = Some(*CTreeBuilder::<H>::get(commitments, rp, &offset));
} else {
// left child: no right leaf yet
tree.left = Some(*CTreeBuilder::<H>::get(commitments, rp, &offset));
tree.right = None;
}
} else if self.p % 2 == 1 {
// right child at an inner level: keep the left sibling as ommer
tree.parents
.push(Some(*CTreeBuilder::<H>::get(commitments, rp - 1, &offset)));
} else if self.p != 0 {
tree.parents.push(None);
}
}
// The tree builder's dangling right node can complete our sibling pair
// (not at the leaf level, and not on the first batch).
let right = if depth != 0 && !context.first_block {
context.right
} else {
None
};
let p1 = self.p + 1;
// If our right sibling (p + 1) is available at this level, it fills the
// next missing spot of the authentication path.
let has_p1 = p1 >= context.adjusted_start(&right) && p1 < context.start + commitments.len();
if has_p1 {
let p1 =
CTreeBuilder::<H>::get(commitments, p1 - context.adjusted_start(&right), &right);
if depth == 0 {
if tree.right.is_none() {
self.witness.filled.push(*p1);
}
} else if depth as usize > tree.parents.len() || tree.parents[depth as usize - 1].is_none() {
self.witness.filled.push(*p1);
}
}
0
}
fn up(&mut self) {
// parent position is half the child position
self.p /= 2;
}
fn finished(&self) -> bool {
// driven by the CTreeBuilder; the witness pass never terminates on its own
false
}
fn finalize(mut self, context: &CTreeBuilder<H>) -> Witness {
// On the finalization pass (no new commitments), rebuild the cursor: the
// portion of the current frontier beyond the witnessed note's own tree.
if context.total_len == 0 {
self.witness.cursor = CTree::new();
let mut final_position = context.prev_tree.get_position() as u32;
let mut witness_position = self.witness.tree.get_position() as u32;
assert_ne!(witness_position, 0);
witness_position -= 1;
// look for first not equal bit in MSB order
final_position = final_position.reverse_bits();
witness_position = witness_position.reverse_bits();
let mut bit: i32 = 31;
// reverse bits because it is easier to do in LSB
// it should not underflow because these numbers are not equal
while bit >= 0 {
if final_position & 1 != witness_position & 1 {
break;
}
final_position >>= 1;
witness_position >>= 1;
bit -= 1;
}
// look for the first bit set in final_position after
final_position >>= 1;
bit -= 1;
while bit >= 0 {
if final_position & 1 == 1 {
break;
}
final_position >>= 1;
bit -= 1;
}
// that bit index is the depth the cursor tree needs to cover
if bit >= 0 {
self.witness.cursor = context.prev_tree.clone_trimmed(bit as usize)
}
}
self.witness
}
}
/// Maintains the commitment tree frontier and the witnesses of our notes
/// across successive batches of new note commitments (warp sync).
pub struct WarpProcessor<H> {
/// Frontier after the last processed batch.
prev_tree: CTree,
/// Witnesses after the last processed batch.
prev_witnesses: Vec<Witness>,
/// No batch processed yet since `initialize`.
first_block: bool,
hasher: H,
}
impl <H: Hasher> WarpProcessor<H> {
pub fn new(hasher: H) -> WarpProcessor<H> {
WarpProcessor {
prev_tree: CTree::new(),
prev_witnesses: vec![],
first_block: true,
hasher,
}
}
/// Loads the starting state: the stored frontier and witnesses.
pub fn initialize(&mut self, prev_tree: &CTree, prev_witnesses: &[Witness]) {
self.first_block = true;
self.prev_tree = prev_tree.clone();
self.prev_witnesses = prev_witnesses.to_vec();
}
/// Appends a batch of note commitments, registering the witnesses of the
/// notes we received in this batch.
pub fn add_nodes(&mut self, nodes: &mut [Node], new_witnesses: &[Witness]) {
log::info!("Adding {} cmx", nodes.len());
if nodes.is_empty() {
// NOTE(review): `new_witnesses` are dropped here. Callers only pass
// witnesses whose cmx is among `nodes`, so this should be unreachable
// with a non-empty `new_witnesses` — confirm.
return;
}
self.prev_witnesses.extend_from_slice(new_witnesses);
let (t, ws) = self.advance_tree(
nodes,
);
self.first_block = false;
self.prev_tree = t;
self.prev_witnesses = ws;
}
/// Runs the finalization pass (an empty batch) and returns the updated
/// frontier and witnesses.
pub fn finalize(&mut self) -> (CTree, Vec<Witness>) {
if self.first_block {
// nothing was added: state is unchanged
(self.prev_tree.clone(), self.prev_witnesses.clone())
} else {
let (t, ws) = self.advance_tree(&mut []);
(t, ws)
}
}
/// One bottom-up pass: consumes `commitments` level by level, updating the
/// frontier and every witness in lockstep.
fn advance_tree(
&self,
mut commitments: &mut [Node],
) -> (CTree, Vec<Witness>) {
let mut builder = CTreeBuilder::<H>::new(&self.prev_tree, commitments.len(), self.first_block, self.hasher.clone());
let mut witness_builders: Vec<_> = self.prev_witnesses
.iter()
.map(|witness| WitnessBuilder::new(&builder, witness, commitments.len()))
.collect();
while !commitments.is_empty() || !builder.finished() {
// let each builder pick what it needs from the current level
let n = builder.collect(commitments, &());
for b in witness_builders.iter_mut() {
b.collect(commitments, &builder);
}
// hash the paired nodes into the next level, in place
let nn = combine_level(commitments, builder.offset, n, builder.depth, &self.hasher);
builder.up();
for b in witness_builders.iter_mut() {
b.up();
}
commitments = &mut commitments[0..nn];
}
let witnesses = witness_builders
.into_iter()
.map(|b| b.finalize(&builder))
.collect();
let tree = builder.finalize(&());
(tree, witnesses)
}
}

174
src/sync/trial_decrypt.rs Normal file
View File

@ -0,0 +1,174 @@
use std::collections::HashMap;
use crate::chain::Nf;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::time::Instant;
use zcash_note_encryption::batch::try_compact_note_decryption;
use zcash_note_encryption::{BatchDomain, COMPACT_NOTE_SIZE, EphemeralKeyBytes, ShieldedOutput};
use zcash_primitives::consensus::{BlockHeight, Parameters};
use crate::{CompactBlock, CompactSaplingOutput, CompactTx};
use crate::db::ReceivedNote;
use crate::sync::tree::Node;
/// Result of trial-decrypting one compact block.
pub struct DecryptedBlock<D: BatchDomain, VK, DN: DecryptedNote<D, VK>> {
pub height: u32,
/// Nullifiers of every spend in the block (all transactions).
pub spends: Vec<Nf>,
/// Decrypted notes, grouped per (account, transaction).
pub txs: Vec<DecryptedTx<D, VK, DN>>,
/// Number of compact outputs in the block, including filtered ones.
pub count_outputs: u32,
/// Milliseconds spent in batch trial decryption.
pub elapsed: usize,
_phantom: PhantomData<(D, VK)>,
}
/// Notes received by one account in one transaction.
pub struct DecryptedTx<D: BatchDomain, VK, DN: DecryptedNote<D, VK>> {
pub account: u32,
pub height: u32,
/// Block timestamp.
pub timestamp: u32,
/// Transaction index within its block.
pub tx_index: usize,
/// Transaction hash.
pub tx_id: Vec<u8>,
pub notes: Vec<DN>,
_phantom: PhantomData<(D, VK)>,
}
/// An account viewing key usable for trial decryption in domain `D`.
pub trait ViewKey<D: BatchDomain>: Clone {
/// Database account id.
fn account(&self) -> u32;
/// Owned incoming viewing key for the domain.
fn ivk(&self) -> D::IncomingViewingKey;
}
/// Location of a shielded output within the chain.
#[derive(Clone)]
pub struct OutputPosition {
pub height: u32,
/// Transaction index within the block.
pub tx_index: usize,
/// Output index within the transaction.
pub output_index: usize,
/// Output index counted from the start of the block.
pub position_in_block: usize,
}
/// A successfully decrypted note, convertible to its database row.
pub trait DecryptedNote<D: BatchDomain, VK>: Send + Sync {
/// Builds the note record from the decryption products.
fn from_parts(vk: VK, note: D::Note, pa: D::Recipient, output_position: OutputPosition, cmx: Node) -> Self;
/// Absolute commitment position given the block's starting offset.
fn position(&self, block_offset: usize) -> usize;
/// Note commitment bytes.
fn cmx(&self) -> Node;
/// Converts to the db row; `position` is used to derive the nullifier.
fn to_received_note(&self, position: u64) -> ReceivedNote;
}
// Deep copy from protobuf message
/// Owned, fixed-size copy of a compact shielded output.
pub struct CompactOutputBytes {
/// Ephemeral public key.
pub epk: [u8; 32],
/// Note commitment.
pub cmx: [u8; 32],
/// Compact (52-byte) note ciphertext.
pub ciphertext: [u8; 52],
}
impl From<&CompactSaplingOutput> for CompactOutputBytes {
    /// Deep-copies the protobuf output fields into fixed-size arrays.
    /// Panics when a field does not have the expected length.
    /// NOTE(review): an empty `epk` (spam-filtered output) would panic
    /// here — verify what upstream guarantees about field lengths.
    fn from(co: &CompactSaplingOutput) -> Self {
        let epk: [u8; 32] = co.epk.clone().try_into().unwrap();
        let cmx: [u8; 32] = co.cmu.clone().try_into().unwrap();
        let ciphertext: [u8; 52] = co.ciphertext.clone().try_into().unwrap();
        CompactOutputBytes {
            epk,
            cmx,
            ciphertext,
        }
    }
}
/// A compact output paired with its chain position, in the shape
/// required by the batch decryption API.
pub struct CompactShieldedOutput(CompactOutputBytes, OutputPosition);
/// Adapter so the batch decrypter can read the output's fields.
impl<D: BatchDomain<ExtractedCommitmentBytes = [u8; 32]>> ShieldedOutput<D, COMPACT_NOTE_SIZE>
for CompactShieldedOutput
{
fn ephemeral_key(&self) -> EphemeralKeyBytes {
EphemeralKeyBytes(self.0.epk)
}
fn cmstar_bytes(&self) -> D::ExtractedCommitmentBytes {
self.0.cmx
}
fn enc_ciphertext(&self) -> &[u8; COMPACT_NOTE_SIZE] {
&self.0.ciphertext
}
}
/// Pool-agnostic trial decryption of compact blocks. Implementors supply the
/// note-encryption domain and the extraction of spends/outputs from a compact
/// transaction; `decrypt_notes` performs the batched decryption and grouping.
pub trait TrialDecrypter<N: Parameters, D: BatchDomain<ExtractedCommitmentBytes = [u8; 32]>, VK: ViewKey<D>, DN: DecryptedNote<D, VK>>: Clone {
    /// Trial-decrypts every output of `block` against every view key in `vks`.
    /// Returns the nullifiers of all spends plus the decrypted notes grouped
    /// into one `DecryptedTx` per (account, transaction) pair.
    fn decrypt_notes(
        &self,
        block: &CompactBlock,
        vks: &[VK],
    ) -> DecryptedBlock<D, VK, DN> {
        let height = BlockHeight::from_u32(block.height as u32);
        let mut count_outputs = 0u32;
        let mut spends: Vec<Nf> = vec![];
        // vk.ivk() already returns an owned key; the extra .clone() was redundant
        let vvks: Vec<_> = vks.iter().map(|vk| vk.ivk()).collect();
        let mut outputs = vec![];
        let mut txs = HashMap::new();
        for (tx_index, vtx) in block.vtx.iter().enumerate() {
            // collect the nullifiers revealed by this transaction's spends
            for cs in vtx.spends.iter() {
                let mut nf = [0u8; 32];
                nf.copy_from_slice(&cs.nf);
                spends.push(Nf(nf));
            }
            let tx_outputs = self.outputs(vtx);
            if let Some(fco) = tx_outputs.first() {
                // NOTE(review): `epk` is a fixed [u8; 32] here, so `is_empty()` is
                // always false — verify the spam-filter path against the raw protobuf.
                if !fco.epk.is_empty() {
                    for (output_index, cob) in tx_outputs.into_iter().enumerate() {
                        let domain = self.domain(height);
                        let pos = OutputPosition {
                            height: block.height as u32,
                            tx_index,
                            output_index,
                            position_in_block: count_outputs as usize,
                        };
                        let output = CompactShieldedOutput(cob, pos);
                        outputs.push((domain, output));
                        count_outputs += 1;
                    }
                } else {
                    // we filter by transaction, therefore if one epk is empty, every epk is empty
                    // log::info!("Spam Filter tx {}", hex::encode(&vtx.hash));
                    count_outputs += vtx.outputs.len() as u32;
                }
            }
        }
        let start = Instant::now();
        // batched trial decryption over the (ivk x output) cross product
        let notes_decrypted =
            try_compact_note_decryption(&vvks, &outputs);
        let elapsed = start.elapsed().as_millis() as usize;
        // Assumes vk-major result layout: index = vk_index * outputs.len() + output_index,
        // per the batch decryption API.
        for (pos, opt_note) in notes_decrypted.iter().enumerate() {
            if let Some((note, pa)) = opt_note {
                let vk = &vks[pos / outputs.len()];
                let account = vk.account();
                let output = &outputs[pos % outputs.len()];
                let tx_index = output.1.1.tx_index;
                let tx_key = (account, tx_index);
                // group the decrypted notes per (account, transaction)
                let tx = txs.entry(tx_key).or_insert_with(||
                    DecryptedTx {
                        account,
                        height: block.height as u32,
                        timestamp: block.time,
                        tx_index,
                        tx_id: block.vtx[tx_index].hash.clone(),
                        notes: vec![],
                        _phantom: PhantomData::default(),
                    });
                tx.notes.push(DN::from_parts(
                    vk.clone(),
                    note.clone(),
                    pa.clone(),
                    output.1.1.clone(),
                    output.1.0.cmx,
                ));
            }
        }
        DecryptedBlock {
            height: block.height as u32,
            spends,
            // HashMap iteration: tx order within the result is unspecified
            txs: txs.into_values().collect(),
            count_outputs,
            elapsed,
            _phantom: PhantomData::default(),
        }
    }
    /// Note-encryption domain for the given block height.
    fn domain(&self, height: BlockHeight) -> D;
    /// Nullifiers revealed by the spends of `vtx`.
    fn spends(&self, vtx: &CompactTx) -> Vec<Nf>;
    /// Deep copies of the compact outputs of `vtx`.
    fn outputs(&self, vtx: &CompactTx) -> Vec<CompactOutputBytes>;
}

View File

@ -271,7 +271,7 @@ pub async fn retrieve_tx_info(
#[cfg(test)]
mod tests {
use crate::transaction::decode_transaction;
use crate::{connect_lightwalletd, DbAdapter, LWD_URL};
use crate::{AccountData, connect_lightwalletd, DbAdapter, LWD_URL};
use std::collections::HashMap;
use zcash_client_backend::encoding::decode_extended_full_viewing_key;
use zcash_params::coin::CoinType;
@ -292,8 +292,7 @@ mod tests {
nf_map.insert((nf.0, nf.2.clone()), nf.1);
}
}
let account = db.get_account_info(account).unwrap();
let fvk = account.fvk.clone();
let AccountData { fvk, .. } = db.get_account_info(account).unwrap();
let fvk = decode_extended_full_viewing_key(
Network::MainNetwork.hrp_sapling_extended_full_viewing_key(),
&fvk,