//! Reliable broadcast algorithm instance.
use std::fmt::Debug;
use std::hash::Hash;
use std::sync::{Arc, Mutex};
use crossbeam;
use proto::*;
use std::marker::{Send, Sync};
use merkle::MerkleTree;
use merkle::proof::{Proof, Lemma, Positioned};
use reed_solomon_erasure::ReedSolomon;
use crossbeam_channel as channel;

/// Temporary placeholders for the number of participants and the maximum
/// envisaged number of faulty nodes. Only one of the two is strictly
/// required since N >= 3f + 1. There are at least two options for where N
/// and f should come from:
///
/// - start-up parameters
///
/// - initial socket setup phase in node.rs
///
const PLACEHOLDER_N: usize = 8;
const PLACEHOLDER_F: usize = 2;
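
// A worked example with the placeholder values above (illustration only):
// N = 8 and f = 2 satisfy N >= 3f + 1. The erasure coding set up in
// `inner_run` then uses N - 2f = 4 data shards and 2f = 4 parity shards, and
// the thresholds used by the broadcast state machine become: f + 1 = 3 Ready
// messages to amplify Ready, 2f + 1 = 5 Ready messages to start decoding, and
// N - f = 6 known leaf values (or N - 2f = 4, once enough Ready messages have
// arrived) to decode.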

/// Broadcast stage. See the TODO note below!
///
/// TODO: The ACS algorithm will require multiple broadcast instances running
/// asynchronously, see Figure 4 in the HBBFT paper. Those are N asynchronous
/// threads, each responding to values from one particular remote node. The
/// paper doesn't make it clear, though, how the other messages - Echo and
/// Ready - are distributed over the instances. It also appears that the
/// sender of a message might have to become part of the message for this to
/// work.
pub struct Instance<'a, T: 'a + Send + Sync> {
    /// The transmit side of the multiple-consumer channel to comms threads.
    pub tx: &'a channel::Sender<Message<T>>,
    /// The receive side of the multiple-producer channel from comms threads.
    pub rx: &'a channel::Receiver<Message<T>>,
    /// Transmit sides of private channels to comms threads.
    pub txs_priv: &'a Vec<channel::Sender<Message<T>>>,
    /// Value to be broadcast.
    pub broadcast_value: Option<T>,
}

impl<'a, T: Clone + Debug + Eq + Hash + Send + Sync + Into<Vec<u8>>
         + From<Vec<u8>> + AsRef<[u8]>>
    Instance<'a, T>
    where Vec<u8>: From<T>
{
    pub fn new(tx: &'a channel::Sender<Message<T>>,
               rx: &'a channel::Receiver<Message<T>>,
               txs_priv: &'a Vec<channel::Sender<Message<T>>>,
               broadcast_value: Option<T>) -> Self
    {
        Instance {
            tx,
            rx,
            txs_priv,
            broadcast_value,
        }
    }

    /// Broadcast stage task returning the computed value in case of success,
    /// and an error in case of failure.
    ///
    /// TODO: Detailed error status.
    pub fn run(&mut self) -> Result<T, BroadcastError> {
        // Broadcast state machine thread. A crossbeam scoped thread is used
        // so that the non-'static channel references can be borrowed by the
        // spawned thread; its result is handed back through a shared mutex.
        let bvalue = self.broadcast_value.to_owned();
        let result: Result<T, BroadcastError>;
        let result_r = Arc::new(Mutex::new(None));
        let result_r_scoped = result_r.clone();

        crossbeam::scope(|scope| {
            scope.spawn(move || {
                *result_r_scoped.lock().unwrap() =
                    Some(inner_run(self.tx, self.rx, bvalue));
            });
        });
        if let Some(ref r) = *result_r.lock().unwrap() {
            result = r.to_owned();
        }
        else {
            result = Err(BroadcastError::Threading);
        }
        result
    }
}
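
// A minimal construction sketch (illustration only; the actual channel wiring
// to the comms threads lives elsewhere, e.g. in node.rs, and `Vec<u8>` is
// just one type that satisfies the trait bounds). Note that `run` blocks
// until enough Echo/Ready messages arrive on `rx`:
//
//     let (tx, rx) = channel::unbounded::<Message<Vec<u8>>>();
//     let txs_priv = Vec::new();
//     let mut instance = Instance::new(&tx, &rx, &txs_priv,
//                                      Some(b"foo".to_vec()));
//     let value = instance.run();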

/// Errors returned by the broadcast instance.
#[derive(Debug, Clone, PartialEq)]
pub enum BroadcastError {
    RootHashMismatch,
    Threading,
}

/// Breaks the input value into shards of equal length and encodes them -- and
/// some extra parity shards -- with a Reed-Solomon erasure coding scheme. The
/// resulting shards are sent out wrapped in Merkle tree proofs.
fn send_shards<'a, T>(value: T,
                      tx: &'a channel::Sender<Message<T>>,
                      coding: &ReedSolomon,
                      data_shard_num: usize,
                      parity_shard_num: usize)
    where T: Clone + Debug + Send + Sync + Into<Vec<u8>>
             + From<Vec<u8>> + AsRef<[u8]>,
          Vec<u8>: From<T>
{
    let mut v: Vec<u8> = Vec::from(value);

    // Pad the value vector with zeros so that it can be split into data
    // shards of equal size. The pad length is the number of bytes missing to
    // the next multiple of `data_shard_num`, not the remainder itself.
    let shard_pad_len = (data_shard_num - v.len() % data_shard_num)
        % data_shard_num;
    for _i in 0..shard_pad_len {
        v.push(0);
    }
    // Size of a Merkle tree leaf value, in bytes. The vector length is now
    // evenly divisible by the number of data shards.
    let shard_len = v.len() / data_shard_num;
    // Reserve space for the parity shards, filled with zeros for now.
    for _i in 0 .. shard_len * parity_shard_num {
        v.push(0);
    }

    // Divide the vector into chunks/shards.
    let shards_iter = v.chunks_mut(shard_len);
    // Convert the iterator over slices into a vector of slices.
    let mut shards: Vec<&mut [u8]> = Vec::new();
    for s in shards_iter {
        shards.push(s);
    }

    // Construct the parity chunks/shards.
    coding.encode(shards.as_mut_slice()).unwrap();

    // Convert shards back to type `T` for proof generation.
    let mut shards_t: Vec<T> = Vec::new();
    for s in shards.iter() {
        let s = Vec::into(s.to_vec());
        shards_t.push(s);
    }

    // Build a Merkle tree from the data and parity shards. Proofs (compound
    // branches) of this tree are taken apart and sent out below.
    let mtree = MerkleTree::from_vec(&::ring::digest::SHA256, shards_t);

    // Send each proof to a node.
    //
    // FIXME: use a single consumer TX channel.
    for leaf_value in mtree.iter().cloned() {
        let proof = mtree.gen_proof(leaf_value);
        if let Some(proof) = proof {
            tx.send(Message::Broadcast(
                BroadcastMessage::Value(proof))).unwrap();
        }
    }
}
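
// For example (illustration only): with data_shard_num = 4 and
// parity_shard_num = 4, a 10-byte value is padded with 2 zero bytes to 12
// bytes, giving shard_len = 3; 12 further zero bytes are appended as
// placeholders for the parity shards, so `coding.encode` works on 8 shards
// of 3 bytes each and the Merkle tree has 8 leaves.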

/// The main loop of the broadcast task.
fn inner_run<'a, T>(tx: &'a channel::Sender<Message<T>>,
                    rx: &'a channel::Receiver<Message<T>>,
                    broadcast_value: Option<T>) -> Result<T, BroadcastError>
    where T: Clone + Debug + Eq + Hash + Send + Sync + Into<Vec<u8>>
             + From<Vec<u8>> + AsRef<[u8]>,
          Vec<u8>: From<T>
{
    // Erasure coding scheme: N - 2f data shards and 2f parity shards.
    let parity_shard_num = 2 * PLACEHOLDER_F;
    let data_shard_num = PLACEHOLDER_N - parity_shard_num;
    let coding = ReedSolomon::new(data_shard_num, parity_shard_num).unwrap();

    // Split the value into chunks/shards and encode them with erasure codes.
    // Assemble a Merkle tree from the data and parity shards. Take all proofs
    // from this tree and send them, each to its own node.
    //
    // FIXME: Does the node send a proof to itself?
    if let Some(v) = broadcast_value {
        send_shards(v, tx, &coding, data_shard_num, parity_shard_num);
    }

    // Currently known leaf values.
    let mut leaf_values: Vec<Option<Box<[u8]>>> = vec![None; PLACEHOLDER_N];
    // Number of non-None leaf values.
    let mut leaf_values_num = 0;
    // Return value.
    let mut result: Option<Result<T, BroadcastError>> = None;
    // Write-once root hash of a tree broadcast from the sender associated
    // with this instance.
    let mut root_hash: Option<Vec<u8>> = None;
    // Number of times Echo was received with the same root hash.
    let mut echo_num = 0;
    // Number of times Ready was received with the same root hash.
    let mut ready_num = 0;
    // Whether this node has already multicast Ready.
    let mut ready_sent = false;
    // Whether 2f + 1 Ready messages have been seen and decoding is only
    // waiting for enough Echo messages.
    let mut ready_to_decode = false;
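
    // Summary of the message handling in the loop below (following the
    // reliable broadcast rules of the HBBFT paper):
    //
    // * Value(proof): store the contained leaf value and echo the proof to
    //   all nodes.
    // * Echo(proof): store the leaf value; after N - f known leaf values,
    //   decode and multicast Ready(h) if not yet sent; if 2f + 1 Readys have
    //   already arrived, N - 2f leaf values are enough to decode.
    // * Ready(h): after f + 1 matching Readys, multicast Ready(h) if not yet
    //   sent; after 2f + 1 matching Readys, decode as soon as N - 2f valid
    //   Echo messages have been counted.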

    // TODO: handle exit conditions.
    while result.is_none() {
        // Receive a message from the socket IO task.
        let message = rx.recv().unwrap();
        if let Message::Broadcast(message) = message {
            match message {
                // A value received. Record the value and multicast an echo.
                //
                // TODO: determine if the paper treats multicast as reflexive
                // and add an echo to this node if it does.
                BroadcastMessage::Value(p) => {
                    if root_hash.is_none() {
                        root_hash = Some(p.root_hash.clone());
                    }

                    if let Some(ref h) = root_hash {
                        if p.validate(h.as_slice()) {
                            // Save the leaf value for reconstructing the tree
                            // later.
                            leaf_values[index_of_proof(&p)] =
                                Some(Vec::from(p.value.clone())
                                     .into_boxed_slice());
                            leaf_values_num += 1;
                        }
                    }
                    // Broadcast an echo of this proof.
                    tx.send(Message::Broadcast(BroadcastMessage::Echo(p)))
                        .unwrap()
                },

                // An echo received. Verify the proof it contains.
                BroadcastMessage::Echo(p) => {
                    if root_hash.is_none() {
                        root_hash = Some(p.root_hash.clone());
                    }

                    // Check the proof against the known root hash.
                    if let Some(ref h) = root_hash {
                        if p.validate(h.as_slice()) {
                            echo_num += 1;
                            // Save the leaf value for reconstructing the tree
                            // later.
                            leaf_values[index_of_proof(&p)] =
                                Some(Vec::from(p.value.clone())
                                     .into_boxed_slice());
                            leaf_values_num += 1;

                            // Upon receiving 2f + 1 matching Ready(h)
                            // messages, wait for N - 2f Echo messages, then
                            // decode v.
                            if ready_to_decode &&
                                leaf_values_num >=
                                    PLACEHOLDER_N - 2 * PLACEHOLDER_F
                            {
                                result = Some(
                                    decode_from_shards(&mut leaf_values,
                                                       &coding,
                                                       data_shard_num, h));
                            }
                            else if leaf_values_num >=
                                PLACEHOLDER_N - PLACEHOLDER_F
                            {
                                result = Some(
                                    decode_from_shards(&mut leaf_values,
                                                       &coding,
                                                       data_shard_num, h));
                                // If Ready has not yet been sent, multicast
                                // Ready.
                                if !ready_sent {
                                    ready_sent = true;
                                    tx.send(Message::Broadcast(
                                        BroadcastMessage::Ready(h.to_owned())))
                                        .unwrap();
                                }
                            }
                        }
                    }
                },

                BroadcastMessage::Ready(ref h) => {
                    // TODO: Prioritise the Value root hash, possibly. Prevent
                    // an incorrect node from blocking progress, which it
                    // could achieve by sending an incorrect hash.
                    if root_hash.is_none() {
                        root_hash = Some(h.clone());
                    }
                    // Check that the root hash matches the known one before
                    // counting this Ready message.
                    if let Some(ref expected) = root_hash {
                        if h == expected {
                            ready_num += 1;

                            // Upon receiving f + 1 matching Ready(h)
                            // messages, if Ready has not yet been sent,
                            // multicast Ready(h).
                            if ready_num == PLACEHOLDER_F + 1 && !ready_sent {
                                ready_sent = true;
                                tx.send(Message::Broadcast(
                                    BroadcastMessage::Ready(h.to_vec())))
                                    .unwrap();
                            }

                            // Upon receiving 2f + 1 matching Ready(h)
                            // messages, wait for N - 2f Echo messages, then
                            // decode v.
                            if ready_num > 2 * PLACEHOLDER_F {
                                if echo_num >=
                                    PLACEHOLDER_N - 2 * PLACEHOLDER_F
                                {
                                    result = Some(
                                        decode_from_shards(&mut leaf_values,
                                                           &coding,
                                                           data_shard_num,
                                                           h));
                                }
                                else {
                                    ready_to_decode = true;
                                }
                            }
                        }
                    }
                }
            }
        }
        else {
            error!("Incorrect message from the socket: {:?}", message);
        }
    }
    // The loop only exits once `result` has been set, so this cannot panic.
    result.unwrap()
}

fn decode_from_shards<T>(leaf_values: &mut Vec<Option<Box<[u8]>>>,
                         coding: &ReedSolomon,
                         data_shard_num: usize,
                         root_hash: &Vec<u8>) -> Result<T, BroadcastError>
    where T: AsRef<[u8]> + From<Vec<u8>>,
          Vec<u8>: From<T>
{
    // Try to interpolate the Merkle tree using the Reed-Solomon erasure
    // coding scheme.
    coding.reconstruct_shards(leaf_values.as_mut_slice()).unwrap();

    // Recompute the Merkle tree root.
    //
    // Convert shards back to type `T` for tree construction.
    let mut shards_t: Vec<T> = Vec::new();
    for l in leaf_values.iter() {
        if let Some(ref v) = *l {
            let s = Vec::into(v.to_vec());
            shards_t.push(s);
        }
    }
    // Construct the Merkle tree.
    let mtree = MerkleTree::from_vec(&::ring::digest::SHA256, shards_t);
    // If the root hash of the reconstructed tree does not match the one
    // received with proofs then abort.
    if *mtree.root_hash() != *root_hash {
        // NOTE: The paper does not define the meaning of *abort*. But it is
        // sensible not to continue trying to reconstruct the tree after this
        // point. This instance must have received incorrect shards.
        Err(BroadcastError::RootHashMismatch)
    }
    else {
        // Reconstruct the value from the data shards.
        Ok(glue_shards(mtree, data_shard_num))
    }
}
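
// For example (illustration only): with the placeholder parameters there are
// 8 shards in total, 4 of which are parity shards, so `reconstruct_shards`
// can fill in the missing entries of `leaf_values` as long as at least 4 of
// them are already known.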

/// Concatenates the first `n` leaf values of a Merkle tree `m` in one value
/// of type `T`. This is useful for reconstructing the data value held in the
/// tree and forgetting the leaves that contain parity information.
fn glue_shards<T>(m: MerkleTree<T>, n: usize) -> T
    where T: From<Vec<u8>>,
          Vec<u8>: From<T>
{
    let mut t: Vec<u8> = Vec::new();

    // Only the first `n` leaves carry data; the remaining leaves are parity
    // shards and are dropped.
    for s in m.into_iter().take(n) {
        t.extend(Vec::from(s));
    }
    Vec::into(t)
}
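
// For example (illustration only): for the 8-leaf tree produced in
// `send_shards` above (shard_len = 3, n = data_shard_num = 4), this
// concatenates the first 4 leaves into a 12-byte value -- the original
// 10 bytes plus the 2 zero bytes of padding added in `send_shards`.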

/// An additional path conversion operation on `Lemma` to allow reconstruction
/// of erasure-coded `Proof` from `Lemma`s. The output path, when read from
/// left to right, goes from leaf to root (LSB order).
fn path_of_lemma(lemma: &Lemma) -> Vec<bool> {
    match lemma.sub_lemma {
        None => {
            match lemma.sibling_hash {
                // lemma terminates with no leaf
                None => vec![],
                // the leaf is on the right
                Some(Positioned::Left(_)) => vec![true],
                // the leaf is on the left
                Some(Positioned::Right(_)) => vec![false],
            }
        }
        Some(ref l) => {
            let mut p = path_of_lemma(l.as_ref());

            match lemma.sibling_hash {
                // lemma terminates
                None => (),
                // lemma branches out to the right
                Some(Positioned::Left(_)) => p.push(true),
                // lemma branches out to the left
                Some(Positioned::Right(_)) => p.push(false),
            }
            p
        }
    }
}

/// Further conversion of a binary tree path into an array index.
fn index_of_path(mut path: Vec<bool>) -> usize {
    let mut idx = 0;
    // Convert to the MSB order.
    path.reverse();

    for &dir in path.iter() {
        idx <<= 1;
        if dir {
            idx |= 1;
        }
    }
    idx
}
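
// For example, the leaf-to-root path [false, true, true] (as produced by
// `path_of_lemma`) is reversed to [true, true, false] and folded into the
// index 0b110 = 6.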

/// Computes the Merkle tree leaf index of a value in a given proof.
fn index_of_proof<T>(p: &Proof<T>) -> usize {
    index_of_path(path_of_lemma(&p.lemma))
}