2018-07-18 07:46:46 -07:00
|
|
|
//! # Honey Badger
//!
//! Honey Badger allows a network of _N_ nodes with at most _f_ faulty ones,
//! where _3 f < N_, to input "contributions" - any kind of data -, and to agree on a sequence of
//! _batches_ of contributions. The protocol proceeds in _epochs_, starting at number 0, and outputs
//! one batch in each epoch. It never terminates: It handles a continuous stream of incoming
//! contributions and keeps producing new batches from them. All correct nodes will output the same
//! batch for each epoch. Each validator proposes one contribution per epoch, and every batch will
//! contain the contributions of at least _N - f_ validators.
//!
//! ## How it works
//!
//! In every epoch, every validator encrypts their contribution and proposes it to the others.
//! A `CommonSubset` instance determines which proposals are accepted and will be part of the new
//! batch. Using threshold encryption, the nodes collaboratively decrypt all accepted
//! contributions. Invalid contributions (that e.g. cannot be deserialized) are discarded - their
//! proposers must be faulty -, and the remaining ones are output as the new batch. The next epoch
//! begins as soon as the validators propose new contributions again.
//!
//! So it is essentially an endlessly repeating `CommonSubset`, but with the proposed values
//! encrypted. The encryption makes it harder for an attacker to try and censor a particular value
//! by influencing the set of proposals that make it into the common subset, because they don't
//! know the decrypted values before the subset is determined.
|
|
|
|
|
2018-07-05 09:20:53 -07:00
|
|
|
use rand::Rand;
|
2018-05-16 05:23:57 -07:00
|
|
|
use std::collections::btree_map::Entry;
|
2018-07-09 05:29:01 -07:00
|
|
|
use std::collections::{BTreeMap, BTreeSet, VecDeque};
|
2018-05-16 05:23:57 -07:00
|
|
|
use std::fmt::Debug;
|
2018-05-12 07:09:07 -07:00
|
|
|
use std::hash::Hash;
|
2018-06-29 08:10:15 -07:00
|
|
|
use std::marker::PhantomData;
|
2018-07-11 12:15:08 -07:00
|
|
|
use std::sync::Arc;
|
2018-05-12 07:09:07 -07:00
|
|
|
|
|
|
|
use bincode;
|
2018-06-28 08:17:07 -07:00
|
|
|
use itertools::Itertools;
|
2018-06-25 04:07:31 -07:00
|
|
|
use serde::{Deserialize, Serialize};
|
2018-05-12 07:09:07 -07:00
|
|
|
|
2018-07-19 04:56:30 -07:00
|
|
|
use common_subset::{self, CommonSubset};
|
2018-06-19 07:17:16 -07:00
|
|
|
use crypto::{Ciphertext, DecryptionShare};
|
2018-07-08 09:41:50 -07:00
|
|
|
use fault_log::{FaultKind, FaultLog};
|
2018-07-19 06:09:50 -07:00
|
|
|
use messaging::{self, DistAlgorithm, NetworkInfo, Target, TargetedMessage};
|
2018-05-12 07:09:07 -07:00
|
|
|
|
2018-05-20 04:51:33 -07:00
|
|
|
// Error types for `HoneyBadger`, generated by the `error_chain` macro.
error_chain!{
    links {
        // Errors propagated from the `CommonSubset` sub-algorithm.
        CommonSubset(common_subset::Error, common_subset::ErrorKind);
    }

    foreign_links {
        // Serialization/deserialization failures from `bincode`.
        Bincode(Box<bincode::ErrorKind>);
    }

    errors {
        // A message was received from a node that is not a known validator.
        UnknownSender
    }
}
|
|
|
|
|
2018-06-28 12:31:27 -07:00
|
|
|
/// A Honey Badger builder, to configure the parameters and create new instances of `HoneyBadger`.
pub struct HoneyBadgerBuilder<C, NodeUid> {
    /// Shared network data.
    netinfo: Arc<NetworkInfo<NodeUid>>,
    /// The maximum number of future epochs for which we handle messages simultaneously.
    max_future_epochs: usize,
    /// Marker for the contribution type `C`; the builder stores no value of that type itself.
    _phantom: PhantomData<C>,
}
|
|
|
|
|
2018-07-09 05:29:01 -07:00
|
|
|
impl<C, NodeUid> HoneyBadgerBuilder<C, NodeUid>
|
2018-06-29 08:10:15 -07:00
|
|
|
where
|
2018-07-09 05:29:01 -07:00
|
|
|
C: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
|
2018-07-05 09:20:53 -07:00
|
|
|
NodeUid: Ord + Clone + Debug + Rand,
|
2018-06-29 08:10:15 -07:00
|
|
|
{
|
|
|
|
/// Returns a new `HoneyBadgerBuilder` configured to use the node IDs and cryptographic keys
|
2018-06-28 14:07:11 -07:00
|
|
|
/// specified by `netinfo`.
|
2018-07-11 12:15:08 -07:00
|
|
|
pub fn new(netinfo: Arc<NetworkInfo<NodeUid>>) -> Self {
|
2018-06-28 12:31:27 -07:00
|
|
|
HoneyBadgerBuilder {
|
|
|
|
netinfo,
|
|
|
|
max_future_epochs: 3,
|
2018-06-28 14:07:11 -07:00
|
|
|
_phantom: PhantomData,
|
2018-06-28 12:31:27 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Sets the maximum number of future epochs for which we handle messages simultaneously.
|
|
|
|
pub fn max_future_epochs(&mut self, max_future_epochs: usize) -> &mut Self {
|
|
|
|
self.max_future_epochs = max_future_epochs;
|
|
|
|
self
|
|
|
|
}
|
|
|
|
|
2018-07-09 05:29:01 -07:00
|
|
|
/// Creates a new Honey Badger instance.
|
|
|
|
pub fn build(&self) -> HoneyBadger<C, NodeUid> {
|
|
|
|
HoneyBadger {
|
2018-06-28 12:31:27 -07:00
|
|
|
netinfo: self.netinfo.clone(),
|
|
|
|
epoch: 0,
|
2018-07-09 05:29:01 -07:00
|
|
|
has_input: false,
|
2018-06-28 12:31:27 -07:00
|
|
|
common_subsets: BTreeMap::new(),
|
|
|
|
max_future_epochs: self.max_future_epochs as u64,
|
|
|
|
messages: MessageQueue(VecDeque::new()),
|
2018-07-10 14:27:18 -07:00
|
|
|
output: Vec::new(),
|
2018-06-28 12:31:27 -07:00
|
|
|
incoming_queue: BTreeMap::new(),
|
|
|
|
received_shares: BTreeMap::new(),
|
2018-07-09 05:29:01 -07:00
|
|
|
decrypted_contributions: BTreeMap::new(),
|
2018-06-28 12:31:27 -07:00
|
|
|
ciphertexts: BTreeMap::new(),
|
2018-07-09 05:29:01 -07:00
|
|
|
}
|
2018-06-28 12:31:27 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-12 07:09:07 -07:00
|
|
|
/// An instance of the Honey Badger Byzantine fault tolerant consensus algorithm.
pub struct HoneyBadger<C, NodeUid: Rand> {
    /// Shared network data.
    netinfo: Arc<NetworkInfo<NodeUid>>,
    /// The earliest epoch from which we have not yet received output.
    epoch: u64,
    /// Whether we have already submitted a proposal for the current epoch.
    has_input: bool,
    /// The Asynchronous Common Subset instance that decides which nodes' transactions to include,
    /// indexed by epoch.
    common_subsets: BTreeMap<u64, CommonSubset<NodeUid>>,
    /// The maximum number of `CommonSubset` instances that we run simultaneously.
    max_future_epochs: u64,
    /// The messages that need to be sent to other nodes.
    messages: MessageQueue<NodeUid>,
    /// The outputs from completed epochs.
    output: Vec<Batch<C, NodeUid>>,
    /// Messages for future epochs that couldn't be handled yet.
    incoming_queue: BTreeMap<u64, Vec<(NodeUid, MessageContent<NodeUid>)>>,
    /// Received decryption shares for an epoch. Each decryption share has a sender and a
    /// proposer. The outer `BTreeMap` has epochs as its key. The next `BTreeMap` has proposers as
    /// its key. The inner `BTreeMap` has the sender as its key.
    received_shares: BTreeMap<u64, BTreeMap<NodeUid, BTreeMap<NodeUid, DecryptionShare>>>,
    /// Decoded accepted proposals (serialized contributions), for the current epoch only.
    decrypted_contributions: BTreeMap<NodeUid, Vec<u8>>,
    /// Ciphertexts output by Common Subset in an epoch.
    ciphertexts: BTreeMap<u64, BTreeMap<NodeUid, Ciphertext>>,
}
|
|
|
|
|
2018-07-19 06:09:50 -07:00
|
|
|
/// The result of a single `HoneyBadger` method call: output batches, fault log and messages.
pub type Step<C, NodeUid> = messaging::Step<HoneyBadger<C, NodeUid>>;
|
2018-07-09 04:35:26 -07:00
|
|
|
|
2018-07-09 05:29:01 -07:00
|
|
|
impl<C, NodeUid> DistAlgorithm for HoneyBadger<C, NodeUid>
|
2018-06-29 08:10:15 -07:00
|
|
|
where
|
2018-07-09 05:29:01 -07:00
|
|
|
C: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
|
2018-07-05 09:20:53 -07:00
|
|
|
NodeUid: Ord + Clone + Debug + Rand,
|
2018-05-14 05:35:06 -07:00
|
|
|
{
|
2018-06-18 07:14:17 -07:00
|
|
|
type NodeUid = NodeUid;
|
2018-07-09 05:29:01 -07:00
|
|
|
type Input = C;
|
|
|
|
type Output = Batch<C, NodeUid>;
|
2018-06-18 07:14:17 -07:00
|
|
|
type Message = Message<NodeUid>;
|
2018-05-14 05:35:06 -07:00
|
|
|
type Error = Error;
|
|
|
|
|
2018-07-19 06:09:50 -07:00
|
|
|
fn input(&mut self, input: Self::Input) -> Result<Step<C, NodeUid>> {
|
2018-07-09 04:35:26 -07:00
|
|
|
let fault_log = self.propose(&input)?;
|
2018-07-10 14:27:18 -07:00
|
|
|
self.step(fault_log)
|
2018-05-14 05:35:06 -07:00
|
|
|
}
|
|
|
|
|
2018-06-18 07:14:17 -07:00
|
|
|
fn handle_message(
|
|
|
|
&mut self,
|
|
|
|
sender_id: &NodeUid,
|
|
|
|
message: Self::Message,
|
2018-07-19 06:09:50 -07:00
|
|
|
) -> Result<Step<C, NodeUid>> {
|
2018-07-17 06:54:12 -07:00
|
|
|
if !self.netinfo.is_node_validator(sender_id) {
|
2018-05-20 04:51:33 -07:00
|
|
|
return Err(ErrorKind::UnknownSender.into());
|
2018-05-14 05:35:06 -07:00
|
|
|
}
|
2018-06-19 07:17:16 -07:00
|
|
|
let Message { epoch, content } = message;
|
2018-07-09 04:35:26 -07:00
|
|
|
let mut fault_log = FaultLog::new();
|
2018-06-28 08:17:07 -07:00
|
|
|
if epoch > self.epoch + self.max_future_epochs {
|
|
|
|
// Postpone handling this message.
|
|
|
|
self.incoming_queue
|
|
|
|
.entry(epoch)
|
|
|
|
.or_insert_with(Vec::new)
|
|
|
|
.push((sender_id.clone(), content));
|
2018-07-09 04:35:26 -07:00
|
|
|
} else if epoch == self.epoch {
|
|
|
|
fault_log.extend(self.handle_message_content(sender_id, epoch, content)?);
|
|
|
|
} // And ignore all messages from past epochs.
|
2018-07-10 14:27:18 -07:00
|
|
|
self.step(fault_log)
|
2018-05-14 05:35:06 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn terminated(&self) -> bool {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2018-06-18 07:14:17 -07:00
|
|
|
fn our_id(&self) -> &NodeUid {
|
2018-05-29 05:17:30 -07:00
|
|
|
self.netinfo.our_uid()
|
2018-05-14 05:35:06 -07:00
|
|
|
}
|
2018-05-12 07:09:07 -07:00
|
|
|
}
|
|
|
|
|
2018-07-09 05:29:01 -07:00
|
|
|
impl<C, NodeUid> HoneyBadger<C, NodeUid>
where
    C: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
    NodeUid: Ord + Clone + Debug + Rand,
{
    /// Returns a new `HoneyBadgerBuilder` configured to use the node IDs and cryptographic keys
    /// specified by `netinfo`.
    pub fn builder(netinfo: Arc<NetworkInfo<NodeUid>>) -> HoneyBadgerBuilder<C, NodeUid> {
        HoneyBadgerBuilder::new(netinfo)
    }

    /// Drains the queued output batches and outgoing messages into a `Step`, together with the
    /// given fault log.
    fn step(&mut self, fault_log: FaultLog<NodeUid>) -> Result<Step<C, NodeUid>> {
        Ok(Step::new(
            self.output.drain(..).collect(),
            fault_log,
            self.messages.drain(..).collect(),
        ))
    }

    /// Proposes a new item in the current epoch.
    pub fn propose(&mut self, proposal: &C) -> Result<FaultLog<NodeUid>> {
        if !self.netinfo.is_validator() {
            // Observers do not propose contributions.
            return Ok(FaultLog::new());
        }
        let step = {
            // Get or create the `CommonSubset` instance for the current epoch.
            let cs = match self.common_subsets.entry(self.epoch) {
                Entry::Occupied(entry) => entry.into_mut(),
                Entry::Vacant(entry) => {
                    entry.insert(CommonSubset::new(self.netinfo.clone(), self.epoch)?)
                }
            };
            // Serialize the proposal and encrypt it with the network's master public key, so
            // that it can only be decrypted collaboratively once the subset is agreed on.
            let ser_prop = bincode::serialize(&proposal)?;
            let ciphertext = self.netinfo.public_key_set().public_key().encrypt(ser_prop);
            self.has_input = true;
            cs.input(bincode::serialize(&ciphertext).unwrap())?
        };
        Ok(self.process_output(step, None)?)
    }

    /// Returns `true` if input for the current epoch has already been provided.
    pub fn has_input(&self) -> bool {
        // Observers never provide input, so they always count as having done so.
        !self.netinfo.is_validator() || self.has_input
    }

    /// Handles a message for the given epoch, dispatching on its content.
    fn handle_message_content(
        &mut self,
        sender_id: &NodeUid,
        epoch: u64,
        content: MessageContent<NodeUid>,
    ) -> Result<FaultLog<NodeUid>> {
        match content {
            MessageContent::CommonSubset(cs_msg) => {
                self.handle_common_subset_message(sender_id, epoch, cs_msg)
            }
            MessageContent::DecryptionShare { proposer_id, share } => {
                self.handle_decryption_share_message(sender_id, epoch, proposer_id, share)
            }
        }
    }

    /// Handles a message for the common subset sub-algorithm.
    fn handle_common_subset_message(
        &mut self,
        sender_id: &NodeUid,
        epoch: u64,
        message: common_subset::Message<NodeUid>,
    ) -> Result<FaultLog<NodeUid>> {
        let mut fault_log = FaultLog::new();
        let step = {
            // Borrow the instance for `epoch`, or create it.
            let cs = match self.common_subsets.entry(epoch) {
                Entry::Occupied(entry) => entry.into_mut(),
                Entry::Vacant(entry) => {
                    if epoch < self.epoch {
                        // Epoch has already terminated. Message is obsolete.
                        return Ok(fault_log);
                    } else {
                        entry.insert(CommonSubset::new(self.netinfo.clone(), epoch)?)
                    }
                }
            };
            cs.handle_message(sender_id, message)?
        };
        fault_log.extend(self.process_output(step, Some(epoch))?);
        self.remove_terminated(epoch);
        Ok(fault_log)
    }

    /// Handles decryption shares sent by `HoneyBadger` instances.
    fn handle_decryption_share_message(
        &mut self,
        sender_id: &NodeUid,
        epoch: u64,
        proposer_id: NodeUid,
        share: DecryptionShare,
    ) -> Result<FaultLog<NodeUid>> {
        let mut fault_log = FaultLog::new();

        // If we already know the proposer's ciphertext for this epoch, verify the share against
        // it; an invalid share incriminates the sender and is not stored.
        if let Some(ciphertext) = self
            .ciphertexts
            .get(&epoch)
            .and_then(|cts| cts.get(&proposer_id))
        {
            if !self.verify_decryption_share(sender_id, &share, ciphertext) {
                let fault_kind = FaultKind::UnverifiedDecryptionShareSender;
                fault_log.append(sender_id.clone(), fault_kind);
                return Ok(fault_log);
            }
        }

        // Insert the share.
        self.received_shares
            .entry(epoch)
            .or_insert_with(BTreeMap::new)
            .entry(proposer_id.clone())
            .or_insert_with(BTreeMap::new)
            .insert(sender_id.clone(), share);

        // Only the current epoch's shares can complete a decryption right now.
        if epoch == self.epoch {
            self.try_decrypt_proposer_contribution(proposer_id);
            fault_log.extend(self.try_decrypt_and_output_batch()?);
        }

        Ok(fault_log)
    }

    /// Verifies a given decryption share using the sender's public key and the proposer's
    /// ciphertext. Returns `true` if verification has been successful and `false` if verification
    /// has failed.
    fn verify_decryption_share(
        &self,
        sender_id: &NodeUid,
        share: &DecryptionShare,
        ciphertext: &Ciphertext,
    ) -> bool {
        if let Some(pk) = self.netinfo.public_key_share(sender_id) {
            pk.verify_decryption_share(&share, ciphertext)
        } else {
            // Unknown sender: the share cannot be verified.
            false
        }
    }

    /// When contributions of transactions have been decrypted for all valid proposers in this
    /// epoch, moves those contributions into a batch, outputs the batch and updates the epoch.
    fn try_output_batch(&mut self) -> Result<FaultLog<NodeUid>> {
        // Wait until contributions have been successfully decoded for all proposer nodes with correct
        // ciphertext outputs.
        if !self.all_contributions_decrypted() {
            return Ok(FaultLog::new());
        }

        // Deserialize the output.
        let mut fault_log = FaultLog::new();
        let contributions: BTreeMap<NodeUid, C> = self
            .decrypted_contributions
            .iter()
            .flat_map(|(proposer_id, ser_contrib)| {
                // If deserialization fails, the proposer of that item is faulty. Ignore it.
                if let Ok(contrib) = bincode::deserialize::<C>(&ser_contrib) {
                    Some((proposer_id.clone(), contrib))
                } else {
                    let fault_kind = FaultKind::BatchDeserializationFailed;
                    fault_log.append(proposer_id.clone(), fault_kind);
                    None
                }
            })
            .collect();
        let batch = Batch {
            epoch: self.epoch,
            contributions,
        };
        debug!(
            "{:?} Epoch {} output {:?}",
            self.netinfo.our_uid(),
            self.epoch,
            batch.contributions.keys().collect::<Vec<_>>()
        );
        // Queue the output and advance the epoch.
        self.output.push(batch);
        fault_log.extend(self.update_epoch()?);
        Ok(fault_log)
    }

    /// Increments the epoch number and clears any state that is local to the finished epoch.
    fn update_epoch(&mut self) -> Result<FaultLog<NodeUid>> {
        // Clear the state of the old epoch.
        self.ciphertexts.remove(&self.epoch);
        self.decrypted_contributions.clear();
        self.received_shares.remove(&self.epoch);
        self.epoch += 1;
        self.has_input = false;
        // `max_epoch` has just entered the window of epochs we handle simultaneously, so any
        // messages that were queued for it can be processed now.
        let max_epoch = self.epoch + self.max_future_epochs;
        let mut fault_log = FaultLog::new();
        // TODO: Once stable, use `Iterator::flatten`.
        for (sender_id, content) in
            Itertools::flatten(self.incoming_queue.remove(&max_epoch).into_iter())
        {
            self.handle_message_content(&sender_id, max_epoch, content)?
                .merge_into(&mut fault_log);
        }
        // Handle any decryption shares received for the new epoch.
        self.try_decrypt_and_output_batch()?
            .merge_into(&mut fault_log);
        Ok(fault_log)
    }

    /// Tries to decrypt contributions from all proposers and output those in a batch.
    fn try_decrypt_and_output_batch(&mut self) -> Result<FaultLog<NodeUid>> {
        // Return if we don't have ciphertexts yet.
        let proposer_ids: Vec<_> = match self.ciphertexts.get(&self.epoch) {
            Some(cts) => cts.keys().cloned().collect(),
            None => {
                return Ok(FaultLog::new());
            }
        };

        // Try to output a batch if all contributions have been decrypted.
        for proposer_id in proposer_ids {
            self.try_decrypt_proposer_contribution(proposer_id);
        }
        self.try_output_batch()
    }

    /// Returns true if and only if contributions have been decrypted for all selected proposers in
    /// this epoch.
    fn all_contributions_decrypted(&mut self) -> bool {
        match self.ciphertexts.get(&self.epoch) {
            None => false, // No ciphertexts yet.
            Some(ciphertexts) => ciphertexts.keys().eq(self.decrypted_contributions.keys()),
        }
    }

    /// Tries to decrypt the contribution from a given proposer.
    fn try_decrypt_proposer_contribution(&mut self, proposer_id: NodeUid) {
        if self.decrypted_contributions.contains_key(&proposer_id) {
            return; // Already decrypted.
        }
        let shares = if let Some(shares) = self
            .received_shares
            .get(&self.epoch)
            .and_then(|sh| sh.get(&proposer_id))
        {
            shares
        } else {
            return; // No shares received for this proposer yet.
        };
        // More than `num_faulty()` shares are needed before decryption can succeed.
        if shares.len() <= self.netinfo.num_faulty() {
            return;
        }

        if let Some(ciphertext) = self
            .ciphertexts
            .get(&self.epoch)
            .and_then(|cts| cts.get(&proposer_id))
        {
            // Map each sender to its numeric node index, as required by `decrypt`.
            let ids_u64: BTreeMap<&NodeUid, u64> = shares
                .keys()
                .map(|id| (id, self.netinfo.node_index(id).unwrap() as u64))
                .collect();
            let indexed_shares: BTreeMap<&u64, _> = shares
                .into_iter()
                .map(|(id, share)| (&ids_u64[id], share))
                .collect();
            match self
                .netinfo
                .public_key_set()
                .decrypt(indexed_shares, ciphertext)
            {
                Ok(contrib) => {
                    self.decrypted_contributions.insert(proposer_id, contrib);
                }
                Err(err) => error!("{:?} Decryption failed: {:?}.", self.our_id(), err),
            }
        }
    }

    /// Deserializes the ciphertexts output by Common Subset, sends our decryption share for each
    /// valid one, and records the valid ciphertexts for verifying other nodes' shares.
    fn send_decryption_shares(
        &mut self,
        cs_output: BTreeMap<NodeUid, Vec<u8>>,
    ) -> Result<FaultLog<NodeUid>> {
        let mut fault_log = FaultLog::new();
        let mut ciphertexts = BTreeMap::new();
        for (proposer_id, v) in cs_output {
            let mut ciphertext: Ciphertext;
            if let Ok(ct) = bincode::deserialize(&v) {
                ciphertext = ct;
            } else {
                // An undeserializable ciphertext incriminates its proposer.
                warn!("Invalid ciphertext from proposer {:?} ignored", proposer_id);
                let fault_kind = FaultKind::InvalidCiphertext;
                fault_log.append(proposer_id.clone(), fault_kind);
                continue;
            }
            // Now that the ciphertext is known, retroactively verify any shares that arrived
            // before it, and drop the ones that fail.
            let (incorrect_senders, faults) =
                self.verify_pending_decryption_shares(&proposer_id, &ciphertext);
            self.remove_incorrect_decryption_shares(&proposer_id, incorrect_senders);
            fault_log.extend(faults);
            let (valid, dec_fl) = self.send_decryption_share(&proposer_id, &ciphertext)?;
            fault_log.extend(dec_fl);
            if valid {
                ciphertexts.insert(proposer_id.clone(), ciphertext);
                self.try_decrypt_proposer_contribution(proposer_id);
            } else {
                warn!("Share decryption failed for proposer {:?}", proposer_id);
                let fault_kind = FaultKind::ShareDecryptionFailed;
                fault_log.append(proposer_id.clone(), fault_kind);
            }
        }
        self.ciphertexts.insert(self.epoch, ciphertexts);
        fault_log.extend(self.try_decrypt_and_output_batch()?);
        Ok(fault_log)
    }

    /// Verifies the ciphertext and sends decryption shares. Returns whether it is valid.
    fn send_decryption_share(
        &mut self,
        proposer_id: &NodeUid,
        ciphertext: &Ciphertext,
    ) -> Result<(bool, FaultLog<NodeUid>)> {
        if !self.netinfo.is_validator() {
            // Observers hold no key share; they can only check the ciphertext's validity.
            return Ok((ciphertext.verify(), FaultLog::new()));
        }
        let share = match self.netinfo.secret_key_share().decrypt_share(&ciphertext) {
            None => return Ok((false, FaultLog::new())),
            Some(share) => share,
        };
        // Send the share to remote nodes.
        let content = MessageContent::DecryptionShare {
            proposer_id: proposer_id.clone(),
            share: share.clone(),
        };
        let message = Target::All.message(content.with_epoch(self.epoch));
        self.messages.0.push_back(message);
        let epoch = self.epoch;
        let our_id = self.netinfo.our_uid().clone();
        // Receive the share locally.
        let fault_log =
            self.handle_decryption_share_message(&our_id, epoch, proposer_id.clone(), share)?;
        Ok((true, fault_log))
    }

    /// Verifies the shares of the current epoch that are pending verification. Returned are the
    /// senders with incorrect pending shares.
    fn verify_pending_decryption_shares(
        &self,
        proposer_id: &NodeUid,
        ciphertext: &Ciphertext,
    ) -> (BTreeSet<NodeUid>, FaultLog<NodeUid>) {
        let mut incorrect_senders = BTreeSet::new();
        let mut fault_log = FaultLog::new();
        if let Some(sender_shares) = self
            .received_shares
            .get(&self.epoch)
            .and_then(|e| e.get(proposer_id))
        {
            for (sender_id, share) in sender_shares {
                if !self.verify_decryption_share(sender_id, share, ciphertext) {
                    let fault_kind = FaultKind::UnverifiedDecryptionShareSender;
                    fault_log.append(sender_id.clone(), fault_kind);
                    incorrect_senders.insert(sender_id.clone());
                }
            }
        }
        (incorrect_senders, fault_log)
    }

    /// Removes the given senders' decryption shares for `proposer_id` in the current epoch.
    fn remove_incorrect_decryption_shares(
        &mut self,
        proposer_id: &NodeUid,
        incorrect_senders: BTreeSet<NodeUid>,
    ) {
        if let Some(sender_shares) = self
            .received_shares
            .get_mut(&self.epoch)
            .and_then(|e| e.get_mut(proposer_id))
        {
            for sender_id in incorrect_senders {
                sender_shares.remove(&sender_id);
            }
        }
    }

    /// Checks whether the current epoch has output, and if it does, sends out our decryption
    /// shares. The `epoch` argument allows to differentiate between calls which produce output in
    /// all conditions, `epoch == None`, and calls which only produce output in a given epoch,
    /// `epoch == Some(given_epoch)`.
    fn process_output(
        &mut self,
        step: common_subset::Step<NodeUid>,
        epoch: Option<u64>,
    ) -> Result<FaultLog<NodeUid>> {
        let common_subset::Step {
            output,
            mut fault_log,
            mut messages,
        } = step;
        // NOTE(review): messages from a sub-instance for a future `epoch` are tagged with
        // `self.epoch` here, not with the instance's own epoch — confirm this is intended.
        self.messages.extend_with_epoch(self.epoch, &mut messages);
        // If this is the current epoch, the message could cause a new output.
        if epoch.is_none() || epoch == Some(self.epoch) {
            for cs_output in output {
                fault_log.extend(self.send_decryption_shares(cs_output)?);
                // TODO: May also check that there is no further output from Common Subset.
            }
        }
        Ok(fault_log)
    }

    /// Removes all `CommonSubset` instances from _past_ epochs that have terminated.
    fn remove_terminated(&mut self, from_epoch: u64) {
        for epoch in from_epoch..self.epoch {
            if self
                .common_subsets
                .get(&epoch)
                .map_or(false, CommonSubset::terminated)
            {
                debug!(
                    "{:?} Epoch {} has terminated.",
                    self.netinfo.our_uid(),
                    epoch
                );
                self.common_subsets.remove(&epoch);
            }
        }
    }
}
|
|
|
|
|
2018-07-09 05:29:01 -07:00
|
|
|
/// A batch of contributions the algorithm has output.
#[derive(Clone, Debug)]
pub struct Batch<C, NodeUid> {
    /// The epoch in which this batch was output.
    pub epoch: u64,
    /// The set of agreed contributions, keyed by the node that proposed each one.
    pub contributions: BTreeMap<NodeUid, C>,
}
|
|
|
|
|
2018-07-09 05:29:01 -07:00
|
|
|
impl<C, NodeUid: Ord> Batch<C, NodeUid> {
|
2018-06-30 04:09:47 -07:00
|
|
|
/// Returns an iterator over references to all transactions included in the batch.
|
2018-07-09 05:29:01 -07:00
|
|
|
pub fn iter<'a>(&'a self) -> impl Iterator<Item = <&'a C as IntoIterator>::Item>
|
|
|
|
where
|
|
|
|
&'a C: IntoIterator,
|
|
|
|
{
|
|
|
|
self.contributions.values().flat_map(|item| item)
|
2018-06-18 07:14:17 -07:00
|
|
|
}
|
|
|
|
|
2018-06-30 04:09:47 -07:00
|
|
|
/// Returns an iterator over all transactions included in the batch. Consumes the batch.
|
2018-07-09 05:29:01 -07:00
|
|
|
pub fn into_tx_iter(self) -> impl Iterator<Item = <C as IntoIterator>::Item>
|
|
|
|
where
|
|
|
|
C: IntoIterator,
|
|
|
|
{
|
|
|
|
self.contributions.into_iter().flat_map(|(_, vec)| vec)
|
2018-06-30 04:09:47 -07:00
|
|
|
}
|
|
|
|
|
2018-06-18 07:14:17 -07:00
|
|
|
/// Returns the number of transactions in the batch (without detecting duplicates).
|
2018-07-09 05:29:01 -07:00
|
|
|
pub fn len<Tx>(&self) -> usize
|
|
|
|
where
|
|
|
|
C: AsRef<[Tx]>,
|
|
|
|
{
|
|
|
|
self.contributions
|
|
|
|
.values()
|
|
|
|
.map(C::as_ref)
|
|
|
|
.map(<[Tx]>::len)
|
|
|
|
.sum()
|
2018-06-18 07:14:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns `true` if the batch contains no transactions.
|
2018-07-09 05:29:01 -07:00
|
|
|
pub fn is_empty<Tx>(&self) -> bool
|
|
|
|
where
|
|
|
|
C: AsRef<[Tx]>,
|
|
|
|
{
|
|
|
|
self.contributions
|
|
|
|
.values()
|
|
|
|
.map(C::as_ref)
|
|
|
|
.all(<[Tx]>::is_empty)
|
2018-06-18 07:14:17 -07:00
|
|
|
}
|
2018-05-12 07:09:07 -07:00
|
|
|
}
|
|
|
|
|
2018-06-19 07:17:16 -07:00
|
|
|
/// The content of a `HoneyBadger` message. It should be further annotated with an epoch.
#[derive(Clone, Debug, Deserialize, Rand, Serialize)]
pub enum MessageContent<NodeUid: Rand> {
    /// A message belonging to the common subset algorithm in the given epoch.
    CommonSubset(common_subset::Message<NodeUid>),
    /// A decrypted share of the output of `proposer_id`.
    DecryptionShare {
        /// The node whose proposed contribution this share helps decrypt.
        proposer_id: NodeUid,
        /// The sender's threshold decryption share for the proposer's contribution.
        share: DecryptionShare,
    },
}
|
|
|
|
|
2018-07-05 09:20:53 -07:00
|
|
|
impl<NodeUid: Rand> MessageContent<NodeUid> {
|
2018-06-25 11:22:08 -07:00
|
|
|
pub fn with_epoch(self, epoch: u64) -> Message<NodeUid> {
|
2018-06-22 01:19:29 -07:00
|
|
|
Message {
|
2018-06-19 07:17:16 -07:00
|
|
|
epoch,
|
|
|
|
content: self,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-22 01:19:29 -07:00
|
|
|
/// A message sent to or received from another node's Honey Badger instance.
#[derive(Clone, Debug, Deserialize, Rand, Serialize)]
pub struct Message<NodeUid: Rand> {
    /// The epoch this message belongss to.
    epoch: u64,
    /// The payload: a common subset message or a decryption share.
    content: MessageContent<NodeUid>,
}
|
|
|
|
|
2018-07-05 09:20:53 -07:00
|
|
|
impl<NodeUid: Rand> Message<NodeUid> {
|
2018-06-20 08:47:52 -07:00
|
|
|
pub fn epoch(&self) -> u64 {
|
|
|
|
self.epoch
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-19 05:29:31 -07:00
|
|
|
/// The queue of outgoing messages in a `HoneyBadger` instance.
// `Deref`/`DerefMut` expose the inner `VecDeque`'s API directly on the wrapper.
#[derive(Deref, DerefMut)]
struct MessageQueue<NodeUid: Rand>(VecDeque<TargetedMessage<Message<NodeUid>, NodeUid>>);
|
2018-05-19 05:29:31 -07:00
|
|
|
|
2018-07-05 09:20:53 -07:00
|
|
|
impl<NodeUid: Clone + Debug + Ord + Rand> MessageQueue<NodeUid> {
|
2018-05-19 05:29:31 -07:00
|
|
|
/// Appends to the queue the messages from `cs`, wrapped with `epoch`.
|
2018-07-18 05:15:47 -07:00
|
|
|
fn extend_with_epoch(
|
|
|
|
&mut self,
|
|
|
|
epoch: u64,
|
|
|
|
msgs: &mut VecDeque<TargetedMessage<common_subset::Message<NodeUid>, NodeUid>>,
|
|
|
|
) {
|
2018-06-18 07:14:17 -07:00
|
|
|
let convert = |msg: TargetedMessage<common_subset::Message<NodeUid>, NodeUid>| {
|
2018-06-22 01:19:29 -07:00
|
|
|
msg.map(|cs_msg| MessageContent::CommonSubset(cs_msg).with_epoch(epoch))
|
2018-05-19 05:29:31 -07:00
|
|
|
};
|
2018-07-18 05:15:47 -07:00
|
|
|
self.extend(msgs.drain(..).map(convert));
|
2018-05-19 05:29:31 -07:00
|
|
|
}
|
|
|
|
}
|