Merge pull request #96 from poanetwork/afck-hb-delay

Add a max_future_epochs option to Honey Badger.
Vladimir Komendantskiy 2018-07-05 16:54:19 +01:00 committed by GitHub
commit 0038d3cad9
4 changed files with 146 additions and 34 deletions
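With this change, callers construct `HoneyBadger` through a builder instead of `HoneyBadger::new`, and can bound how far ahead of the current epoch messages are processed. A minimal caller-side sketch, based on the builder API in the hunks below (the `netinfo` and `txs` bindings are placeholders; 100 and 3 are the builder defaults):

// `netinfo` is an Rc<NetworkInfo<NodeUid>>; `txs` is any IntoIterator<Item = Tx>.
let honey_badger = HoneyBadger::builder(netinfo)
    .batch_size(100)             // target number of transactions per batch (builder default)
    .max_future_epochs(3)        // new option: how many future epochs to handle at once
    .build_with_transactions(txs)
    .expect("instantiate HoneyBadger");

Use `.build()` instead of `.build_with_transactions(txs)` to start with an empty buffer.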


@@ -436,7 +436,10 @@ fn main() {
sk_set.secret_key_share(id.0 as u64),
pk_set.clone(),
));
HoneyBadger::new(netinfo, args.flag_b, txs.clone()).expect("Instantiate honey_badger")
HoneyBadger::builder(netinfo)
.batch_size(args.flag_b)
.build_with_transactions(txs.clone())
.expect("Instantiate honey_badger")
};
let hw_quality = HwQuality {
latency: Duration::from_millis(args.flag_lag),


@@ -127,6 +127,8 @@ pub struct DynamicHoneyBadgerBuilder<Tx, NodeUid> {
batch_size: usize,
/// The epoch at which to join the network.
start_epoch: u64,
/// The maximum number of future epochs for which we handle messages simultaneously.
max_future_epochs: usize,
_phantom: PhantomData<Tx>,
}
@@ -143,6 +145,7 @@ where
netinfo,
batch_size: 100,
start_epoch: 0,
max_future_epochs: 3,
_phantom: PhantomData,
}
}
@@ -153,6 +156,12 @@ where
self
}
/// Sets the maximum number of future epochs for which we handle messages simultaneously.
pub fn max_future_epochs(&mut self, max_future_epochs: usize) -> &mut Self {
self.max_future_epochs = max_future_epochs;
self
}
/// Sets the epoch at which to join the network as an observer. This requires the node to
/// receive all broadcast messages for `start_epoch` and later.
pub fn start_epoch(&mut self, start_epoch: u64) -> &mut Self {
@@ -165,10 +174,14 @@ where
where
Tx: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
{
let honey_badger = HoneyBadger::new(Rc::new(self.netinfo.clone()), self.batch_size, None)?;
let honey_badger = HoneyBadger::builder(Rc::new(self.netinfo.clone()))
.batch_size(self.batch_size)
.max_future_epochs(self.max_future_epochs)
.build()?;
let dyn_hb = DynamicHoneyBadger {
netinfo: self.netinfo.clone(),
batch_size: self.batch_size,
max_future_epochs: self.max_future_epochs,
start_epoch: self.start_epoch,
votes: BTreeMap::new(),
honey_badger,
@@ -191,6 +204,8 @@ where
netinfo: NetworkInfo<NodeUid>,
/// The target number of transactions per batch.
batch_size: usize,
/// The maximum number of future epochs for which we handle messages simultaneously.
max_future_epochs: usize,
/// The first epoch after the latest node change.
start_epoch: u64,
/// Collected votes for adding or removing nodes. Each node has one vote, and casting another
@@ -444,7 +459,10 @@ where
let old_buf = self.honey_badger.drain_buffer();
let outputs = (self.honey_badger.output_iter()).flat_map(HbBatch::into_tx_iter);
let buffer = outputs.chain(old_buf).filter(Transaction::is_user);
HoneyBadger::new(netinfo, self.batch_size, buffer)?
HoneyBadger::builder(netinfo)
.batch_size(self.batch_size)
.max_future_epochs(self.max_future_epochs)
.build_with_transactions(buffer)?
};
self.honey_badger = honey_badger;
Ok(())

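`DynamicHoneyBadgerBuilder` forwards the same option to the inner `HoneyBadger`, both at construction and when it rebuilds `HoneyBadger` after a node change. A hedged sketch of configuring it; only the setters are visible in this diff, so the `new(netinfo)` constructor and the terminal `build()` are assumed from the surrounding hunks, and `netinfo` is a `NetworkInfo<NodeUid>` taken by value:

// Assumed constructor and terminal method names; setters are from the hunks above.
let dyn_hb = DynamicHoneyBadgerBuilder::new(netinfo)
    .batch_size(100)
    .max_future_epochs(3)    // forwarded to the inner HoneyBadger
    .start_epoch(0)          // epoch at which to join the network
    .build()
    .expect("instantiate DynamicHoneyBadger");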

@@ -2,10 +2,12 @@ use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque};
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::rc::Rc;
use std::{cmp, iter, mem};
use bincode;
use itertools::Itertools;
use rand;
use serde::{Deserialize, Serialize};
@@ -31,6 +33,82 @@ error_chain!{
}
}
/// A Honey Badger builder, to configure the parameters and create new instances of `HoneyBadger`.
pub struct HoneyBadgerBuilder<Tx, NodeUid> {
/// Shared network data.
netinfo: Rc<NetworkInfo<NodeUid>>,
/// The target number of transactions to be included in each batch.
// TODO: Do experiments and pick a suitable default.
batch_size: usize,
/// The maximum number of future epochs for which we handle messages simultaneously.
max_future_epochs: usize,
_phantom: PhantomData<Tx>,
}
impl<Tx, NodeUid> HoneyBadgerBuilder<Tx, NodeUid>
where
NodeUid: Ord + Clone + Debug,
{
/// Returns a new `HoneyBadgerBuilder` configured to use the node IDs and cryptographic keys
/// specified by `netinfo`.
pub fn new(netinfo: Rc<NetworkInfo<NodeUid>>) -> Self {
HoneyBadgerBuilder {
netinfo,
batch_size: 100,
max_future_epochs: 3,
_phantom: PhantomData,
}
}
/// Sets the target number of transactions per batch.
pub fn batch_size(&mut self, batch_size: usize) -> &mut Self {
self.batch_size = batch_size;
self
}
/// Sets the maximum number of future epochs for which we handle messages simultaneously.
pub fn max_future_epochs(&mut self, max_future_epochs: usize) -> &mut Self {
self.max_future_epochs = max_future_epochs;
self
}
/// Creates a new Honey Badger instance with an empty buffer.
pub fn build(&self) -> HoneyBadgerResult<HoneyBadger<Tx, NodeUid>>
where
Tx: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
{
self.build_with_transactions(None)
}
/// Returns a new Honey Badger instance that starts with the given transactions in its buffer.
pub fn build_with_transactions<TI>(
&self,
txs: TI,
) -> HoneyBadgerResult<HoneyBadger<Tx, NodeUid>>
where
TI: IntoIterator<Item = Tx>,
Tx: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
{
let mut honey_badger = HoneyBadger {
netinfo: self.netinfo.clone(),
buffer: Vec::new(),
epoch: 0,
common_subsets: BTreeMap::new(),
batch_size: self.batch_size,
max_future_epochs: self.max_future_epochs as u64,
messages: MessageQueue(VecDeque::new()),
output: VecDeque::new(),
incoming_queue: BTreeMap::new(),
received_shares: BTreeMap::new(),
decrypted_selections: BTreeMap::new(),
ciphertexts: BTreeMap::new(),
};
honey_badger.buffer.extend(txs);
honey_badger.propose()?;
Ok(honey_badger)
}
}
/// An instance of the Honey Badger Byzantine fault tolerant consensus algorithm.
pub struct HoneyBadger<Tx, NodeUid> {
/// Shared network data.
@@ -46,10 +124,14 @@ pub struct HoneyBadger<Tx, NodeUid> {
// TODO: Do experiments and recommend a batch size. It should be proportional to
// `num_nodes * num_nodes * log(num_nodes)`.
batch_size: usize,
/// The maximum number of `CommonSubset` instances that we run simultaneously.
max_future_epochs: u64,
/// The messages that need to be sent to other nodes.
messages: MessageQueue<NodeUid>,
/// The outputs from completed epochs.
output: VecDeque<Batch<Tx, NodeUid>>,
/// Messages for future epochs that couldn't be handled yet.
incoming_queue: BTreeMap<u64, Vec<(NodeUid, MessageContent<NodeUid>)>>,
/// Received decryption shares for an epoch. Each decryption share has a sender and a
/// proposer. The outer `BTreeMap` has epochs as its key. The next `BTreeMap` has proposers as
/// its key. The inner `BTreeMap` has the sender as its key.
@@ -89,14 +171,15 @@ where
// Ignore all messages from past epochs.
return Ok(());
}
match content {
MessageContent::CommonSubset(cs_msg) => {
self.handle_common_subset_message(sender_id, epoch, cs_msg)
}
MessageContent::DecryptionShare { proposer_id, share } => {
self.handle_decryption_share_message(sender_id, epoch, proposer_id, share)
}
if epoch > self.epoch + self.max_future_epochs {
// Postpone handling this message.
self.incoming_queue
.entry(epoch)
.or_insert_with(Vec::new)
.push((sender_id.clone(), content));
return Ok(());
}
self.handle_message_content(sender_id, epoch, content)
}
fn next_message(&mut self) -> Option<TargetedMessage<Self::Message, NodeUid>> {
@@ -121,29 +204,10 @@ where
Tx: Serialize + for<'r> Deserialize<'r> + Debug + Hash + Eq,
NodeUid: Ord + Clone + Debug,
{
/// Returns a new Honey Badger instance with the given parameters, starting at epoch `0`.
pub fn new<TI>(
netinfo: Rc<NetworkInfo<NodeUid>>,
batch_size: usize,
txs: TI,
) -> HoneyBadgerResult<Self>
where
TI: IntoIterator<Item = Tx>,
{
let mut honey_badger = HoneyBadger {
netinfo,
buffer: txs.into_iter().collect(),
epoch: 0,
common_subsets: BTreeMap::new(),
batch_size,
messages: MessageQueue(VecDeque::new()),
output: VecDeque::new(),
received_shares: BTreeMap::new(),
decrypted_selections: BTreeMap::new(),
ciphertexts: BTreeMap::new(),
};
honey_badger.propose()?;
Ok(honey_badger)
/// Returns a new `HoneyBadgerBuilder` configured to use the node IDs and cryptographic keys
/// specified by `netinfo`.
pub fn builder(netinfo: Rc<NetworkInfo<NodeUid>>) -> HoneyBadgerBuilder<Tx, NodeUid> {
HoneyBadgerBuilder::new(netinfo)
}
/// Adds transactions into the buffer.
@@ -193,6 +257,23 @@ where
Ok(bincode::serialize(&sample)?)
}
/// Handles a message for the given epoch.
fn handle_message_content(
&mut self,
sender_id: &NodeUid,
epoch: u64,
content: MessageContent<NodeUid>,
) -> HoneyBadgerResult<()> {
match content {
MessageContent::CommonSubset(cs_msg) => {
self.handle_common_subset_message(sender_id, epoch, cs_msg)
}
MessageContent::DecryptionShare { proposer_id, share } => {
self.handle_decryption_share_message(sender_id, epoch, proposer_id, share)
}
}
}
/// Handles a message for the common subset sub-algorithm.
fn handle_common_subset_message(
&mut self,
@@ -325,6 +406,13 @@ where
self.decrypted_selections.clear();
self.received_shares.remove(&self.epoch);
self.epoch += 1;
let max_epoch = self.epoch + self.max_future_epochs;
// TODO: Once stable, use `Iterator::flatten`.
for (sender_id, content) in
Itertools::flatten(self.incoming_queue.remove(&max_epoch).into_iter())
{
self.handle_message_content(&sender_id, max_epoch, content)?;
}
// Handle any decryption shares received for the new epoch.
if !self.try_decrypt_and_output_batch()? {
// Continue with this epoch if a batch is not output by `try_decrypt_and_output_batch`.

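The net effect of the new fields is a sliding window of epochs: with current epoch `e` and `max_future_epochs = m`, messages for epochs `e..=e + m` are handled immediately, anything later is parked in `incoming_queue`, and `update_epoch` drains the queue entry for the epoch that newly enters the window. An illustrative, self-contained sketch of that window check (not library code):

fn main() {
    // With the default max_future_epochs = 3 and current epoch 5:
    let (current, max_future_epochs) = (5u64, 3u64);
    let handled_now = |epoch: u64| epoch >= current && epoch <= current + max_future_epochs;
    assert!(handled_now(5) && handled_now(8)); // within the window: processed right away
    assert!(!handled_now(9));                  // queued until the current epoch advances to 6
    assert!(!handled_now(4));                  // past epoch: ignored entirely
}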

@@ -181,7 +181,10 @@ where
}
fn new_honey_badger(netinfo: Rc<NetworkInfo<NodeUid>>) -> HoneyBadger<usize, NodeUid> {
HoneyBadger::new(netinfo, 12, 0..5).expect("Instantiate honey_badger")
HoneyBadger::builder(netinfo)
.batch_size(12)
.build_with_transactions(0..5)
.expect("Instantiate honey_badger")
}
fn test_honey_badger_different_sizes<A, F>(new_adversary: F, num_txs: usize)