Merge pull request #199 from poanetwork/afck-bc-split

Split up the broadcast module.
This commit is contained in:
Andreas Fackler 2018-08-09 09:38:40 +02:00 committed by GitHub
commit df21cdcb07
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 222 additions and 218 deletions

View File

@ -45,7 +45,7 @@ use crossbeam;
use crypto::poly::Poly;
use crypto::{SecretKey, SecretKeySet};
use hbbft::broadcast::{Broadcast, BroadcastMessage};
use hbbft::broadcast::{Broadcast, Message};
use hbbft::messaging::{DistAlgorithm, NetworkInfo, SourcedMessage};
use network::commst;
use network::connection;
@ -122,7 +122,7 @@ impl<T: Clone + Debug + AsRef<[u8]> + PartialEq + Send + Sync + From<Vec<u8>> +
}
// Initialise the message delivery system and obtain TX and RX handles.
let messaging: Messaging<BroadcastMessage> = Messaging::new(all_ids.len());
let messaging: Messaging<Message> = Messaging::new(all_ids.len());
let rxs_to_comms = messaging.rxs_to_comms();
let tx_from_comms = messaging.tx_from_comms();
let rx_to_algo = messaging.rx_to_algo();
@ -182,7 +182,7 @@ impl<T: Clone + Debug + AsRef<[u8]> + PartialEq + Send + Sync + From<Vec<u8>> +
let rx_to_comms = &rxs_to_comms[node_index];
scope.spawn(move || {
match commst::CommsTask::<BroadcastMessage>::new(
match commst::CommsTask::<Message>::new(
tx_from_comms,
rx_to_comms,
// FIXME: handle error

View File

@ -5,7 +5,7 @@ use itertools::Itertools;
use super::bool_multimap::BoolMultimap;
use super::sbv_broadcast::{self, SbvBroadcast};
use super::{AgreementContent, AgreementMessage, Error, Nonce, Result, Step};
use super::{AgreementContent, Error, Message, Nonce, Result, Step};
use agreement::bool_set::BoolSet;
use common_coin::{self, CommonCoin, CommonCoinMessage};
use messaging::{DistAlgorithm, NetworkInfo, Target};
@ -76,7 +76,7 @@ impl<N: NodeUidT> DistAlgorithm for Agreement<N> {
type NodeUid = N;
type Input = bool;
type Output = bool;
type Message = AgreementMessage;
type Message = Message;
type Error = Error;
fn input(&mut self, input: Self::Input) -> Result<Step<N>> {
@ -84,11 +84,8 @@ impl<N: NodeUidT> DistAlgorithm for Agreement<N> {
}
/// Receive input from a remote node.
fn handle_message(
&mut self,
sender_id: &Self::NodeUid,
AgreementMessage { epoch, content }: Self::Message,
) -> Result<Step<N>> {
fn handle_message(&mut self, sender_id: &Self::NodeUid, msg: Message) -> Result<Step<N>> {
let Message { epoch, content } = msg;
if self.decision.is_some() || (epoch < self.epoch && content.can_expire()) {
// Message is obsolete: We are already in a later epoch or terminated.
Ok(Step::default())

View File

@ -108,8 +108,8 @@ pub enum AgreementContent {
impl AgreementContent {
/// Creates a message with a given epoch number.
pub fn with_epoch(self, epoch: u32) -> AgreementMessage {
AgreementMessage {
pub fn with_epoch(self, epoch: u32) -> Message {
Message {
epoch,
content: self,
}
@ -126,7 +126,7 @@ impl AgreementContent {
/// Messages sent during the binary Byzantine agreement stage.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Rand)]
pub struct AgreementMessage {
pub struct Message {
pub epoch: u32,
pub content: AgreementContent,
}

View File

@ -1,153 +1,3 @@
//! # Broadcast
//!
//! The Reliable Broadcast Protocol assumes a network of _N_ nodes that send signed messages to
//! each other, with at most _f_ of them faulty, where _3 f < N_. Handling the networking and
//! signing is the responsibility of this crate's user; a message is only handed to the Broadcast
//! instance after it has been verified to be "from node i". One of the nodes is the "proposer"
//! who sends a value. It needs to be determined beforehand, and all nodes need to know and agree
//! who it is. Under the above conditions, the protocol guarantees that either all or none
//! of the correct nodes output a value, and that if the proposer is correct, all correct nodes
//! output the proposed value.
//!
//! ## How it works
//!
//! * The proposer uses a Reed-Solomon code to split the value into _N_ chunks, _N - 2 f_ of which
//! suffice to reconstruct the value. These chunks are put into a Merkle tree, so that with the
//! tree's root hash `h`, branch `bi` and chunk `si`, the `i`-th chunk `si` can be verified by
//! anyone as belonging to the Merkle tree with root hash `h`. These values are "proof" number `i`:
//! `pi = (h, bi, si)`.
//! * The proposer sends `Value(pi)` to node `i`. It translates to: "I am the proposer, and `pi`
//! contains the `i`-th share of my value."
//! * Every (correct) node that receives `Value(pi)` from the proposer sends it on to everyone else
//! as `Echo(pi)`. An `Echo` translates to: "I have received `pi` directly from the proposer." If
//! the proposer sends another `Value` message it is ignored.
//! * So every node that receives at least _f + 1_ `Echo` messages with the same root hash can
//! decode a value.
//! * Every node that has received _N - f_ `Echo`s with the same root hash from different nodes
//! knows that at least _N - 2 f_ _correct_ nodes have sent an `Echo` with that hash to everyone,
//! and therefore everyone will eventually receive at least _N - f_ of them. So upon receiving
//! _N - f_ `Echo`s, they send a `Ready(h)` to everyone. It translates to: "I know that everyone
//! will eventually be able to decode the value with root hash `h`." Moreover, since every correct
//! node only sends one kind of `Echo` message, there is no danger of receiving _N - f_ `Echo`s
//! with two different root hashes.
//! * Even without enough `Echo` messages, if a node receives _2 f + 1_ `Ready` messages, it knows
//! that at least one _correct_ node has sent `Ready`. It therefore also knows that everyone will
//! be able to decode eventually, and multicasts `Ready` itself.
//! * If a node has received _2 f + 1_ `Ready`s (with matching root hash) from different nodes,
//! it knows that at least _2 f + 1_ _correct_ nodes have sent it. Therefore, every correct node
//! will eventually receive _2 f + 1_, and multicast it itself. Therefore, every correct node will
//! eventually receive _2 f + 1_ `Ready`s, too. _And_ we know at this point that every correct
//! node will eventually be able to decode (i.e. receive at least _2 f + 1_ `Echo` messages).
//! * So a node with _2 f + 1_ `Ready`s and _N - 2 f_ `Echo`s will decode and _output_ the value,
//! knowing that every other correct node will eventually do the same.
//!
//! ## Example
//!
//! In this example, we manually pass messages between instantiated nodes to simulate a network. The
//! network is composed of 7 nodes, and node 3 is the proposer. We use `u64` as network IDs, and
//! start by creating a common network info. Then we input a randomly generated payload into the
//! proposer and process all the resulting messages in a loop. For the purpose of simulation we
//! annotate each message with the node that produced it. For each output, we perform correctness
//! checks to verify that every node has output the same payload as we provided to the proposer
//! node, and that it did so exactly once.
//!
//! ```
//! extern crate hbbft;
//! extern crate rand;
//!
//! use hbbft::broadcast::{Broadcast, Error, Step};
//! use hbbft::messaging::{DistAlgorithm, NetworkInfo, SourcedMessage, Target, TargetedMessage};
//! use rand::{thread_rng, Rng};
//! use std::collections::{BTreeMap, BTreeSet, VecDeque};
//! use std::iter::once;
//! use std::sync::Arc;
//!
//! fn main() -> Result<(), Error> {
//! // Our simulated network has seven nodes in total, node 3 is the proposer.
//! const NUM_NODES: u64 = 7;
//! const PROPOSER_ID: u64 = 3;
//!
//! let mut rng = thread_rng();
//!
//! // Create a random set of keys for testing.
//! let netinfos = NetworkInfo::generate_map(0..NUM_NODES);
//!
//! // Create initial nodes by instantiating a `Broadcast` for each.
//! let mut nodes = BTreeMap::new();
//! for (i, netinfo) in netinfos {
//! let bc = Broadcast::new(Arc::new(netinfo), PROPOSER_ID)?;
//! nodes.insert(i, bc);
//! }
//!
//! // First we generate a random payload.
//! let mut payload: Vec<_> = vec![0; 128];
//! rng.fill_bytes(&mut payload[..]);
//!
//! // Define a function for handling one step of a `Broadcast` instance. This function appends new
//! // messages onto the message queue and checks whether each node outputs at most once and the
//! // output is correct.
//! let on_step = |id: u64,
//! step: Step<u64>,
//! messages: &mut VecDeque<SourcedMessage<TargetedMessage<_, _>, _>>,
//! finished_nodes: &mut BTreeSet<u64>| {
//! // Annotate messages with the sender ID.
//! messages.extend(step.messages.into_iter().map(|msg| SourcedMessage {
//! source: id,
//! message: msg,
//! }));
//! if !step.output.is_empty() {
//! // The output should be the same as the input we gave to the proposer.
//! assert!(step.output.iter().eq(once(&payload)));
//! // Every node should output exactly once. Here we check the first half of this
//! // statement, namely that every node outputs at most once.
//! assert!(finished_nodes.insert(id));
//! }
//! };
//!
//! let mut messages = VecDeque::new();
//! let mut finished_nodes = BTreeSet::new();
//!
//! // Now we can start the algorithm, its input is the payload.
//! let initial_step = {
//! let proposer = nodes.get_mut(&PROPOSER_ID).unwrap();
//! proposer.input(payload.clone()).unwrap()
//! };
//! on_step(
//! PROPOSER_ID,
//! initial_step,
//! &mut messages,
//! &mut finished_nodes,
//! );
//!
//! // The message loop: The network is simulated by passing messages around from node to node.
//! while let Some(SourcedMessage {
//! source,
//! message: TargetedMessage { target, message },
//! }) = messages.pop_front()
//! {
//! match target {
//! Target::All => {
//! for (id, node) in &mut nodes {
//! let step = node.handle_message(&source, message.clone())?;
//! on_step(*id, step, &mut messages, &mut finished_nodes);
//! }
//! }
//! Target::Node(id) => {
//! let step = {
//! let node = nodes.get_mut(&id).unwrap();
//! node.handle_message(&source, message)?
//! };
//! on_step(id, step, &mut messages, &mut finished_nodes);
//! }
//! };
//! }
//! // Every node should output exactly once. Here we check the second half of this statement,
//! // namely that every node outputs.
//! assert_eq!(finished_nodes, nodes.keys().cloned().collect());
//! Ok(())
//! }
//! ```
use std::collections::BTreeMap;
use std::fmt::{self, Debug};
use std::iter::once;
@ -160,46 +10,16 @@ use reed_solomon_erasure as rse;
use reed_solomon_erasure::ReedSolomon;
use ring::digest;
use super::{Error, Result};
use fault_log::{Fault, FaultKind};
use fmt::{HexBytes, HexList, HexProof};
use messaging::{self, DistAlgorithm, NetworkInfo, Target};
use traits::NodeUidT;
/// A broadcast error.
#[derive(Clone, PartialEq, Debug, Fail)]
pub enum Error {
#[fail(display = "CodingNewReedSolomon error: {}", _0)]
CodingNewReedSolomon(#[cause] rse::Error),
#[fail(display = "CodingEncodeReedSolomon error: {}", _0)]
CodingEncodeReedSolomon(#[cause] rse::Error),
#[fail(display = "CodingReconstructShardsReedSolomon error: {}", _0)]
CodingReconstructShardsReedSolomon(#[cause] rse::Error),
#[fail(
display = "CodingReconstructShardsTrivialReedSolomon error: {}",
_0
)]
CodingReconstructShardsTrivialReedSolomon(#[cause] rse::Error),
#[fail(display = "Instance cannot propose")]
InstanceCannotPropose,
#[fail(display = "Not implemented")]
NotImplemented,
#[fail(display = "Proof construction failed")]
ProofConstructionFailed,
#[fail(display = "Root hash mismatch")]
RootHashMismatch,
#[fail(display = "Threading")]
Threading,
#[fail(display = "Unknown sender")]
UnknownSender,
}
/// A broadcast result.
pub type Result<T> = ::std::result::Result<T, Error>;
/// The three kinds of message sent during the reliable broadcast stage of the
/// consensus algorithm.
#[derive(Serialize, Deserialize, Clone, PartialEq)]
pub enum BroadcastMessage {
pub enum Message {
Value(Proof<Vec<u8>>),
Echo(Proof<Vec<u8>>),
Ready(Vec<u8>),
@ -207,7 +27,7 @@ pub enum BroadcastMessage {
// A random generation impl is provided for test cases. Unfortunately `#[cfg(test)]` does not work
// for integration tests.
impl rand::Rand for BroadcastMessage {
impl rand::Rand for Message {
fn rand<R: rand::Rng>(rng: &mut R) -> Self {
let message_type = *rng.choose(&["value", "echo", "ready"]).unwrap();
@ -220,20 +40,20 @@ impl rand::Rand for BroadcastMessage {
let proof = tree.gen_proof(buffer.to_vec()).unwrap();
match message_type {
"value" => BroadcastMessage::Value(proof),
"echo" => BroadcastMessage::Echo(proof),
"ready" => BroadcastMessage::Ready(b"dummy-ready".to_vec()),
"value" => Message::Value(proof),
"echo" => Message::Echo(proof),
"ready" => Message::Ready(b"dummy-ready".to_vec()),
_ => unreachable!(),
}
}
}
impl Debug for BroadcastMessage {
impl Debug for Message {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
BroadcastMessage::Value(ref v) => write!(f, "Value({:?})", HexProof(&v)),
BroadcastMessage::Echo(ref v) => write!(f, "Echo({:?})", HexProof(&v)),
BroadcastMessage::Ready(ref bytes) => write!(f, "Ready({:?})", HexBytes(bytes)),
Message::Value(ref v) => write!(f, "Value({:?})", HexProof(&v)),
Message::Echo(ref v) => write!(f, "Echo({:?})", HexProof(&v)),
Message::Ready(ref bytes) => write!(f, "Ready({:?})", HexBytes(bytes)),
}
}
}
@ -267,7 +87,7 @@ impl<N: NodeUidT> DistAlgorithm for Broadcast<N> {
// T: Serialize + DeserializeOwned
type Input = Vec<u8>;
type Output = Self::Input;
type Message = BroadcastMessage;
type Message = Message;
type Error = Error;
fn input(&mut self, input: Self::Input) -> Result<Step<N>> {
@ -288,9 +108,9 @@ impl<N: NodeUidT> DistAlgorithm for Broadcast<N> {
return Err(Error::UnknownSender);
}
match message {
BroadcastMessage::Value(p) => self.handle_value(sender_id, p),
BroadcastMessage::Echo(p) => self.handle_echo(sender_id, p),
BroadcastMessage::Ready(ref hash) => self.handle_ready(sender_id, hash),
Message::Value(p) => self.handle_value(sender_id, p),
Message::Echo(p) => self.handle_echo(sender_id, p),
Message::Ready(ref hash) => self.handle_ready(sender_id, hash),
}
}
@ -395,7 +215,7 @@ impl<N: NodeUidT> Broadcast<N> {
result = Ok(proof);
} else {
// Rest of the proofs are sent to remote nodes.
let msg = Target::Node(uid.clone()).message(BroadcastMessage::Value(proof));
let msg = Target::Node(uid.clone()).message(Message::Value(proof));
step.messages.push_back(msg);
}
}
@ -496,7 +316,7 @@ impl<N: NodeUidT> Broadcast<N> {
if !self.netinfo.is_validator() {
return Ok(Step::default());
}
let echo_msg = BroadcastMessage::Echo(p.clone());
let echo_msg = Message::Echo(p.clone());
let mut step: Step<_> = Target::All.message(echo_msg).into();
let our_uid = &self.netinfo.our_uid().clone();
step.extend(self.handle_echo(our_uid, p)?);
@ -509,7 +329,7 @@ impl<N: NodeUidT> Broadcast<N> {
if !self.netinfo.is_validator() {
return Ok(Step::default());
}
let ready_msg = BroadcastMessage::Ready(hash.to_vec());
let ready_msg = Message::Ready(hash.to_vec());
let mut step: Step<_> = Target::All.message(ready_msg).into();
let our_uid = &self.netinfo.our_uid().clone();
step.extend(self.handle_ready(our_uid, hash)?);

32
src/broadcast/error.rs Normal file
View File

@ -0,0 +1,32 @@
use reed_solomon_erasure as rse;
/// A broadcast error.
#[derive(Clone, PartialEq, Debug, Fail)]
pub enum Error {
#[fail(display = "CodingNewReedSolomon error: {}", _0)]
CodingNewReedSolomon(#[cause] rse::Error),
#[fail(display = "CodingEncodeReedSolomon error: {}", _0)]
CodingEncodeReedSolomon(#[cause] rse::Error),
#[fail(display = "CodingReconstructShardsReedSolomon error: {}", _0)]
CodingReconstructShardsReedSolomon(#[cause] rse::Error),
#[fail(
display = "CodingReconstructShardsTrivialReedSolomon error: {}",
_0
)]
CodingReconstructShardsTrivialReedSolomon(#[cause] rse::Error),
#[fail(display = "Instance cannot propose")]
InstanceCannotPropose,
#[fail(display = "Not implemented")]
NotImplemented,
#[fail(display = "Proof construction failed")]
ProofConstructionFailed,
#[fail(display = "Root hash mismatch")]
RootHashMismatch,
#[fail(display = "Threading")]
Threading,
#[fail(display = "Unknown sender")]
UnknownSender,
}
/// A broadcast result.
pub type Result<T> = ::std::result::Result<T, Error>;

155
src/broadcast/mod.rs Normal file
View File

@ -0,0 +1,155 @@
//! # Broadcast
//!
//! The Reliable Broadcast Protocol assumes a network of _N_ nodes that send signed messages to
//! each other, with at most _f_ of them faulty, where _3 f < N_. Handling the networking and
//! signing is the responsibility of this crate's user; a message is only handed to the Broadcast
//! instance after it has been verified to be "from node i". One of the nodes is the "proposer"
//! who sends a value. It needs to be determined beforehand, and all nodes need to know and agree
//! who it is. Under the above conditions, the protocol guarantees that either all or none
//! of the correct nodes output a value, and that if the proposer is correct, all correct nodes
//! output the proposed value.
//!
//! ## How it works
//!
//! * The proposer uses a Reed-Solomon code to split the value into _N_ chunks, _N - 2 f_ of which
//! suffice to reconstruct the value. These chunks are put into a Merkle tree, so that with the
//! tree's root hash `h`, branch `bi` and chunk `si`, the `i`-th chunk `si` can be verified by
//! anyone as belonging to the Merkle tree with root hash `h`. These values are "proof" number `i`:
//! `pi = (h, bi, si)`.
//! * The proposer sends `Value(pi)` to node `i`. It translates to: "I am the proposer, and `pi`
//! contains the `i`-th share of my value."
//! * Every (correct) node that receives `Value(pi)` from the proposer sends it on to everyone else
//! as `Echo(pi)`. An `Echo` translates to: "I have received `pi` directly from the proposer." If
//! the proposer sends another `Value` message it is ignored.
//! * So every node that receives at least _f + 1_ `Echo` messages with the same root hash can
//! decode a value.
//! * Every node that has received _N - f_ `Echo`s with the same root hash from different nodes
//! knows that at least _N - 2 f_ _correct_ nodes have sent an `Echo` with that hash to everyone,
//! and therefore everyone will eventually receive at least _N - f_ of them. So upon receiving
//! _N - f_ `Echo`s, they send a `Ready(h)` to everyone. It translates to: "I know that everyone
//! will eventually be able to decode the value with root hash `h`." Moreover, since every correct
//! node only sends one kind of `Echo` message, there is no danger of receiving _N - f_ `Echo`s
//! with two different root hashes.
//! * Even without enough `Echo` messages, if a node receives _2 f + 1_ `Ready` messages, it knows
//! that at least one _correct_ node has sent `Ready`. It therefore also knows that everyone will
//! be able to decode eventually, and multicasts `Ready` itself.
//! * If a node has received _2 f + 1_ `Ready`s (with matching root hash) from different nodes,
//! it knows that at least _2 f + 1_ _correct_ nodes have sent it. Therefore, every correct node
//! will eventually receive _2 f + 1_, and multicast it itself. Therefore, every correct node will
//! eventually receive _2 f + 1_ `Ready`s, too. _And_ we know at this point that every correct
//! node will eventually be able to decode (i.e. receive at least _2 f + 1_ `Echo` messages).
//! * So a node with _2 f + 1_ `Ready`s and _N - 2 f_ `Echo`s will decode and _output_ the value,
//! knowing that every other correct node will eventually do the same.
//!
//! ## Example
//!
//! In this example, we manually pass messages between instantiated nodes to simulate a network. The
//! network is composed of 7 nodes, and node 3 is the proposer. We use `u64` as network IDs, and
//! start by creating a common network info. Then we input a randomly generated payload into the
//! proposer and process all the resulting messages in a loop. For the purpose of simulation we
//! annotate each message with the node that produced it. For each output, we perform correctness
//! checks to verify that every node has output the same payload as we provided to the proposer
//! node, and that it did so exactly once.
//!
//! ```
//! extern crate hbbft;
//! extern crate rand;
//!
//! use hbbft::broadcast::{Broadcast, Error, Step};
//! use hbbft::messaging::{DistAlgorithm, NetworkInfo, SourcedMessage, Target, TargetedMessage};
//! use rand::{thread_rng, Rng};
//! use std::collections::{BTreeMap, BTreeSet, VecDeque};
//! use std::iter::once;
//! use std::sync::Arc;
//!
//! fn main() -> Result<(), Error> {
//! // Our simulated network has seven nodes in total, node 3 is the proposer.
//! const NUM_NODES: u64 = 7;
//! const PROPOSER_ID: u64 = 3;
//!
//! let mut rng = thread_rng();
//!
//! // Create a random set of keys for testing.
//! let netinfos = NetworkInfo::generate_map(0..NUM_NODES);
//!
//! // Create initial nodes by instantiating a `Broadcast` for each.
//! let mut nodes = BTreeMap::new();
//! for (i, netinfo) in netinfos {
//! let bc = Broadcast::new(Arc::new(netinfo), PROPOSER_ID)?;
//! nodes.insert(i, bc);
//! }
//!
//! // First we generate a random payload.
//! let mut payload: Vec<_> = vec![0; 128];
//! rng.fill_bytes(&mut payload[..]);
//!
//! // Define a function for handling one step of a `Broadcast` instance. This function appends new
//! // messages onto the message queue and checks whether each node outputs at most once and the
//! // output is correct.
//! let on_step = |id: u64,
//! step: Step<u64>,
//! messages: &mut VecDeque<SourcedMessage<TargetedMessage<_, _>, _>>,
//! finished_nodes: &mut BTreeSet<u64>| {
//! // Annotate messages with the sender ID.
//! messages.extend(step.messages.into_iter().map(|msg| SourcedMessage {
//! source: id,
//! message: msg,
//! }));
//! if !step.output.is_empty() {
//! // The output should be the same as the input we gave to the proposer.
//! assert!(step.output.iter().eq(once(&payload)));
//! // Every node should output exactly once. Here we check the first half of this
//! // statement, namely that every node outputs at most once.
//! assert!(finished_nodes.insert(id));
//! }
//! };
//!
//! let mut messages = VecDeque::new();
//! let mut finished_nodes = BTreeSet::new();
//!
//! // Now we can start the algorithm, its input is the payload.
//! let initial_step = {
//! let proposer = nodes.get_mut(&PROPOSER_ID).unwrap();
//! proposer.input(payload.clone()).unwrap()
//! };
//! on_step(
//! PROPOSER_ID,
//! initial_step,
//! &mut messages,
//! &mut finished_nodes,
//! );
//!
//! // The message loop: The network is simulated by passing messages around from node to node.
//! while let Some(SourcedMessage {
//! source,
//! message: TargetedMessage { target, message },
//! }) = messages.pop_front()
//! {
//! match target {
//! Target::All => {
//! for (id, node) in &mut nodes {
//! let step = node.handle_message(&source, message.clone())?;
//! on_step(*id, step, &mut messages, &mut finished_nodes);
//! }
//! }
//! Target::Node(id) => {
//! let step = {
//! let node = nodes.get_mut(&id).unwrap();
//! node.handle_message(&source, message)?
//! };
//! on_step(id, step, &mut messages, &mut finished_nodes);
//! }
//! };
//! }
//! // Every node should output exactly once. Here we check the second half of this statement,
//! // namely that every node outputs.
//! assert_eq!(finished_nodes, nodes.keys().cloned().collect());
//! Ok(())
//! }
//! ```
mod broadcast;
mod error;
pub use self::broadcast::{Broadcast, Message, Step};
pub use self::error::{Error, Result};

View File

@ -27,8 +27,8 @@ use std::collections::{BTreeMap, BTreeSet};
use std::result;
use std::sync::Arc;
use agreement::{self, Agreement, AgreementMessage};
use broadcast::{self, Broadcast, BroadcastMessage};
use agreement::{self, Agreement};
use broadcast::{self, Broadcast};
use fmt::HexBytes;
use messaging::{self, DistAlgorithm, NetworkInfo};
use rand::Rand;
@ -65,10 +65,10 @@ type ProposedValue = Vec<u8>;
#[derive(Serialize, Deserialize, Clone, Debug, Rand)]
pub enum Message<N: Rand> {
/// A message for the broadcast algorithm concerning the set element proposed by the given node.
Broadcast(N, BroadcastMessage),
Broadcast(N, broadcast::Message),
/// A message for the agreement algorithm concerning the set element proposed by the given
/// node.
Agreement(N, AgreementMessage),
Agreement(N, agreement::Message),
}
/// Asynchronous Common Subset algorithm instance
@ -175,7 +175,7 @@ impl<N: NodeUidT + Rand> CommonSubset<N> {
&mut self,
sender_id: &N,
proposer_id: &N,
bmessage: BroadcastMessage,
bmessage: broadcast::Message,
) -> Result<Step<N>> {
self.process_broadcast(proposer_id, |bc| bc.handle_message(sender_id, bmessage))
}
@ -186,7 +186,7 @@ impl<N: NodeUidT + Rand> CommonSubset<N> {
&mut self,
sender_id: &N,
proposer_id: &N,
amessage: AgreementMessage,
amessage: agreement::Message,
) -> Result<Step<N>> {
// Send the message to the local instance of Agreement
self.process_agreement(proposer_id, |agreement| {

View File

@ -21,7 +21,7 @@ use std::sync::Arc;
use rand::Rng;
use hbbft::broadcast::{Broadcast, BroadcastMessage};
use hbbft::broadcast::{Broadcast, Message};
use hbbft::messaging::{DistAlgorithm, NetworkInfo, Target, TargetedMessage};
use network::{
Adversary, MessageScheduler, MessageWithSender, NodeUid, RandomAdversary, SilentAdversary,
@ -57,7 +57,7 @@ impl Adversary<Broadcast<NodeUid>> for ProposeAdversary {
self.scheduler.pick_node(nodes)
}
fn push_message(&mut self, _: NodeUid, _: TargetedMessage<BroadcastMessage, NodeUid>) {
fn push_message(&mut self, _: NodeUid, _: TargetedMessage<Message, NodeUid>) {
// All messages are ignored.
}