Remove redundant log statements, minor cleanups.

This commit is contained in:
Andreas Fackler 2018-11-01 11:22:40 +01:00 committed by Andreas Fackler
parent ad35ebafd5
commit 13340d7ad0
8 changed files with 181 additions and 178 deletions

View File

@ -1,7 +1,6 @@
use std::collections::BTreeMap;
use std::fmt::{self, Display};
use std::result;
use std::sync::Arc;
use std::{fmt, result};
use bincode;
use log::debug;
@ -129,7 +128,6 @@ impl<N: NodeIdT, S: SessionIdT> BinaryAgreement<N, S> {
}
// Set the initial estimated value to the input value.
self.estimated = Some(input);
debug!("{}: Input {}", self, input);
let sbvb_step = self.sbv_broadcast.handle_input(input)?;
self.handle_sbvb_step(sbvb_step)
}
@ -401,7 +399,7 @@ impl<N: NodeIdT, S: SessionIdT> BinaryAgreement<N, S> {
}
}
impl<N: NodeIdT, S: SessionIdT> Display for BinaryAgreement<N, S> {
impl<N: NodeIdT, S: SessionIdT> fmt::Display for BinaryAgreement<N, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(
f,

View File

@ -1,9 +1,10 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{fmt, result};
use byteorder::{BigEndian, ByteOrder};
use hex_fmt::{HexFmt, HexList};
use log::{debug, error, info};
use log::{debug, warn};
use reed_solomon_erasure as rse;
use reed_solomon_erasure::ReedSolomon;
@ -123,10 +124,6 @@ impl<N: NodeIdT> Broadcast<N> {
let data_shard_num = self.coding.data_shard_count();
let parity_shard_num = self.coding.parity_shard_count();
debug!(
"Data shards: {}, parity shards: {}",
data_shard_num, parity_shard_num
);
// Insert the length of `v` so it can be decoded without the padding.
let payload_len = value.len() as u32;
value.splice(0..0, 0..4); // Insert four bytes at the beginning.
@ -142,21 +139,23 @@ impl<N: NodeIdT> Broadcast<N> {
// zeros.
value.resize(shard_len * (data_shard_num + parity_shard_num), 0);
debug!("value_len {}, shard_len {}", value_len, shard_len);
// Divide the vector into chunks/shards.
let shards_iter = value.chunks_mut(shard_len);
// Convert the iterator over slices into a vector of slices.
let mut shards: Vec<&mut [u8]> = shards_iter.collect();
debug!("Shards before encoding: {:0.10}", HexList(&shards));
// Construct the parity chunks/shards
self.coding
.encode(&mut shards)
.expect("the size and number of shards is correct");
.expect("incorrect shard size or number");
debug!("Shards: {:0.10}", HexList(&shards));
debug!(
"{}: Value: {} bytes, {} per shard. Shards: {:0.10}",
self,
value_len,
shard_len,
HexList(&shards)
);
// Create a Merkle tree from the shards.
let mtree = MerkleTree::from_vec(shards.into_iter().map(|shard| shard.to_vec()).collect());
@ -186,18 +185,17 @@ impl<N: NodeIdT> Broadcast<N> {
fn handle_value(&mut self, sender_id: &N, p: Proof<Vec<u8>>) -> Result<Step<N>> {
// If the sender is not the proposer or if this is not the first `Value`, ignore.
if *sender_id != self.proposer_id {
info!(
"Node {:?} received Value from {:?} instead of {:?}.",
self.our_id(),
sender_id,
self.proposer_id
);
let fault_kind = FaultKind::ReceivedValueFromNonProposer;
return Ok(Fault::new(sender_id.clone(), fault_kind).into());
}
if self.echo_sent {
info!("Node {:?} received multiple Values.", self.our_id());
if self.echos.get(self.our_id()) == Some(&p) {
warn!(
"Node {:?} received Value({:?}) multiple times from {:?}.",
self.our_id(),
HexProof(&p),
sender_id
);
return Ok(Step::default());
} else {
return Ok(Fault::new(sender_id.clone(), FaultKind::MultipleValues).into());
@ -216,13 +214,18 @@ impl<N: NodeIdT> Broadcast<N> {
/// Handles a received `Echo` message.
fn handle_echo(&mut self, sender_id: &N, p: Proof<Vec<u8>>) -> Result<Step<N>> {
// If the sender has already sent `Echo`, ignore.
if self.echos.contains_key(sender_id) {
info!(
"Node {:?} received multiple Echos from {:?}.",
self.our_id(),
sender_id,
);
return Ok(Step::default());
if let Some(old_p) = self.echos.get(sender_id) {
if *old_p == p {
warn!(
"Node {:?} received Echo({:?}) multiple times from {:?}.",
self.our_id(),
HexProof(&p),
sender_id,
);
return Ok(Step::default());
} else {
return Ok(Fault::new(sender_id.clone(), FaultKind::MultipleEchos).into());
}
}
// If the proof is invalid, log the faulty-node behavior, and ignore.
@ -246,13 +249,18 @@ impl<N: NodeIdT> Broadcast<N> {
/// Handles a received `Ready` message.
fn handle_ready(&mut self, sender_id: &N, hash: &Digest) -> Result<Step<N>> {
// If the sender has already sent a `Ready` before, ignore.
if self.readys.contains_key(sender_id) {
info!(
"Node {:?} received multiple Readys from {:?}.",
self.our_id(),
sender_id
);
return Ok(Step::default());
if let Some(old_hash) = self.readys.get(sender_id) {
if old_hash == hash {
warn!(
"Node {:?} received Ready({:?}) multiple times from {:?}.",
self.our_id(),
hash,
sender_id
);
return Ok(Step::default());
} else {
return Ok(Fault::new(sender_id.clone(), FaultKind::MultipleReadys).into());
}
}
self.readys.insert(sender_id.clone(), hash.to_vec());
@ -314,34 +322,59 @@ impl<N: NodeIdT> Broadcast<N> {
}
})
}).collect();
if let Some(value) = decode_from_shards(&mut leaf_values, &self.coding, hash) {
if let Some(value) = self.decode_from_shards(&mut leaf_values, hash) {
self.decided = true;
Ok(Step::default().with_output(value))
} else {
Ok(Step::default())
let fault_kind = FaultKind::BroadcastDecoding;
Ok(Fault::new(self.proposer_id.clone(), fault_kind).into())
}
}
/// Returns `true` if the proof is valid and has the same index as the node ID. Otherwise
/// logs an info message.
fn validate_proof(&self, p: &Proof<Vec<u8>>, id: &N) -> bool {
if !p.validate(self.netinfo.num_nodes()) {
info!(
"Node {:?} received invalid proof: {:?}",
self.our_id(),
HexProof(&p)
);
false
} else if self.netinfo.node_index(id) != Some(p.index()) {
info!(
"Node {:?} received proof for wrong position: {:?}.",
self.our_id(),
HexProof(&p)
);
false
} else {
true
/// Interpolates the missing shards and glues together the data shards to retrieve the value.
fn decode_from_shards(
&self,
leaf_values: &mut [Option<Box<[u8]>>],
root_hash: &Digest,
) -> Option<Vec<u8>> {
// Try to interpolate the Merkle tree using the Reed-Solomon erasure coding scheme.
self.coding.reconstruct_shards(leaf_values).ok()?;
// Collect shards for tree construction.
let shards: Vec<Vec<u8>> = leaf_values
.iter()
.filter_map(|l| l.as_ref().map(|v| v.to_vec()))
.collect();
debug!("{}: Reconstructed shards: {:0.10}", self, HexList(&shards));
// Construct the Merkle tree.
let mtree = MerkleTree::from_vec(shards);
// If the root hash of the reconstructed tree does not match the one
// received with proofs then abort.
if mtree.root_hash() != root_hash {
return None; // The proposer is faulty.
}
// Reconstruct the value from the data shards:
// Concatenate the leaf values that are data shards. The first four bytes are
// interpreted as the payload size, and the padding beyond that size is dropped.
let count = self.coding.data_shard_count();
let mut bytes = mtree.into_values().into_iter().take(count).flatten();
let payload_len = match (bytes.next(), bytes.next(), bytes.next(), bytes.next()) {
(Some(b0), Some(b1), Some(b2), Some(b3)) => {
BigEndian::read_u32(&[b0, b1, b2, b3]) as usize
}
_ => return None, // The proposer is faulty: no payload size.
};
let payload: Vec<u8> = bytes.take(payload_len).collect();
debug!("{}: Glued data shards {:0.10}", self, HexFmt(&payload));
Some(payload)
}
/// Returns `true` if the proof is valid and has the same index as the node ID.
fn validate_proof(&self, p: &Proof<Vec<u8>>, id: &N) -> bool {
self.netinfo.node_index(id) == Some(p.index()) && p.validate(self.netinfo.num_nodes())
}
/// Returns the number of nodes that have sent us an `Echo` message with this hash.
@ -361,6 +394,12 @@ impl<N: NodeIdT> Broadcast<N> {
}
}
impl<N: NodeIdT> fmt::Display for Broadcast<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(f, "{:?} Broadcast({:?})", self.our_id(), self.proposer_id)
}
}
/// A wrapper for `ReedSolomon` that doesn't panic if there are no parity shards.
#[derive(Debug)]
enum Coding {
@ -426,47 +465,3 @@ impl Coding {
Ok(())
}
}
fn decode_from_shards(
leaf_values: &mut [Option<Box<[u8]>>],
coding: &Coding,
root_hash: &Digest,
) -> Option<Vec<u8>> {
// Try to interpolate the Merkle tree using the Reed-Solomon erasure coding scheme.
if let Err(err) = coding.reconstruct_shards(leaf_values) {
error!("Shard reconstruction failed: {:?}", err); // Faulty proposer
return None;
}
// Collect shards for tree construction.
let shards: Vec<Vec<u8>> = leaf_values
.iter()
.filter_map(|l| l.as_ref().map(|v| v.to_vec()))
.collect();
debug!("Reconstructed shards: {:0.10}", HexList(&shards));
// Construct the Merkle tree.
let mtree = MerkleTree::from_vec(shards);
// If the root hash of the reconstructed tree does not match the one
// received with proofs then abort.
if mtree.root_hash() != root_hash {
None // The proposer is faulty.
} else {
// Reconstruct the value from the data shards.
glue_shards(mtree, coding.data_shard_count())
}
}
/// Concatenates the first `n` leaf values of a Merkle tree `m`. The first four bytes are
/// interpreted as the payload size, and the padding beyond that size is dropped.
fn glue_shards(m: MerkleTree<Vec<u8>>, n: usize) -> Option<Vec<u8>> {
let mut bytes = m.into_values().into_iter().take(n).flatten();
let payload_len = match (bytes.next(), bytes.next(), bytes.next(), bytes.next()) {
(Some(b0), Some(b1), Some(b2), Some(b3)) => BigEndian::read_u32(&[b0, b1, b2, b3]) as usize,
_ => return None, // The proposing node is faulty: no payload size.
};
let payload: Vec<u8> = bytes.take(payload_len).collect();
debug!("Glued data shards {:0.10}", HexFmt(&payload));
Some(payload)
}

View File

@ -1,11 +1,11 @@
use std::collections::BTreeMap;
use std::mem;
use std::sync::Arc;
use std::{fmt, mem, result};
use bincode;
use crypto::Signature;
use derivative::Derivative;
use log::{debug, info};
use log::{debug, warn};
use rand::{self, Rand};
use serde::{de::DeserializeOwned, Serialize};
@ -202,7 +202,6 @@ where
message: HbMessage<N>,
) -> Result<Step<C, N>> {
if !self.netinfo.is_node_validator(sender_id) {
info!("Unknown sender {:?} of message {:?}", sender_id, message);
return Err(ErrorKind::UnknownSender.into());
}
// Handle the message.
@ -222,17 +221,12 @@ where
sig: Signature,
) -> Result<FaultLog<N>> {
if !self.verify_signature(sender_id, &sig, &kg_msg)? {
info!("Invalid signature from {:?} for: {:?}.", sender_id, kg_msg);
let fault_kind = FaultKind::InvalidKeyGenMessageSignature;
return Ok(Fault::new(sender_id.clone(), fault_kind).into());
}
let kgs = match self.key_gen_state {
Some(ref mut kgs) => kgs,
None => {
info!(
"Unexpected key gen message from {:?}: {:?}.",
sender_id, kg_msg
);
return Ok(Fault::new(sender_id.clone(), FaultKind::UnexpectedKeyGenMessage).into());
}
};
@ -291,7 +285,7 @@ where
let change = if let Some(kgs) = self.take_ready_key_gen() {
// If DKG completed, apply the change, restart Honey Badger, and inform the user.
debug!("{:?} DKG for {:?} complete!", self.our_id(), kgs.change);
debug!("{}: DKG for {:?} complete!", self, kgs.change);
self.netinfo = kgs.key_gen.into_network_info()?;
self.restart_honey_badger(batch_epoch + 1, None);
ChangeState::Complete(Change::NodeChange(kgs.change))
@ -347,14 +341,14 @@ where
if self.key_gen_state.as_ref().map(|kgs| &kgs.change) == Some(change) {
return Ok(Step::default()); // The change is the same as before. Continue DKG as is.
}
debug!("{:?} Restarting DKG for {:?}.", self.our_id(), change);
debug!("{}: Restarting DKG for {:?}.", self, change);
// Use the existing key shares - with the change applied - as keys for DKG.
let mut pub_keys = self.netinfo.public_key_map().clone();
if match *change {
NodeChange::Remove(ref id) => pub_keys.remove(id).is_none(),
NodeChange::Add(ref id, ref pk) => pub_keys.insert(id.clone(), pk.clone()).is_some(),
} {
info!("{:?} No-op change: {:?}", self.our_id(), change);
warn!("{}: No-op change: {:?}", self, change);
}
self.restart_honey_badger(epoch, None);
// TODO: This needs to be the same as `num_faulty` will be in the _new_
@ -486,3 +480,13 @@ where
Ok(pk_opt.map_or(false, |pk| pk.verify(&sig, ser)))
}
}
impl<C, N> fmt::Display for DynamicHoneyBadger<C, N>
where
C: Contribution + Serialize + DeserializeOwned,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
{
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(f, "{:?} DHB(era: {})", self.our_id(), self.start_epoch)
}
}

View File

@ -17,17 +17,23 @@ pub enum FaultKind {
/// `HoneyBadger` received a decryption share for an unaccepted proposer.
UnexpectedDecryptionShare,
/// `HoneyBadger` was unable to deserialize a proposer's ciphertext.
DeserializeCiphertext,
/// `HoneyBadger` received an invalid ciphertext from the proposer.
InvalidCiphertext,
/// `HoneyBadger` was unable to decrypt a share received from a proposer.
ShareDecryptionFailed,
/// `ThresholdDecryption` received multiple shares from the same sender.
MultipleDecryptionShares,
/// `Broadcast` received a `Value` from a node other than the proposer.
ReceivedValueFromNonProposer,
/// `Broadcast` received multiple different `Value`s from the proposer.
MultipleValues,
/// `Broadcast` received multiple different `Echo`s from the same sender.
MultipleEchos,
/// `Broadcast` received multiple different `Ready`s from the same sender.
MultipleReadys,
/// `Broadcast` received an `Echo` message containing an invalid proof.
InvalidProof,
/// `Broadcast` received shards with valid proofs that couldn't be decoded.
BroadcastDecoding,
/// `HoneyBadger` could not deserialize bytes (i.e. a serialized Batch)
/// from a given proposer into a vector of transactions.
BatchDeserializationFailed,

View File

@ -8,7 +8,7 @@ use std::sync::Arc;
use bincode;
use crypto::Ciphertext;
use log::{debug, error, warn};
use log::error;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use serde_derive::Serialize;
@ -207,7 +207,7 @@ where
require_decryption: bool,
) -> Result<Self> {
let epoch_id = EpochId { hb_id, epoch };
let cs = Subset::new(netinfo.clone(), &epoch_id).map_err(ErrorKind::CreateSubset)?;
let cs = Subset::new(netinfo.clone(), epoch_id).map_err(ErrorKind::CreateSubset)?;
Ok(EpochState {
epoch,
netinfo,
@ -300,12 +300,6 @@ where
Err(_) => fault_log.append(id, FaultKind::BatchDeserializationFailed),
}
}
debug!(
"{:?} Epoch {} output {:?}",
self.netinfo.our_id(),
self.epoch,
batch.contributions.keys().collect::<Vec<_>>()
);
Some((batch, fault_log))
}
@ -318,6 +312,7 @@ where
let mut has_seen_done = false;
for cs_output in cs_outputs {
if has_seen_done {
// TODO: Is there any way we can statically guarantee that?
error!("`SubsetOutput::Done` was not the last `SubsetOutput`");
}
@ -380,12 +375,8 @@ where
fn send_decryption_share(&mut self, proposer_id: N, v: &[u8]) -> Result<Step<C, N>> {
let ciphertext: Ciphertext = match bincode::deserialize(v) {
Ok(ciphertext) => ciphertext,
Err(err) => {
warn!(
"Cannot deserialize ciphertext from {:?}: {:?}",
proposer_id, err
);
return Ok(Fault::new(proposer_id, FaultKind::InvalidCiphertext).into());
Err(_) => {
return Ok(Fault::new(proposer_id, FaultKind::DeserializeCiphertext).into());
}
};
let td_result = match self.decryption.entry(proposer_id.clone()) {
@ -395,8 +386,7 @@ where
match td_result {
Ok(td_step) => self.process_decryption(proposer_id, td_step),
Err(td::Error::InvalidCiphertext(_)) => {
warn!("Invalid ciphertext from {:?}", proposer_id);
Ok(Fault::new(proposer_id.clone(), FaultKind::ShareDecryptionFailed).into())
Ok(Fault::new(proposer_id, FaultKind::InvalidCiphertext).into())
}
Err(err) => Err(ErrorKind::ThresholdDecryption(err).into()),
}

View File

@ -23,11 +23,9 @@
//! * Once all `BinaryAgreement` instances have decided, `Subset` returns the set of all proposed
//! values for which the decision was "yes".
use std::borrow::Borrow;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::{self, Display};
use std::result;
use std::sync::Arc;
use std::{fmt, result};
use failure::Fail;
use hex_fmt::HexFmt;
@ -82,6 +80,7 @@ pub enum Message<N: Rand> {
pub struct Subset<N: Rand, S> {
/// Shared network information.
netinfo: Arc<NetworkInfo<N>>,
session_id: S,
broadcast_instances: BTreeMap<N, Broadcast<N>>,
ba_instances: BTreeMap<N, BaInstance<N, S>>,
/// `None` means that that item has already been output.
@ -128,7 +127,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
///
/// If multiple `Subset`s are instantiated within a single network, they must use different
/// session identifiers to foil replay attacks.
pub fn new<T: Borrow<S>>(netinfo: Arc<NetworkInfo<N>>, session_id: T) -> Result<Self> {
pub fn new(netinfo: Arc<NetworkInfo<N>>, session_id: S) -> Result<Self> {
// Create all broadcast instances.
let mut broadcast_instances: BTreeMap<N, Broadcast<N>> = BTreeMap::new();
for proposer_id in netinfo.all_ids() {
@ -143,7 +142,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
let mut ba_instances: BTreeMap<N, BaInstance<N, S>> = BTreeMap::new();
for (proposer_idx, proposer_id) in netinfo.all_ids().enumerate() {
let s_id = BaSessionId {
subset_id: session_id.borrow().clone(),
subset_id: session_id.clone(),
proposer_idx: proposer_idx as u32,
};
ba_instances.insert(
@ -154,6 +153,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
Ok(Subset {
netinfo,
session_id,
broadcast_instances,
ba_instances,
broadcast_results: BTreeMap::new(),
@ -169,8 +169,8 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
if !self.netinfo.is_validator() {
return Ok(Step::default());
}
debug!("{} proposing {:0.10}", self, HexFmt(&value));
let id = self.our_id().clone();
debug!("{:?} Proposing {:0.10}", id, HexFmt(&value));
self.process_broadcast(&id, |bc| bc.handle_input(value))
}
@ -241,7 +241,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
};
let val_to_insert = if let Some(true) = self.ba_results.get(proposer_id) {
debug!(" {:?} → {:0.10}", proposer_id, HexFmt(&value));
debug!("{} {:?} → {:0.10}", self, proposer_id, HexFmt(&value));
step.output
.push(SubsetOutput::Contribution(proposer_id.clone(), value));
None
@ -253,7 +253,13 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
.broadcast_results
.insert(proposer_id.clone(), val_to_insert)
{
error!("Duplicate insert in broadcast_results: {:?}", inval)
// TODO: Merge `broadcast_instances` and `broadcast_results` into one map. The value
// type should be an enum: either an instance, or a result. Then this would be
// statically impossible.
error!(
"Duplicate insert in broadcast_results: {:?}",
inval.map(HexFmt)
)
}
let set_binary_agreement_input = |ba: &mut BaInstance<N, S>| ba.handle_input(true);
step.extend(self.process_binary_agreement(proposer_id, set_binary_agreement_input)?);
@ -297,9 +303,8 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
}
debug!(
"{:?} Updated Binary Agreement results: {:?}",
self.our_id(),
self.ba_results
"{} updated Binary Agreement results: {:?}",
self, self.ba_results
);
if accepted {
@ -325,7 +330,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
.get_mut(proposer_id)
.and_then(Option::take)
{
debug!(" {:?} → {:0.10}", proposer_id, HexFmt(&value));
debug!("{} {:?} → {:0.10}", self, proposer_id, HexFmt(&value));
step.output
.push(SubsetOutput::Contribution(proposer_id.clone(), value));
}
@ -349,10 +354,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
if self.ba_results.len() < self.netinfo.num_nodes() {
return None;
}
debug!(
"{:?} All Binary Agreement instances have terminated",
self.our_id()
);
debug!("{}: All Binary Agreement instances have terminated.", self);
// All instances of BinaryAgreement that delivered `true` (or "1" in the paper).
let delivered_1: BTreeSet<&N> = self
.ba_results
@ -361,9 +363,8 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
.map(|(k, _)| k)
.collect();
debug!(
"{:?} Binary Agreement instances that delivered 1: {:?}",
self.our_id(),
delivered_1
"{}: Binary Agreement instances that delivered `true`: {:?}",
self, delivered_1
);
// Results of Broadcast instances in `delivered_1`
@ -375,7 +376,7 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
.collect();
if delivered_1.len() == broadcast_results.len() {
debug!("{:?} Binary Agreement instances completed:", self.our_id());
debug!("{}: All Binary Agreement instances completed.", self);
self.decided = true;
Some(SubsetOutput::Done)
} else {
@ -384,6 +385,12 @@ impl<N: NodeIdT + Rand, S: SessionIdT> Subset<N, S> {
}
}
impl<N: NodeIdT + Rand, S: SessionIdT> fmt::Display for Subset<N, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(f, "{:?} Subset({})", self.our_id(), self.session_id)
}
}
/// A session identifier for a `BinaryAgreement` instance run as a `Subset` sub-algorithm. It
/// consists of the `Subset` instance's own session ID, and the index of the proposer whose
/// contribution this `BinaryAgreement` is about.
@ -393,7 +400,7 @@ struct BaSessionId<S> {
proposer_idx: u32,
}
impl<S: Display> Display for BaSessionId<S> {
impl<S: fmt::Display> fmt::Display for BaSessionId<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(
f,

View File

@ -178,7 +178,6 @@ use crypto::{
Ciphertext, Fr, G1Affine, PublicKey, PublicKeySet, SecretKey, SecretKeyShare,
};
use failure::Fail;
use log::error;
use pairing::{CurveAffine, Field};
use rand;
use serde_derive::{Deserialize, Serialize};
@ -394,12 +393,10 @@ impl<N: NodeIdT> SyncKeyGen<N> {
/// Returns the index of the node, or `None` if it is unknown.
fn node_index(&self, node_id: &N) -> Option<u64> {
if let Some(node_idx) = self.pub_keys.keys().position(|id| id == node_id) {
Some(node_idx as u64)
} else {
error!("Unknown node {:?}", node_id);
None
}
self.pub_keys
.keys()
.position(|id| id == node_id)
.map(|idx| idx as u64)
}
/// Returns the number of complete parts. If this is at least `threshold + 1`, the keys can

View File

@ -15,10 +15,11 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{fmt, result};
use crypto::{self, hash_g2, Signature, SignatureShare, G2};
use failure::Fail;
use log::{debug, error};
use log::debug;
use rand_derive::Rand;
use serde_derive::{Deserialize, Serialize};
@ -135,33 +136,34 @@ impl<N: NodeIdT> ThresholdSign<N> {
return Ok(Step::default());
}
let Message(share) = message;
if let Some(pk_i) = self.netinfo.public_key_share(sender_id) {
if !pk_i.verify_g2(&share, self.msg_hash) {
// Log the faulty node and ignore the invalid share.
let fault_kind = FaultKind::UnverifiedSignatureShareSender;
return Ok(Fault::new(sender_id.clone(), fault_kind).into());
}
self.received_shares.insert(sender_id.clone(), share);
} else {
return Err(Error::UnknownSender);
if !self
.netinfo
.public_key_share(sender_id)
.ok_or(Error::UnknownSender)?
.verify_g2(&share, self.msg_hash)
{
// Report the faulty node and ignore the invalid share.
let fault_kind = FaultKind::UnverifiedSignatureShareSender;
return Ok(Fault::new(sender_id.clone(), fault_kind).into());
}
self.received_shares.insert(sender_id.clone(), share);
self.try_output()
}
fn try_output(&mut self) -> Result<Step<N>> {
debug!(
"{:?} received {} shares, had_input = {}",
self.our_id(),
self.received_shares.len(),
self.had_input
);
if self.had_input && self.received_shares.len() > self.netinfo.num_faulty() {
let sig = self.combine_and_verify_sig()?;
debug!("{:?} output {:?}", self.our_id(), sig);
let step = self.sign()?; // Before terminating, make sure we sent our share.
debug!("{} output {:?}", self, sig);
self.terminated = true;
let step = self.handle_input(())?; // Before terminating, make sure we sent our share.
Ok(step.with_output(sig))
} else {
debug!(
"{} received {} shares, {}",
self,
self.received_shares.len(),
if self.had_input { ", had input" } else { "" }
);
Ok(Step::default())
}
}
@ -181,11 +183,15 @@ impl<N: NodeIdT> ThresholdSign<N> {
.public_key()
.verify_g2(&sig, self.msg_hash)
{
// Abort
error!("{:?} main public key verification failed", self.our_id());
Err(Error::VerificationFailed)
} else {
Ok(sig)
}
}
}
impl<N: NodeIdT> fmt::Display for ThresholdSign<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(f, "{:?} TS({:?})", self.our_id(), self.msg_hash)
}
}