2018-06-20 01:11:33 -07:00
//! A _synchronous_ algorithm for dealerless distributed key generation.
//!
//! This protocol is meant to run in a _completely synchronous_ setting where each node handles all
2018-06-22 01:17:44 -07:00
//! messages in the same order. It can e.g. exchange messages as transactions on top of
//! `HoneyBadger`, or it can run "on-chain", i.e. committing its messages to a blockchain.
2018-06-20 01:11:33 -07:00
//!
//! Its messages are encrypted where necessary, so they can be publicly broadcast.
//!
//! When the protocol completes, every node receives a secret key share suitable for threshold
//! signatures and encryption. The secret master key is not known by anyone. The protocol succeeds
2018-07-19 02:32:32 -07:00
//! if up to _t_ nodes are faulty, where _t_ is the `threshold` parameter. The number of nodes must
//! be at least _2 t + 1_.
2018-06-20 01:11:33 -07:00
//!
2018-07-19 02:32:32 -07:00
//! ## Usage
//!
//! Before beginning the threshold key generation process, each validator needs to generate a
//! regular (non-threshold) key pair and multicast its public key. `SyncKeyGen::new` returns the
2018-07-19 03:28:15 -07:00
//! instance itself and a `Part` message, containing a contribution to the new threshold keys.
//! It needs to be sent to all nodes. `SyncKeyGen::handle_part` in turn produces an `Ack`
2018-07-19 02:32:32 -07:00
//! message, which is also multicast.
//!
2018-07-19 03:28:15 -07:00
//! All nodes must handle the exact same set of `Part` and `Ack` messages. In this sense the
//! algorithm is synchronous: If Alice's `Ack` was handled by Bob but not by Carol, Bob and
2018-07-19 02:32:32 -07:00
//! Carol could receive different public key sets, and secret key shares that don't match. One way
//! to ensure this is to commit the messages to a public ledger before handling them, e.g. by
//! feeding them to a preexisting instance of Honey Badger. The messages will then appear in the
//! same order for everyone.
//!
//! To complete the process, call `SyncKeyGen::generate`. It produces your secret key share and the
//! public key set.
//!
//! While not asynchronous, the algorithm is fault tolerant: It is not necessary to handle a
2018-07-19 03:28:15 -07:00
//! `Part` and all `Ack` messages from every validator. A `Part` is _complete_ if it
//! received at least _2 t + 1_ valid `Ack`s. Only complete `Part`s are used for key
//! generation in the end, and as long as at least one complete `Part` is from a correct node,
2018-07-19 02:32:32 -07:00
//! the new key set is secure. You can use `SyncKeyGen::is_ready` to check whether at least
2018-07-19 03:28:15 -07:00
//! _t + 1_ `Part`s are complete. So all nodes can call `generate` as soon as `is_ready` returns
2018-07-19 02:32:32 -07:00
//! `true`.
//!
//! Alternatively, you can use any stronger criterion, too, as long as all validators call
//! `generate` at the same point, i.e. after handling the same set of messages.
2018-07-19 03:28:15 -07:00
//! `SyncKeyGen::count_complete` returns the number of complete `Part` messages. And
//! `SyncKeyGen::is_node_ready` can be used to check whether a particular node's `Part` is
2018-07-19 02:32:32 -07:00
//! complete.
//!
2018-07-19 03:28:15 -07:00
//! Finally, observer nodes can also use `SyncKeyGen`. For observers, no `Part` and `Ack`
2018-07-19 02:32:32 -07:00
//! messages will be created and they do not need to send anything. On completion, they will only
//! receive the public key set, but no secret key share.
//!
//! ## Example
2018-07-18 08:59:02 -07:00
//!
//! ```
//! extern crate rand;
//! extern crate hbbft;
//!
//! use std::collections::BTreeMap;
//!
//! use hbbft::crypto::{PublicKey, SecretKey, SignatureShare};
2018-07-19 03:28:15 -07:00
//! use hbbft::sync_key_gen::{PartOutcome, SyncKeyGen};
2018-07-18 08:59:02 -07:00
//!
//! // Two out of four shares will suffice to sign or encrypt something.
//! let (threshold, node_num) = (1, 4);
//!
//! // Generate individual key pairs for encryption. These are not suitable for threshold schemes.
//! let sec_keys: Vec<SecretKey> = (0..node_num).map(|_| rand::random()).collect();
//! let pub_keys: BTreeMap<usize, PublicKey> = sec_keys
//! .iter()
//! .map(SecretKey::public_key)
//! .enumerate()
//! .collect();
//!
2018-07-19 03:28:15 -07:00
//! // Create the `SyncKeyGen` instances. The constructor also outputs the part that needs to
//! // be sent to all other participants, so we save the parts together with their sender ID.
2018-07-18 08:59:02 -07:00
//! let mut nodes = BTreeMap::new();
2018-07-19 03:28:15 -07:00
//! let mut parts = Vec::new();
2018-07-18 08:59:02 -07:00
//! for (id, sk) in sec_keys.into_iter().enumerate() {
2018-07-19 03:28:15 -07:00
//! let (sync_key_gen, opt_part) = SyncKeyGen::new(id, sk, pub_keys.clone(), threshold);
2018-07-18 08:59:02 -07:00
//! nodes.insert(id, sync_key_gen);
2018-07-19 03:28:15 -07:00
//! parts.push((id, opt_part.unwrap())); // Would be `None` for observer nodes.
2018-07-18 08:59:02 -07:00
//! }
//!
2018-07-19 03:28:15 -07:00
//! // All nodes now handle the parts and send the resulting `Ack` messages.
//! let mut acks = Vec::new();
//! for (sender_id, part) in parts {
2018-07-18 08:59:02 -07:00
//! for (&id, node) in &mut nodes {
2018-07-19 03:28:15 -07:00
//! match node.handle_part(&sender_id, part.clone()) {
//! Some(PartOutcome::Valid(ack)) => acks.push((id, ack)),
//! Some(PartOutcome::Invalid(faults)) => panic!("Invalid part: {:?}", faults),
//! None => panic!("We are not an observer, so we should send Ack."),
2018-07-18 08:59:02 -07:00
//! }
//! }
//! }
//!
2018-07-19 03:28:15 -07:00
//! // Finally, we handle all the `Ack`s.
//! for (sender_id, ack) in acks {
2018-07-18 08:59:02 -07:00
//! for node in nodes.values_mut() {
2018-07-19 03:28:15 -07:00
//! node.handle_ack(&sender_id, ack.clone());
2018-07-18 08:59:02 -07:00
//! }
//! }
//!
//! // We have all the information and can generate the key sets.
//! let pub_key_set = nodes[&0].generate().0; // The public key set: identical for all nodes.
//! let mut secret_key_shares = BTreeMap::new();
//! for (&id, node) in &mut nodes {
//! assert!(node.is_ready());
//! let (pks, opt_sks) = node.generate();
//! assert_eq!(pks, pub_key_set); // All nodes now know the public keys and public key shares.
//! let sks = opt_sks.expect("Not an observer node: We receive a secret key share.");
//! secret_key_shares.insert(id as u64, sks);
//! }
//!
//! // Two out of four nodes can now sign a message. Each share can be verified individually.
//! let msg = "Nodes 0 and 1 does not agree with this.";
//! let mut sig_shares: BTreeMap<u64, SignatureShare> = BTreeMap::new();
//! for (&id, sks) in &secret_key_shares {
//! if id != 0 && id != 1 {
//! let sig_share = sks.sign(msg);
//! let pks = pub_key_set.public_key_share(id as u64);
//! assert!(pks.verify(&sig_share, msg));
//! sig_shares.insert(id as u64, sig_share);
//! }
//! }
//!
//! // Two signatures are over the threshold. They are enough to produce a signature that matches
//! // the public master key.
//! let sig = pub_key_set
//! .combine_signatures(&sig_shares)
//! .expect("The shares can be combined.");
//! assert!(pub_key_set.public_key().verify(&sig, msg));
//! ```
//!
//! ## How it works
2018-06-20 01:11:33 -07:00
//!
//! The algorithm is based on ideas from
//! [Distributed Key Generation in the Wild](https://eprint.iacr.org/2012/377.pdf) and
//! [A robust threshold elliptic curve digital signature providing a new verifiable secret sharing scheme](https://www.researchgate.net/profile/Ihab_Ali/publication/4205262_A_robust_threshold_elliptic_curve_digital_signature_providing_a_new_verifiable_secret_sharing_scheme/links/02e7e538f15726323a000000/A-robust-threshold-elliptic-curve-digital-signature-providing-a-new-verifiable-secret-sharing-scheme.pdf?origin=publication_detail).
//!
2018-06-22 01:17:44 -07:00
//! In a trusted dealer scenario, the following steps occur:
//!
2018-07-19 02:32:32 -07:00
//! 1. Dealer generates a `BivarPoly` of degree _t_ and publishes the `BivarCommitment` which is
2018-06-22 01:17:44 -07:00
//! used to publicly verify the polynomial's values.
2018-07-19 02:32:32 -07:00
//! 2. Dealer sends _row_ _m > 0_ to node number _m_.
//! 3. Node _m_, in turn, sends _value_ number _s_ to node number _s_.
//! 4. This process continues until _2 t + 1_ nodes confirm they have received a valid row. If
//! there are at most _t_ faulty nodes, we know that at least _t + 1_ correct nodes sent on an
//! entry of every other node's column to that node.
//! 5. This means every node can reconstruct its column, and the value at _0_ of its column.
//! 6. These values all lie on a univariate polynomial of degree _t_ and can be used as secret keys.
2018-06-22 01:17:44 -07:00
//!
2018-07-19 02:32:32 -07:00
//! In our _dealerless_ environment, at least _t + 1_ nodes each generate a polynomial using the
2018-06-22 01:17:44 -07:00
//! method above. The sum of the secret keys we received from each node is then used as our secret
//! key. No single node knows the secret master key.
2018-06-20 01:11:33 -07:00
use std ::collections ::btree_map ::Entry ;
use std ::collections ::{ BTreeMap , BTreeSet } ;
2018-06-27 04:45:25 -07:00
use std ::fmt ::{ self , Debug , Formatter } ;
2018-06-20 01:11:33 -07:00
use bincode ;
2018-06-21 08:31:15 -07:00
use pairing ::bls12_381 ::{ Fr , G1Affine } ;
2018-06-20 01:11:33 -07:00
use pairing ::{ CurveAffine , Field } ;
use rand ::OsRng ;
2018-06-25 04:07:31 -07:00
use crypto ::poly ::{ BivarCommitment , BivarPoly , Poly } ;
use crypto ::serde_impl ::field_vec ::FieldWrap ;
2018-07-17 06:54:12 -07:00
use crypto ::{ Ciphertext , PublicKey , PublicKeySet , SecretKey , SecretKeyShare } ;
2018-07-08 09:41:50 -07:00
use fault_log ::{ FaultKind , FaultLog } ;
2018-07-19 03:18:01 -07:00
use messaging ::NetworkInfo ;
2018-06-25 04:07:31 -07:00
2018-06-20 01:11:33 -07:00
// TODO: No need to send our own row and value to ourselves.
2018-07-19 02:32:32 -07:00
/// A submission by a validator for the key generation. It must to be sent to all participating
/// nodes and handled by all of them, including the one that produced it.
///
/// The message contains a commitment to a bivariate polynomial, and for each node, an encrypted
2018-07-19 03:28:15 -07:00
/// row of values. If this message receives enough `Ack`s, it will be used as summand to produce
2018-07-19 02:32:32 -07:00
/// the the key set in the end.
2018-06-27 04:45:25 -07:00
#[ derive(Deserialize, Serialize, Clone, Hash, Eq, PartialEq) ]
2018-07-19 03:28:15 -07:00
pub struct Part ( BivarCommitment , Vec < Ciphertext > ) ;
2018-06-20 01:11:33 -07:00
2018-07-19 03:28:15 -07:00
impl Debug for Part {
2018-06-27 04:45:25 -07:00
fn fmt ( & self , f : & mut Formatter ) -> fmt ::Result {
let deg = self . 0. degree ( ) ;
let len = self . 1. len ( ) ;
2018-07-19 03:28:15 -07:00
write! ( f , " Part(<degree {}>, <{} rows>) " , deg , len )
2018-06-27 04:45:25 -07:00
}
}
2018-07-19 03:28:15 -07:00
/// A confirmation that we have received and verified a validator's part. It must be sent to
2018-07-19 02:32:32 -07:00
/// all participating nodes and handled by all of them, including ourselves.
///
2018-07-19 03:28:15 -07:00
/// The message is only produced after we verified our row against the commitment in the `Part`.
2018-07-19 02:32:32 -07:00
/// For each node, it contains one encrypted value of that row.
2018-06-27 04:45:25 -07:00
#[ derive(Deserialize, Serialize, Clone, Hash, Eq, PartialEq) ]
2018-07-19 03:28:15 -07:00
pub struct Ack ( u64 , Vec < Ciphertext > ) ;
2018-06-20 01:11:33 -07:00
2018-07-19 03:28:15 -07:00
impl Debug for Ack {
2018-06-27 04:45:25 -07:00
fn fmt ( & self , f : & mut Formatter ) -> fmt ::Result {
2018-07-19 03:28:15 -07:00
write! ( f , " Ack({}, <{} values> " , self . 0 , self . 1. len ( ) )
2018-06-27 04:45:25 -07:00
}
}
2018-06-20 01:11:33 -07:00
/// The information needed to track a single proposer's secret sharing process.
struct ProposalState {
/// The proposer's commitment.
2018-06-21 08:31:15 -07:00
commit : BivarCommitment ,
2018-07-19 03:28:15 -07:00
/// The verified values we received from `Ack` messages.
2018-06-20 01:11:33 -07:00
values : BTreeMap < u64 , Fr > ,
2018-07-19 03:28:15 -07:00
/// The nodes which have acked this part, valid or not.
acks : BTreeSet < u64 > ,
2018-06-20 01:11:33 -07:00
}
impl ProposalState {
2018-07-19 03:28:15 -07:00
/// Creates a new part state with a commitment.
2018-06-21 08:31:15 -07:00
fn new ( commit : BivarCommitment ) -> ProposalState {
2018-06-20 01:11:33 -07:00
ProposalState {
commit ,
values : BTreeMap ::new ( ) ,
2018-07-19 03:28:15 -07:00
acks : BTreeSet ::new ( ) ,
2018-06-20 01:11:33 -07:00
}
}
2018-07-19 03:28:15 -07:00
/// Returns `true` if at least `2 * threshold + 1` nodes have acked.
2018-06-20 01:11:33 -07:00
fn is_complete ( & self , threshold : usize ) -> bool {
2018-07-19 03:28:15 -07:00
self . acks . len ( ) > 2 * threshold
2018-06-20 01:11:33 -07:00
}
}
2018-07-19 03:28:15 -07:00
/// The outcome of handling and verifying a `Part` message.
pub enum PartOutcome < NodeUid : Clone > {
2018-07-19 02:32:32 -07:00
/// The message was valid: the part of it that was encrypted to us matched the public
2018-07-19 03:28:15 -07:00
/// commitment, so we can multicast an `Ack` message for it.
Valid ( Ack ) ,
// If the Part message passed to `handle_part()` is invalid, the
2018-07-08 09:41:50 -07:00
// fault is logged and passed onto the caller.
2018-07-19 02:32:32 -07:00
/// The message was invalid: the part encrypted to us was malformed or didn't match the
2018-07-19 03:28:15 -07:00
/// commitment. We now know that the proposer is faulty, and dont' send an `Ack`.
2018-07-08 09:41:50 -07:00
Invalid ( FaultLog < NodeUid > ) ,
}
2018-06-20 01:11:33 -07:00
/// A synchronous algorithm for dealerless distributed key generation.
///
/// It requires that all nodes handle all messages in the exact same order.
2018-06-25 04:07:31 -07:00
pub struct SyncKeyGen < NodeUid > {
2018-07-19 03:18:01 -07:00
/// Our node ID.
our_uid : NodeUid ,
2018-06-20 01:11:33 -07:00
/// Our node index.
2018-06-27 02:37:05 -07:00
our_idx : Option < u64 > ,
2018-06-20 01:11:33 -07:00
/// Our secret key.
2018-06-21 08:31:15 -07:00
sec_key : SecretKey ,
2018-06-20 01:11:33 -07:00
/// The public keys of all nodes, by node index.
2018-06-25 04:07:31 -07:00
pub_keys : BTreeMap < NodeUid , PublicKey > ,
2018-07-19 03:28:15 -07:00
/// Proposed bivariate polynomials.
parts : BTreeMap < u64 , ProposalState > ,
2018-06-20 01:11:33 -07:00
/// The degree of the generated polynomial.
threshold : usize ,
}
2018-07-08 09:41:50 -07:00
impl < NodeUid : Ord + Clone + Debug > SyncKeyGen < NodeUid > {
2018-07-19 03:28:15 -07:00
/// Creates a new `SyncKeyGen` instance, together with the `Part` message that should be
2018-07-19 02:32:32 -07:00
/// multicast to all nodes.
///
2018-07-19 03:28:15 -07:00
/// If we are not a validator but only an observer, no `Part` message is produced and no
2018-07-19 02:32:32 -07:00
/// messages need to be sent.
2018-06-20 01:11:33 -07:00
pub fn new (
2018-07-19 03:18:01 -07:00
our_uid : NodeUid ,
2018-06-21 08:31:15 -07:00
sec_key : SecretKey ,
2018-06-25 04:07:31 -07:00
pub_keys : BTreeMap < NodeUid , PublicKey > ,
2018-06-20 01:11:33 -07:00
threshold : usize ,
2018-07-19 03:28:15 -07:00
) -> ( SyncKeyGen < NodeUid > , Option < Part > ) {
2018-06-25 04:07:31 -07:00
let our_idx = pub_keys
. keys ( )
2018-07-19 03:18:01 -07:00
. position ( | uid | * uid = = our_uid )
2018-06-27 02:37:05 -07:00
. map ( | idx | idx as u64 ) ;
2018-06-20 01:11:33 -07:00
let key_gen = SyncKeyGen {
2018-07-19 03:18:01 -07:00
our_uid ,
2018-06-20 01:11:33 -07:00
our_idx ,
sec_key ,
pub_keys ,
2018-07-19 03:28:15 -07:00
parts : BTreeMap ::new ( ) ,
2018-06-20 01:11:33 -07:00
threshold ,
} ;
2018-06-27 02:37:05 -07:00
if our_idx . is_none ( ) {
2018-07-19 03:28:15 -07:00
return ( key_gen , None ) ; // No part: we are an observer.
2018-06-27 02:37:05 -07:00
}
let mut rng = OsRng ::new ( ) . expect ( " OS random number generator " ) ;
2018-07-19 03:28:15 -07:00
let our_part = BivarPoly ::random ( threshold , & mut rng ) ;
let commit = our_part . commitment ( ) ;
2018-06-27 02:37:05 -07:00
let encrypt = | ( i , pk ) : ( usize , & PublicKey ) | {
2018-07-19 03:28:15 -07:00
let row = our_part . row ( i as u64 + 1 ) ;
2018-06-27 02:37:05 -07:00
let bytes = bincode ::serialize ( & row ) . expect ( " failed to serialize row " ) ;
pk . encrypt ( & bytes )
} ;
let rows : Vec < _ > = key_gen . pub_keys . values ( ) . enumerate ( ) . map ( encrypt ) . collect ( ) ;
2018-07-19 03:28:15 -07:00
( key_gen , Some ( Part ( commit , rows ) ) )
2018-06-20 01:11:33 -07:00
}
2018-07-19 03:28:15 -07:00
/// Handles a `Part` message. If it is valid, returns an `Ack` message to be broadcast.
2018-07-19 02:32:32 -07:00
///
/// If we are only an observer, `None` is returned instead and no messages need to be sent.
2018-07-19 03:28:15 -07:00
pub fn handle_part (
2018-06-20 01:11:33 -07:00
& mut self ,
2018-06-25 04:07:31 -07:00
sender_id : & NodeUid ,
2018-07-19 03:28:15 -07:00
Part ( commit , rows ) : Part ,
) -> Option < PartOutcome < NodeUid > > {
2018-06-27 02:37:05 -07:00
let sender_idx = self . node_index ( sender_id ) ? ;
let opt_commit_row = self . our_idx . map ( | idx | commit . row ( idx + 1 ) ) ;
2018-07-19 03:28:15 -07:00
match self . parts . entry ( sender_idx ) {
Entry ::Occupied ( _ ) = > return None , // Ignore multiple parts.
2018-06-20 01:11:33 -07:00
Entry ::Vacant ( entry ) = > {
entry . insert ( ProposalState ::new ( commit ) ) ;
}
}
2018-07-19 03:28:15 -07:00
// If we are only an observer, return `None`. We don't need to send `Ack`.
2018-06-27 02:37:05 -07:00
let our_idx = self . our_idx ? ;
let commit_row = opt_commit_row ? ;
let ser_row = self . sec_key . decrypt ( rows . get ( our_idx as usize ) ? ) ? ;
2018-07-08 09:41:50 -07:00
let row : Poly = if let Ok ( row ) = bincode ::deserialize ( & ser_row ) {
row
} else {
// Log the faulty node and ignore invalid messages.
2018-07-19 03:28:15 -07:00
let fault_log = FaultLog ::init ( sender_id . clone ( ) , FaultKind ::InvalidPartMessage ) ;
return Some ( PartOutcome ::Invalid ( fault_log ) ) ;
2018-07-08 09:41:50 -07:00
} ;
2018-06-20 01:11:33 -07:00
if row . commitment ( ) ! = commit_row {
2018-07-19 03:28:15 -07:00
debug! ( " Invalid part from node {}. " , sender_idx ) ;
let fault_log = FaultLog ::init ( sender_id . clone ( ) , FaultKind ::InvalidPartMessage ) ;
return Some ( PartOutcome ::Invalid ( fault_log ) ) ;
2018-06-20 01:11:33 -07:00
}
// The row is valid: now encrypt one value for each node.
2018-06-27 02:37:05 -07:00
let encrypt = | ( idx , pk ) : ( usize , & PublicKey ) | {
let val = row . evaluate ( idx as u64 + 1 ) ;
let wrap = FieldWrap ::new ( val ) ;
// TODO: Handle errors.
let ser_val = bincode ::serialize ( & wrap ) . expect ( " failed to serialize value " ) ;
pk . encrypt ( ser_val )
} ;
let values = self . pub_keys . values ( ) . enumerate ( ) . map ( encrypt ) . collect ( ) ;
2018-07-19 03:28:15 -07:00
Some ( PartOutcome ::Valid ( Ack ( sender_idx , values ) ) )
2018-06-20 01:11:33 -07:00
}
2018-07-19 03:28:15 -07:00
/// Handles an `Ack` message.
pub fn handle_ack ( & mut self , sender_id : & NodeUid , ack : Ack ) -> FaultLog < NodeUid > {
2018-07-08 09:41:50 -07:00
let mut fault_log = FaultLog ::new ( ) ;
2018-06-27 02:37:05 -07:00
if let Some ( sender_idx ) = self . node_index ( sender_id ) {
2018-07-19 03:28:15 -07:00
if let Err ( err ) = self . handle_ack_or_err ( sender_idx , ack ) {
debug! ( " Invalid ack from node {}: {} " , sender_idx , err ) ;
fault_log . append ( sender_id . clone ( ) , FaultKind ::InvalidAckMessage ) ;
2018-06-27 02:37:05 -07:00
}
2018-06-20 01:11:33 -07:00
}
2018-07-08 09:41:50 -07:00
fault_log
2018-06-20 01:11:33 -07:00
}
2018-07-19 03:28:15 -07:00
/// Returns the number of complete parts. If this is at least `threshold + 1`, the keys can
2018-06-20 01:11:33 -07:00
/// be generated, but it is possible to wait for more to increase security.
pub fn count_complete ( & self ) -> usize {
2018-07-19 03:28:15 -07:00
self . parts
2018-06-20 01:11:33 -07:00
. values ( )
2018-07-19 03:28:15 -07:00
. filter ( | part | part . is_complete ( self . threshold ) )
2018-06-20 01:11:33 -07:00
. count ( )
}
2018-07-19 03:28:15 -07:00
/// Returns `true` if the part of the given node is complete.
2018-06-27 04:45:25 -07:00
pub fn is_node_ready ( & self , proposer_id : & NodeUid ) -> bool {
self . node_index ( proposer_id )
2018-07-19 03:28:15 -07:00
. and_then ( | proposer_idx | self . parts . get ( & proposer_idx ) )
. map_or ( false , | part | part . is_complete ( self . threshold ) )
2018-06-20 01:11:33 -07:00
}
2018-07-19 03:28:15 -07:00
/// Returns `true` if enough parts are complete to safely generate the new key.
2018-06-20 01:11:33 -07:00
pub fn is_ready ( & self ) -> bool {
self . count_complete ( ) > self . threshold
}
2018-07-19 02:32:32 -07:00
/// Returns the new secret key share and the public key set.
2018-06-20 01:11:33 -07:00
///
/// These are only secure if `is_ready` returned `true`. Otherwise it is not guaranteed that
/// none of the nodes knows the secret master key.
2018-07-19 02:32:32 -07:00
///
/// If we are only an observer node, no secret key share is returned.
2018-07-17 06:54:12 -07:00
pub fn generate ( & self ) -> ( PublicKeySet , Option < SecretKeyShare > ) {
2018-06-20 01:11:33 -07:00
let mut pk_commit = Poly ::zero ( ) . commitment ( ) ;
2018-06-27 02:37:05 -07:00
let mut opt_sk_val = self . our_idx . map ( | _ | Fr ::zero ( ) ) ;
2018-07-19 03:28:15 -07:00
let is_complete = | part : & & ProposalState | part . is_complete ( self . threshold ) ;
for part in self . parts . values ( ) . filter ( is_complete ) {
pk_commit + = part . commit . row ( 0 ) ;
2018-06-27 02:37:05 -07:00
if let Some ( sk_val ) = opt_sk_val . as_mut ( ) {
2018-07-19 03:28:15 -07:00
let row : Poly = Poly ::interpolate ( part . values . iter ( ) . take ( self . threshold + 1 ) ) ;
2018-06-27 02:37:05 -07:00
sk_val . add_assign ( & row . evaluate ( 0 ) ) ;
}
2018-06-20 01:11:33 -07:00
}
2018-07-17 06:54:12 -07:00
let opt_sk = opt_sk_val . map ( SecretKeyShare ::from_value ) ;
2018-06-27 02:37:05 -07:00
( pk_commit . into ( ) , opt_sk )
2018-06-20 01:11:33 -07:00
}
2018-07-19 03:18:01 -07:00
/// Consumes the instance, generates the key set and returns a new `NetworkInfo` with the new
/// keys.
pub fn into_network_info ( self ) -> NetworkInfo < NodeUid > {
let ( pk_set , opt_sk_share ) = self . generate ( ) ;
let sk_share = opt_sk_share . unwrap_or_default ( ) ; // TODO: Make this an option.
NetworkInfo ::new ( self . our_uid , sk_share , pk_set , self . sec_key , self . pub_keys )
}
2018-07-19 03:28:15 -07:00
/// Handles an `Ack` message or returns an error string.
fn handle_ack_or_err (
2018-06-20 01:11:33 -07:00
& mut self ,
sender_idx : u64 ,
2018-07-19 03:28:15 -07:00
Ack ( proposer_idx , values ) : Ack ,
2018-06-20 01:11:33 -07:00
) -> Result < ( ) , String > {
2018-06-27 02:37:05 -07:00
if values . len ( ) ! = self . pub_keys . len ( ) {
return Err ( " wrong node count " . to_string ( ) ) ;
}
2018-07-19 03:28:15 -07:00
let part = self
. parts
2018-06-20 01:11:33 -07:00
. get_mut ( & proposer_idx )
. ok_or_else ( | | " sender does not exist " . to_string ( ) ) ? ;
2018-07-19 03:28:15 -07:00
if ! part . acks . insert ( sender_idx ) {
return Err ( " duplicate ack " . to_string ( ) ) ;
2018-06-20 01:11:33 -07:00
}
2018-06-27 02:37:05 -07:00
let our_idx = match self . our_idx {
Some ( our_idx ) = > our_idx ,
None = > return Ok ( ( ) ) , // We are only an observer. Nothing to decrypt for us.
} ;
2018-06-20 01:11:33 -07:00
let ser_val : Vec < u8 > = self
. sec_key
2018-06-27 02:37:05 -07:00
. decrypt ( & values [ our_idx as usize ] )
2018-06-20 01:11:33 -07:00
. ok_or_else ( | | " value decryption failed " . to_string ( ) ) ? ;
let val = bincode ::deserialize ::< FieldWrap < Fr , Fr > > ( & ser_val )
. map_err ( | err | format! ( " deserialization failed: {:?} " , err ) ) ?
. into_inner ( ) ;
2018-07-19 03:28:15 -07:00
if part . commit . evaluate ( our_idx + 1 , sender_idx + 1 ) ! = G1Affine ::one ( ) . mul ( val ) {
2018-06-20 01:11:33 -07:00
return Err ( " wrong value " . to_string ( ) ) ;
}
2018-07-19 03:28:15 -07:00
part . values . insert ( sender_idx + 1 , val ) ;
2018-06-20 01:11:33 -07:00
Ok ( ( ) )
}
2018-06-27 02:37:05 -07:00
/// Returns the index of the node, or `None` if it is unknown.
fn node_index ( & self , node_id : & NodeUid ) -> Option < u64 > {
if let Some ( node_idx ) = self . pub_keys . keys ( ) . position ( | uid | uid = = node_id ) {
Some ( node_idx as u64 )
} else {
debug! ( " Unknown node {:?} " , node_id ) ;
None
}
}
2018-06-20 01:11:33 -07:00
}