clippy fixes; remove old FROST code

Conrado Gouvea 2022-10-14 22:27:47 -03:00 committed by Deirdre Connolly
parent 4b0714b8ca
commit 6f0dffc12e
17 changed files with 28 additions and 1978 deletions

View File

@@ -65,10 +65,10 @@ pub struct Item<S: SpendAuth, B: Binding<Scalar = S::Scalar, Point = S::Point>>
impl<S: SpendAuth, B: Binding<Scalar = S::Scalar, Point = S::Point>> Item<S, B> {
/// Create a batch item from a `SpendAuth` signature.
pub fn from_spendauth<'msg, M: AsRef<[u8]>>(
pub fn from_spendauth<M: AsRef<[u8]>>(
vk_bytes: VerificationKeyBytes<S>,
sig: Signature<S>,
msg: &'msg M,
msg: &M,
) -> Self {
// Compute c now to avoid dependency on the msg lifetime.
let c = HStar::<S>::default()
@@ -82,10 +82,10 @@ impl<S: SpendAuth, B: Binding<Scalar = S::Scalar, Point = S::Point>> Item<S, B>
}
/// Create a batch item from a `Binding` signature.
pub fn from_binding<'msg, M: AsRef<[u8]>>(
pub fn from_binding<M: AsRef<[u8]>>(
vk_bytes: VerificationKeyBytes<B>,
sig: Signature<B>,
msg: &'msg M,
msg: &M,
) -> Self {
// Compute c now to avoid dependency on the msg lifetime.
let c = HStar::<B>::default()
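
These two hunks are among the clippy fixes: the explicit `'msg` lifetime appears only on the `msg` parameter and is never tied to the return type, so it can be elided without changing the signature (the pattern clippy's needless_lifetimes lint flags). A minimal standalone sketch of the same change, using an illustrative `Digest` type that is not part of this crate:

// Hypothetical helper mirroring the shape of `Item::from_spendauth`; `Digest`
// is illustrative only.
struct Digest(usize);

// Before: a named lifetime that is used exactly once and never escapes.
fn digest_explicit<'msg, M: AsRef<[u8]>>(msg: &'msg M) -> Digest {
    Digest(msg.as_ref().len())
}

// After: the elided form clippy suggests; the two signatures are equivalent.
fn digest<M: AsRef<[u8]>>(msg: &M) -> Digest {
    Digest(msg.as_ref().len())
}

fn main() {
    assert_eq!(digest_explicit(b"zcash").0, digest(b"zcash").0);
}

Because `c` is computed eagerly inside the constructor, no borrow of `msg` outlives the call, which is why the lifetime parameter adds nothing.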

View File

@@ -1,744 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of reddsa.
// Copyright (c) 2020-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Chelsea H. Komlo <me@chelseakomlo.com>
// - Deirdre Connolly <deirdre@zfnd.org>
// - isis agora lovecruft <isis@patternsinthevoid.net>
//! An implementation of FROST (Flexible Round-Optimized Schnorr Threshold)
//! signatures.
//!
//! This implementation has been [independently
//! audited](https://github.com/ZcashFoundation/redjubjub/blob/main/zcash-frost-audit-report-20210323.pdf)
//! as of commit 76ba4ef / March 2021. If you are interested in deploying
//! FROST, please do not hesitate to consult the FROST authors.
//!
//! This implementation currently only supports key generation using a central
//! dealer. In the future, we will add support for key generation via a DKG,
//! as specified in the FROST paper.
//! Internally, keygen_with_dealer generates keys using Verifiable Secret
//! Sharing, where shares are generated using Shamir Secret Sharing.
use alloc::vec::Vec;
use std::{
collections::HashMap,
convert::{TryFrom, TryInto},
marker::PhantomData,
};
use group::{
cofactor::CofactorCurve,
ff::{Field, PrimeField},
Curve, Group, GroupEncoding,
};
use rand_core::{CryptoRng, RngCore};
use zeroize::DefaultIsZeroes;
use crate::{private::SealedScalar, sapling, HStar, Signature, SpendAuth, VerificationKey};
/// A secret scalar value representing a single signer's secret key.
#[derive(Clone, Copy, Default, PartialEq)]
pub struct Secret<S: SpendAuth>(pub(crate) S::Scalar);
// Zeroizes `Secret` to be the `Default` value on drop (when it goes out of
// scope). Luckily the derived `Default` includes the `Default` impl of
// jubjub::Fr/Scalar, which is four 0u64's under the hood.
impl<S: SpendAuth> DefaultIsZeroes for Secret<S> {}
impl From<jubjub::Scalar> for Secret<sapling::SpendAuth> {
fn from(source: jubjub::Scalar) -> Secret<sapling::SpendAuth> {
Secret(source)
}
}
/// A public group element that represents a single signer's public key.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct Public<S: SpendAuth>(S::Point);
impl From<jubjub::ExtendedPoint> for Public<sapling::SpendAuth> {
fn from(source: jubjub::ExtendedPoint) -> Public<sapling::SpendAuth> {
Public(source)
}
}
/// A share generated by performing a (t-out-of-n) secret sharing scheme where
/// n is the total number of shares and t is the threshold required to
/// reconstruct the secret; in this case we use Shamir's secret sharing.
#[derive(Clone)]
pub struct Share<S: SpendAuth> {
receiver_index: u64,
/// Secret Key.
pub(crate) value: Secret<S>,
/// The commitments to be distributed among signers.
pub(crate) commitment: ShareCommitment<S>,
}
/// A Jubjub point that is a commitment to one coefficient of our secret
/// polynomial.
///
/// This is a (public) commitment to one coefficient of a secret polynomial used
/// for performing verifiable secret sharing for a Shamir secret share.
#[derive(Clone, PartialEq)]
pub(crate) struct Commitment<S: SpendAuth>(pub(crate) <S::Point as CofactorCurve>::Affine);
/// Contains the commitments to the coefficients for our secret polynomial _f_,
/// used to generate participants' key shares.
///
/// [`ShareCommitment`] contains a set of commitments to the coefficients (which
/// themselves are scalars) for a secret polynomial f, where f is used to
/// generate each ith participant's key share f(i). Participants use this set of
/// commitments to perform verifiable secret sharing.
///
/// Note that participants MUST be assured that they have the *same*
/// [`ShareCommitment`], either by performing pairwise comparison, or by using
/// some agreed-upon public location for publication, where each participant can
/// ensure that they received the correct (and same) value.
#[derive(Clone)]
pub struct ShareCommitment<S: SpendAuth>(pub(crate) Vec<Commitment<S>>);
/// The product of all signers' individual commitments, published as part of the
/// final signature.
#[derive(PartialEq)]
pub struct GroupCommitment<S: SpendAuth>(pub(crate) <S::Point as CofactorCurve>::Affine);
/// Secret and public key material generated by a dealer performing
/// [`keygen_with_dealer`].
///
/// To derive a FROST keypair, the receiver of the [`SharePackage`] *must* call
/// .into(), which under the hood also performs validation.
pub struct SharePackage<S: SpendAuth> {
/// The public signing key that represents the entire group.
pub(crate) group_public: VerificationKey<S>,
/// Denotes the participant index each share is owned by.
pub index: u64,
/// This participant's public key.
pub(crate) public: Public<S>,
/// This participant's share.
pub(crate) share: Share<S>,
}
impl<S: SpendAuth> TryFrom<SharePackage<S>> for KeyPackage<S> {
type Error = &'static str;
/// Tries to verify a share and construct a [`KeyPackage`] from it.
///
/// When participants receive a [`SharePackage`] from the dealer, they
/// *MUST* verify the integrity of the share before continuing on to
/// transform it into a signing/verification keypair. Here, we assume that
/// every participant has the same view of the commitment issued by the
/// dealer, but implementations *MUST* make sure that all participants have
/// a consistent view of this commitment in practice.
fn try_from(sharepackage: SharePackage<S>) -> Result<Self, &'static str> {
verify_share(&sharepackage.share)?;
Ok(KeyPackage {
index: sharepackage.index,
secret_share: sharepackage.share.value,
public: sharepackage.public,
group_public: sharepackage.group_public,
})
}
}
/// A FROST keypair, which can be generated either by a trusted dealer or using
/// a DKG.
///
/// When using a central dealer, [`SharePackage`]s are distributed to
/// participants, who then perform verification, before deriving
/// [`KeyPackage`]s, which they store to later use during signing.
#[allow(dead_code)]
pub struct KeyPackage<S: SpendAuth> {
index: u64,
secret_share: Secret<S>,
public: Public<S>,
group_public: VerificationKey<S>,
}
/// Public data that contains all the signer's public keys as well as the
/// group public key.
///
/// Used for verification purposes before publishing a signature.
pub struct PublicKeyPackage<S: SpendAuth> {
/// When performing signing, the coordinator must ensure that they have the
/// correct view of participants' public keys to perform verification before
/// publishing a signature. signer_pubkeys represents all signers for a
/// signing operation.
pub(crate) signer_pubkeys: HashMap<u64, Public<S>>,
/// group_public represents the joint public key for the entire group.
pub group_public: VerificationKey<S>,
}
/// Allows all participants' keys to be generated using a central, trusted
/// dealer.
///
/// Under the hood, this performs verifiable secret sharing, which itself uses
/// Shamir secret sharing, from which each share becomes a participant's secret
/// key. The output from this function is a set of shares along with one single
/// commitment that participants use to verify the integrity of the share. The
/// number of signers is limited to 255.
pub fn keygen_with_dealer<R: RngCore + CryptoRng, S: SpendAuth>(
num_signers: u8,
threshold: u8,
mut rng: R,
) -> Result<(Vec<SharePackage<S>>, PublicKeyPackage<S>), &'static str> {
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
let secret = Secret(S::Scalar::from_bytes_wide(&bytes));
let group_public = VerificationKey::from(&secret.0);
let shares = generate_shares(&secret, num_signers, threshold, rng)?;
let mut sharepackages: Vec<SharePackage<S>> = Vec::with_capacity(num_signers as usize);
let mut signer_pubkeys: HashMap<u64, Public<S>> = HashMap::with_capacity(num_signers as usize);
for share in shares {
let signer_public = Public(S::basepoint() * share.value.0);
sharepackages.push(SharePackage {
index: share.receiver_index,
share: share.clone(),
public: signer_public,
group_public,
});
signer_pubkeys.insert(share.receiver_index, signer_public);
}
Ok((
sharepackages,
PublicKeyPackage {
signer_pubkeys,
group_public,
},
))
}
/// Verifies that a share is consistent with a commitment.
///
/// This ensures that this participant's share has been generated using the same
/// mechanism as all other signing participants. Note that participants *MUST*
/// ensure that they have the same view as all other participants of the
/// commitment!
fn verify_share<S: SpendAuth>(share: &Share<S>) -> Result<(), &'static str> {
let f_result = S::basepoint() * share.value.0;
let x = S::Scalar::from(share.receiver_index as u64);
let (_, result) = share.commitment.0.iter().fold(
(S::Scalar::one(), S::Point::identity()),
|(x_to_the_i, sum_so_far), comm_i| (x_to_the_i * x, sum_so_far + comm_i.0 * x_to_the_i),
);
if !(f_result == result) {
return Err("Share is invalid.");
}
Ok(())
}
/// Creates secret shares for a given secret.
///
/// This function accepts a secret from which shares are generated. While in
/// FROST this secret should always be generated randomly, we allow this secret
/// to be specified for this internal function for testability.
///
/// Internally, [`generate_shares`] performs verifiable secret sharing, which
/// generates shares via Shamir Secret Sharing, and then generates public
/// commitments to those shares.
///
/// More specifically, [`generate_shares`]:
/// - Randomly samples coefficients [a, b, c]; these represent a secret
/// polynomial f
/// - For each participant i, their secret share is f(i)
/// - The commitment to the secret polynomial f is [g^a, g^b, g^c]
fn generate_shares<R: RngCore + CryptoRng, S: SpendAuth>(
secret: &Secret<S>,
numshares: u8,
threshold: u8,
mut rng: R,
) -> Result<Vec<Share<S>>, &'static str> {
if threshold < 1 {
return Err("Threshold cannot be 0");
}
if numshares < 1 {
return Err("Number of shares cannot be 0");
}
if threshold > numshares {
return Err("Threshold cannot exceed numshares");
}
let numcoeffs = threshold - 1;
let mut coefficients: Vec<S::Scalar> = Vec::with_capacity(threshold as usize);
let mut shares: Vec<Share<S>> = Vec::with_capacity(numshares as usize);
let mut commitment: ShareCommitment<S> =
ShareCommitment(Vec::with_capacity(threshold as usize));
for _ in 0..numcoeffs {
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
coefficients.push(S::Scalar::from_bytes_wide(&bytes));
}
// Verifiable secret sharing, so that participants can verify that their secret is consistent
// with every other participant's.
commitment
.0
.push(Commitment((S::basepoint() * secret.0).to_affine()));
for c in &coefficients {
commitment
.0
.push(Commitment((S::basepoint() * c).to_affine()));
}
// Evaluate the polynomial with `secret` as the constant term
// and `coeffs` as the other coefficients at the point x=share_index,
// using Horner's method.
for index in 1..numshares + 1 {
let scalar_index = S::Scalar::from(index as u64);
let mut value = S::Scalar::zero();
// Polynomial evaluation, for this index
for i in (0..numcoeffs).rev() {
value += &coefficients[i as usize];
value *= scalar_index;
}
value += secret.0;
shares.push(Share {
receiver_index: index as u64,
value: Secret(value),
commitment: commitment.clone(),
});
}
Ok(shares)
}
/// Comprised of hiding and binding nonces.
///
/// Note that [`SigningNonces`] must be used *only once* for a signing
/// operation; re-using nonces will result in leakage of a signer's long-lived
/// signing key.
#[derive(Clone, Copy, Default)]
pub struct SigningNonces<S: SpendAuth> {
hiding: S::Scalar,
binding: S::Scalar,
}
// Zeroizes `SigningNonces` to be the `Default` value on drop (when it goes out
// of scope). Luckily the derived `Default` includes the `Default` impl of the
// `jubjub::Fr/Scalar`'s, which is four 0u64's under the hood.
impl<S: SpendAuth> DefaultIsZeroes for SigningNonces<S> {}
impl<S: SpendAuth> SigningNonces<S> {
/// Generates a new signing nonce.
///
/// Each participant generates signing nonces before performing a signing
/// operation.
pub fn new<R>(rng: &mut R) -> Self
where
R: CryptoRng + RngCore,
{
fn random_nonzero_bytes<R>(rng: &mut R) -> [u8; 64]
where
R: CryptoRng + RngCore,
{
let mut bytes = [0; 64];
loop {
rng.fill_bytes(&mut bytes);
if bytes != [0; 64] {
return bytes;
}
}
}
// The values of 'hiding' and 'binding' must be non-zero so that commitments are not the
// identity.
let hiding = S::Scalar::from_bytes_wide(&random_nonzero_bytes(rng));
let binding = S::Scalar::from_bytes_wide(&random_nonzero_bytes(rng));
Self { hiding, binding }
}
}
/// Published by each participant in the first round of the signing protocol.
///
/// This step can be batched if desired by the implementation. Each
/// SigningCommitment can be used for exactly *one* signature.
#[derive(Copy, Clone)]
pub struct SigningCommitments<S: SpendAuth> {
/// The participant index
pub(crate) index: u64,
/// The hiding point.
pub(crate) hiding: S::Point,
/// The binding point.
pub(crate) binding: S::Point,
}
impl<S: SpendAuth> From<(u64, &SigningNonces<S>)> for SigningCommitments<S> {
/// For SpendAuth signatures only, not Binding signatures, in RedDSA/Zcash.
fn from((index, nonces): (u64, &SigningNonces<S>)) -> Self {
Self {
index,
hiding: S::basepoint() * nonces.hiding,
binding: S::basepoint() * nonces.binding,
}
}
}
/// Generated by the coordinator of the signing operation and distributed to
/// each signing party.
pub struct SigningPackage<S: SpendAuth> {
/// The set of commitments participants published in the first round of the
/// protocol.
pub signing_commitments: Vec<SigningCommitments<S>>,
/// Message which each participant will sign.
///
/// Each signer should perform protocol-specific verification on the message.
pub message: Vec<u8>,
}
/// A representation of a single signature used in FROST structures and messages.
#[derive(Clone, Copy, Default, PartialEq)]
pub struct SignatureResponse<S: SpendAuth>(pub(crate) S::Scalar);
/// A participant's signature share, which the coordinator will use to aggregate
/// with all other signer's shares into the joint signature.
#[derive(Clone, Copy, Default)]
pub struct SignatureShare<S: SpendAuth> {
/// Represents the participant index.
pub(crate) index: u64,
/// This participant's signature over the message.
pub(crate) signature: SignatureResponse<S>,
}
// Zeroizes `SignatureShare` to be the `Default` value on drop (when it goes out
// of scope). Luckily the derived `Default` includes the `Default` impl of
// jubjub::Fr/Scalar, which is four 0u64's under the hood, and u64, which is
// 0u64.
impl<S: SpendAuth> DefaultIsZeroes for SignatureShare<S> {}
impl<S: SpendAuth> SignatureShare<S> {
/// Tests if a signature share issued by a participant is valid before
/// aggregating it into a final joint signature to publish.
pub fn check_is_valid(
&self,
pubkey: &Public<S>,
lambda_i: S::Scalar,
commitment: S::Point,
challenge: S::Scalar,
) -> Result<(), &'static str> {
if (S::basepoint() * self.signature.0) != (commitment + pubkey.0 * challenge * lambda_i) {
return Err("Invalid signature share");
}
Ok(())
}
}
/// Done once by each participant, to generate _their_ nonces and commitments
/// that are then used during signing.
///
/// When performing signing using two rounds, num_nonces would equal 1, to
/// perform the first round. Batching entails generating more than one
/// nonce/commitment pair at a time. Nonces should be stored in secret storage
/// for later use, whereas the commitments are published.
/// The number of nonces is limited to 255. This limit can be increased if it
/// turns out to be too conservative.
// TODO: Make sure the above is a correct statement, fix if needed in:
// https://github.com/ZcashFoundation/redjubjub/issues/111
pub fn preprocess<R, S>(
num_nonces: u8,
participant_index: u64,
rng: &mut R,
) -> (Vec<SigningNonces<S>>, Vec<SigningCommitments<S>>)
where
R: CryptoRng + RngCore,
S: SpendAuth,
{
let mut signing_nonces: Vec<SigningNonces<S>> = Vec::with_capacity(num_nonces as usize);
let mut signing_commitments: Vec<SigningCommitments<S>> =
Vec::with_capacity(num_nonces as usize);
for _ in 0..num_nonces {
let nonces = SigningNonces::new(rng);
signing_commitments.push(SigningCommitments::from((participant_index, &nonces)));
signing_nonces.push(nonces);
}
(signing_nonces, signing_commitments)
}
/// Generates the binding factor that ensures each signature share is strongly
/// bound to a signing set, specific set of commitments, and a specific message.
fn gen_rho_i<S: SpendAuth>(index: u64, signing_package: &SigningPackage<S>) -> S::Scalar {
// Hash signature message with HStar before deriving the binding factor.
//
// To avoid a collision with other inputs to the hash that generates the
// binding factor, we should hash our input message first. Our 'standard'
// hash is HStar, which uses a domain separator already, and is the same one
// that generates the binding factor.
let message_hash = HStar::<S>::default()
.update(signing_package.message.as_slice())
.finalize();
let mut hasher = HStar::<S>::default();
hasher
.update("FROST_rho".as_bytes())
.update(index.to_be_bytes())
.update(message_hash.to_repr());
for item in signing_package.signing_commitments.iter() {
hasher.update(item.index.to_be_bytes());
let hiding_bytes = item.hiding.to_bytes();
hasher.update(hiding_bytes);
let binding_bytes = item.binding.to_bytes();
hasher.update(binding_bytes);
}
hasher.finalize()
}
/// Generates the group commitment which is published as part of the joint
/// Schnorr signature.
fn gen_group_commitment<S: SpendAuth>(
signing_package: &SigningPackage<S>,
bindings: &HashMap<u64, S::Scalar>,
) -> Result<GroupCommitment<S>, &'static str> {
let identity = S::Point::identity();
let mut accumulator = identity;
for commitment in signing_package.signing_commitments.iter() {
// The following check prevents a party from accidentally revealing their share.
// Note that the '&&' operator would be sufficient.
if identity == commitment.binding || identity == commitment.hiding {
return Err("Commitment equals the identity.");
}
let rho_i = bindings
.get(&commitment.index)
.ok_or("No matching commitment index")?;
accumulator += commitment.hiding + (commitment.binding * rho_i)
}
Ok(GroupCommitment(accumulator.to_affine()))
}
/// Generates the challenge as is required for Schnorr signatures.
fn gen_challenge<S: SpendAuth>(
signing_package: &SigningPackage<S>,
group_commitment: &GroupCommitment<S>,
group_public: &VerificationKey<S>,
) -> S::Scalar {
let group_commitment_bytes = group_commitment.0.to_bytes();
HStar::<S>::default()
.update(group_commitment_bytes)
.update(group_public.bytes.bytes)
.update(signing_package.message.as_slice())
.finalize()
}
/// Generates the lagrange coefficient for the i'th participant.
fn gen_lagrange_coeff<S: SpendAuth>(
signer_index: u64,
signing_package: &SigningPackage<S>,
) -> Result<S::Scalar, &'static str> {
let mut num = S::Scalar::one();
let mut den = S::Scalar::one();
for commitment in signing_package.signing_commitments.iter() {
if commitment.index == signer_index {
continue;
}
num *= S::Scalar::from(commitment.index as u64);
den *= S::Scalar::from(commitment.index as u64) - S::Scalar::from(signer_index as u64);
}
if den == S::Scalar::zero() {
return Err("Duplicate shares provided");
}
// TODO: handle this unwrap better like other CtOption's
let lagrange_coeff = num * den.invert().unwrap();
Ok(lagrange_coeff)
}
/// Performed once by each participant selected for the signing operation.
///
/// Receives the message to be signed and a set of signing commitments and a set
/// of randomizing commitments to be used in that signing operation, including
/// that for this participant.
///
/// Assumes the participant has already determined which nonce corresponds with
/// the commitment that was assigned by the coordinator in the SigningPackage.
pub fn sign<S: SpendAuth>(
signing_package: &SigningPackage<S>,
participant_nonces: SigningNonces<S>,
share_package: &SharePackage<S>,
) -> Result<SignatureShare<S>, &'static str> {
let mut bindings: HashMap<u64, S::Scalar> =
HashMap::with_capacity(signing_package.signing_commitments.len());
for comm in signing_package.signing_commitments.iter() {
let rho_i = gen_rho_i(comm.index, &signing_package);
bindings.insert(comm.index, rho_i);
}
let lambda_i = gen_lagrange_coeff(share_package.index, &signing_package)?;
let group_commitment = gen_group_commitment(&signing_package, &bindings)?;
let challenge = gen_challenge(
&signing_package,
&group_commitment,
&share_package.group_public,
);
let participant_rho_i = bindings
.get(&share_package.index)
.ok_or("No matching binding!")?;
// The Schnorr signature share
let signature: S::Scalar = participant_nonces.hiding
+ (participant_nonces.binding * participant_rho_i)
+ (lambda_i * share_package.share.value.0 * challenge);
Ok(SignatureShare {
index: share_package.index,
signature: SignatureResponse(signature),
})
}
/// Verifies each participant's signature share, and if all are valid,
/// aggregates the shares into a signature to publish.
///
/// Resulting signature is compatible with verification of a plain SpendAuth
/// signature.
///
/// This operation is performed by a coordinator that can communicate with all
/// the signing participants before publishing the final signature. The
/// coordinator can be one of the participants or a semi-trusted third party
/// (who is trusted to not perform denial of service attacks, but does not learn
/// any secret information). Note that because the coordinator is trusted to
/// report misbehaving parties in order to avoid publishing an invalid
/// signature, if the coordinator themselves is a signer and misbehaves, they
/// can avoid that step. However, at worst, this results in a denial of
/// service attack due to publishing an invalid signature.
pub fn aggregate<S: SpendAuth>(
signing_package: &SigningPackage<S>,
signing_shares: &[SignatureShare<S>],
pubkeys: &PublicKeyPackage<S>,
) -> Result<Signature<S>, &'static str> {
let mut bindings: HashMap<u64, S::Scalar> =
HashMap::with_capacity(signing_package.signing_commitments.len());
for comm in signing_package.signing_commitments.iter() {
let rho_i = gen_rho_i(comm.index, &signing_package);
bindings.insert(comm.index, rho_i);
}
let group_commitment = gen_group_commitment(&signing_package, &bindings)?;
let challenge = gen_challenge(&signing_package, &group_commitment, &pubkeys.group_public);
for signing_share in signing_shares {
let signer_pubkey = pubkeys.signer_pubkeys[&signing_share.index];
let lambda_i = gen_lagrange_coeff(signing_share.index, &signing_package)?;
let signer_commitment = signing_package
.signing_commitments
.iter()
.find(|comm| comm.index == signing_share.index)
.ok_or("No matching signing commitment for signer")?;
let commitment_i =
signer_commitment.hiding + (signer_commitment.binding * bindings[&signing_share.index]);
signing_share.check_is_valid(&signer_pubkey, lambda_i, commitment_i, challenge)?;
}
// The aggregation of the signature shares by summing them up, resulting in
// a plain Schnorr signature.
let mut z = S::Scalar::zero();
for signature_share in signing_shares {
z += signature_share.signature.0;
}
Ok(Signature {
r_bytes: group_commitment.0.to_bytes().as_ref().try_into().unwrap(),
s_bytes: z.to_repr().as_ref().try_into().unwrap(),
_marker: PhantomData,
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{private::Sealed, sapling};
use jubjub::Scalar;
use rand::thread_rng;
fn reconstruct_secret<S: SpendAuth>(shares: Vec<Share<S>>) -> Result<S::Scalar, &'static str> {
let numshares = shares.len();
if numshares < 1 {
return Err("No shares provided");
}
let mut lagrange_coeffs: Vec<S::Scalar> = Vec::with_capacity(numshares as usize);
for i in 0..numshares {
let mut num = S::Scalar::one();
let mut den = S::Scalar::one();
for j in 0..numshares {
if j == i {
continue;
}
num *= S::Scalar::from(shares[j].receiver_index as u64);
den *= S::Scalar::from(shares[j].receiver_index as u64)
- S::Scalar::from(shares[i].receiver_index as u64);
}
if den == S::Scalar::zero() {
return Err("Duplicate shares provided");
}
lagrange_coeffs.push(num * den.invert().unwrap());
}
let mut secret = S::Scalar::zero();
for i in 0..numshares {
secret += lagrange_coeffs[i] * shares[i].value.0;
}
Ok(secret)
}
/// This is testing that using Shamir's secret sharing to compute an arbitrary
/// value works.
#[test]
fn check_share_generation() {
let mut rng = thread_rng();
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
let secret = Secret(Scalar::from_bytes_wide(&bytes));
let _ = sapling::SpendAuth::basepoint() * secret.0;
let shares = generate_shares::<_, sapling::SpendAuth>(&secret, 5, 3, rng).unwrap();
for share in shares.iter() {
assert_eq!(verify_share(&share), Ok(()));
}
assert_eq!(reconstruct_secret(shares).unwrap(), secret.0)
}
}
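
For orientation on what is being deleted, the sketch below shows how the removed frost module was driven end to end, mirroring the `full_setup` helper in the integration tests further down. It assumes the `sapling::SpendAuth` parameterization used by the module's own tests, and that the crate's `VerificationKey::verify` method is available for the final check:

use rand::thread_rng;
use std::collections::HashMap;

fn frost_round_trip() -> Result<(), &'static str> {
    let mut rng = thread_rng();
    // Trusted-dealer key generation: 3 signers, threshold 2.
    let (shares, pubkeys) =
        frost::keygen_with_dealer::<_, sapling::SpendAuth>(3, 2, rng.clone())?;

    // Round 1: each selected participant generates nonces and publishes commitments.
    let mut nonces: HashMap<u64, Vec<frost::SigningNonces<sapling::SpendAuth>>> = HashMap::new();
    let mut commitments: Vec<frost::SigningCommitments<sapling::SpendAuth>> = Vec::new();
    for share in shares.iter().take(2) {
        let (nonce, commitment) = frost::preprocess(1, share.index, &mut rng);
        nonces.insert(share.index, nonce);
        commitments.push(commitment[0]);
    }

    // The coordinator distributes the message together with the collected commitments.
    let message = b"message to sign".to_vec();
    let signing_package = frost::SigningPackage {
        signing_commitments: commitments,
        message: message.clone(),
    };

    // Round 2: each participant produces a signature share; the coordinator
    // verifies and aggregates them into a plain RedDSA signature.
    let mut signature_shares = Vec::new();
    for share in shares.iter().take(2) {
        signature_shares.push(frost::sign(&signing_package, nonces[&share.index][0], share)?);
    }
    let signature = frost::aggregate(&signing_package, &signature_shares, &pubkeys)?;

    // The aggregate verifies under the group verification key (assumed API).
    pubkeys
        .group_public
        .verify(&message, &signature)
        .map_err(|_| "group signature verification failed")
}

The coordinator role here (building the `SigningPackage` and calling `aggregate`) can be any participant or a semi-trusted third party, as the `aggregate` docs above note.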

View File

@@ -24,11 +24,7 @@ extern crate std;
pub mod batch;
mod constants;
mod error;
#[cfg(feature = "std")]
pub mod frost;
mod hash;
#[cfg(feature = "std")]
mod messages;
pub mod orchard;
pub mod sapling;
#[cfg(feature = "alloc")]

View File

@@ -1,55 +0,0 @@
use proptest::{
arbitrary::{any, Arbitrary},
prelude::*,
};
use super::*;
impl Arbitrary for Header {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
(
any::<MsgVersion>(),
any::<ParticipantId>(),
any::<ParticipantId>(),
)
.prop_filter(
"Sender and receiver participant IDs can not be the same",
|(_, sender, receiver)| sender != receiver,
)
.prop_map(|(version, sender, receiver)| Header {
version: version,
sender: sender,
receiver: receiver,
})
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}
impl Arbitrary for MsgVersion {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
Just(constants::BASIC_FROST_SERIALIZATION).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
impl Arbitrary for ParticipantId {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
prop_oneof![
(u64::MIN..=constants::MAX_SIGNER_PARTICIPANT_ID).prop_map(ParticipantId::Signer),
Just(ParticipantId::Dealer),
Just(ParticipantId::Aggregator),
]
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}

View File

@@ -1,31 +0,0 @@
//! Definitions of constants.
use super::MsgVersion;
/// The first version of FROST messages
pub const BASIC_FROST_SERIALIZATION: MsgVersion = MsgVersion(0);
/// The fixed participant ID for the dealer.
pub const DEALER_PARTICIPANT_ID: u64 = u64::MAX - 1;
/// The fixed participant ID for the aggregator.
pub const AGGREGATOR_PARTICIPANT_ID: u64 = u64::MAX;
/// The maximum `ParticipantId::Signer` in this serialization format.
///
/// We reserve two participant IDs for the dealer and aggregator.
pub const MAX_SIGNER_PARTICIPANT_ID: u64 = u64::MAX - 2;
/// The maximum number of signers
///
/// By protocol the number of signers can't be more than 255.
pub const MAX_SIGNERS: u8 = 255;
/// The maximum length of a Zcash message, in bytes.
pub const ZCASH_MAX_PROTOCOL_MESSAGE_LEN: usize = 2 * 1024 * 1024;
/// The minimum number of signers of any FROST setup.
pub const MIN_SIGNERS: usize = 2;
/// The minimum number of signers that must sign.
pub const MIN_THRESHOLD: usize = 2;

View File

@@ -1,68 +0,0 @@
//! Serialization rules specified in [RFC-001#Serialize-Deserialize]
//!
//! We automatically serialize and deserialize using serde derivations where possible.
//! Sometimes we need to implement ourselves, this file holds that code.
//!
//! [RFC-001#Serialize-Deserialize]: https://github.com/ZcashFoundation/redjubjub/blob/main/rfcs/0001-messages.md#serializationdeserialization
use serde::ser::{Serialize, Serializer};
use serde::de::{self, Deserialize, Deserializer, Visitor};
use super::constants::{
AGGREGATOR_PARTICIPANT_ID, DEALER_PARTICIPANT_ID, MAX_SIGNER_PARTICIPANT_ID,
};
use super::*;
use std::fmt;
impl Serialize for ParticipantId {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ParticipantId::Signer(id) => {
assert!(id <= MAX_SIGNER_PARTICIPANT_ID);
serializer.serialize_u64(id)
}
ParticipantId::Dealer => serializer.serialize_u64(DEALER_PARTICIPANT_ID),
ParticipantId::Aggregator => serializer.serialize_u64(AGGREGATOR_PARTICIPANT_ID),
}
}
}
struct ParticipantIdVisitor;
impl<'de> Visitor<'de> for ParticipantIdVisitor {
type Value = ParticipantId;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(
format!("an integer between {} and {}", std::u64::MIN, std::u64::MAX).as_str(),
)
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
// Note: deserialization can't fail, because all values are valid.
if value == DEALER_PARTICIPANT_ID {
return Ok(ParticipantId::Dealer);
} else if value == AGGREGATOR_PARTICIPANT_ID {
return Ok(ParticipantId::Aggregator);
} else {
return Ok(ParticipantId::Signer(value));
}
}
}
impl<'de> Deserialize<'de> for ParticipantId {
fn deserialize<D>(deserializer: D) -> Result<ParticipantId, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_u64(ParticipantIdVisitor)
}
}
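
As a quick illustration of the fixed-ID scheme defined in the constants above and implemented by this Serialize/Deserialize pair, the sketch below round-trips a `ParticipantId` through bincode. It assumes the crate-internal types are in scope and relies on bincode's default fixed-width little-endian integer encoding, the same property the integration tests below depend on for their byte offsets:

fn participant_id_round_trip() {
    // The dealer and aggregator occupy the two reserved u64 values.
    let bytes = bincode::serialize(&ParticipantId::Dealer).unwrap();
    assert_eq!(bytes, (u64::MAX - 1).to_le_bytes());
    let back: ParticipantId = bincode::deserialize(&bytes).unwrap();
    assert_eq!(back, ParticipantId::Dealer);

    // Signer IDs at or below MAX_SIGNER_PARTICIPANT_ID serialize as their own value.
    let bytes = bincode::serialize(&ParticipantId::Signer(7)).unwrap();
    assert_eq!(bytes, 7u64.to_le_bytes());
    let back: ParticipantId = bincode::deserialize(&bytes).unwrap();
    assert_eq!(back, ParticipantId::Signer(7));
}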

View File

@@ -1,2 +0,0 @@
mod integration;
mod prop;

View File

@@ -1,815 +0,0 @@
use crate::{
frost,
messages::{
validate::{MsgErr, Validate},
*,
},
sapling, verification_key,
};
use rand::thread_rng;
use serde_json;
use std::convert::TryFrom;
#[test]
fn validate_version() {
// A version number that we expect to be always invalid
const INVALID_VERSION: u8 = u8::MAX;
let setup = basic_setup();
let header = Header {
version: MsgVersion(INVALID_VERSION),
sender: setup.dealer,
receiver: setup.signer1,
};
let validate = Validate::validate(&header);
assert_eq!(validate, Err(MsgErr::WrongVersion));
let validate = Validate::validate(&Header {
version: constants::BASIC_FROST_SERIALIZATION,
sender: setup.dealer,
receiver: setup.signer1,
})
.err();
assert_eq!(validate, None);
}
#[test]
fn validate_sender_receiver() {
let setup = basic_setup();
let header = Header {
version: constants::BASIC_FROST_SERIALIZATION,
sender: setup.signer1,
receiver: setup.signer1,
};
let validate = Validate::validate(&header);
assert_eq!(validate, Err(MsgErr::SameSenderAndReceiver));
}
#[test]
fn validate_sharepackage() {
let setup = basic_setup();
let (mut shares, _pubkeys) = frost::keygen_with_dealer::<_, sapling::SpendAuth>(
setup.num_signers,
setup.threshold,
setup.rng.clone(),
)
.unwrap();
let header = create_valid_header(setup.signer1, setup.signer2);
let group_public = VerificationKey::from(
verification_key::VerificationKey::try_from(shares[0].group_public.bytes).unwrap(),
);
let secret_share = Secret(shares[0].share.value.0.to_bytes());
let participants = vec![setup.signer1, setup.signer2];
shares.truncate(2);
let share_commitment = generate_share_commitment(&shares, participants);
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share: secret_share,
share_commitment: share_commitment,
});
let validate_payload = Validate::validate(&payload);
let valid_payload = validate_payload.expect("a valid payload").clone();
let message = Message {
header,
payload: valid_payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeDealer));
// change the header
let header = create_valid_header(setup.dealer, setup.aggregator);
let message = Message {
header,
payload: valid_payload,
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeSigner));
let participants = vec![setup.signer1];
shares.truncate(1);
let mut share_commitment = generate_share_commitment(&shares, participants);
// change the payload to have only 1 commitment
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share: secret_share,
share_commitment: share_commitment.clone(),
});
let validate_payload = Validate::validate(&payload);
assert_eq!(
validate_payload,
Err(MsgErr::NotEnoughCommitments(constants::MIN_SIGNERS))
);
// build and use too many commitments
for i in 2..constants::MAX_SIGNERS as u64 + 2 {
share_commitment.insert(
ParticipantId::Signer(i),
share_commitment.clone()[&setup.signer1],
);
}
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share,
share_commitment,
});
let validate_payload = Validate::validate(&payload);
assert_eq!(validate_payload, Err(MsgErr::TooManyCommitments));
}
#[test]
fn serialize_sharepackage() {
let setup = basic_setup();
let (mut shares, _pubkeys) = frost::keygen_with_dealer::<_, sapling::SpendAuth>(
setup.num_signers,
setup.threshold,
setup.rng.clone(),
)
.unwrap();
let header = create_valid_header(setup.dealer, setup.signer1);
let group_public = VerificationKey::from(
verification_key::VerificationKey::try_from(shares[0].group_public.bytes).unwrap(),
);
let secret_share = Secret(shares[0].share.value.0.to_bytes());
let participants = vec![setup.signer1];
shares.truncate(1);
let share_commitment = generate_share_commitment(&shares, participants);
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share,
share_commitment: share_commitment.clone(),
});
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure and header serialization/deserialization
serialize_message(message, setup.dealer, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SharePackage);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// group_public is 32 bytes
let deserialized_group_public: VerificationKey =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// secret share is 32 bytes
let deserialized_secret_share: Secret =
bincode::deserialize(&payload_serialized_bytes[32..64]).unwrap();
// rest of the message is the map: 32(Commitment) + 8(ParticipantId) + 8(map.len())
let deserialized_share_commitment: BTreeMap<ParticipantId, Commitment> =
bincode::deserialize(&payload_serialized_bytes[64..112]).unwrap();
// check the map len
let deserialized_map_len: u64 =
bincode::deserialize(&payload_serialized_bytes[64..72]).unwrap();
assert_eq!(deserialized_map_len, 1);
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 112);
assert_eq!(deserialized_group_public, group_public);
assert_eq!(deserialized_secret_share, secret_share);
assert_eq!(deserialized_share_commitment, share_commitment);
}
#[test]
fn validate_signingcommitments() {
let mut setup = basic_setup();
let (_nonce, commitment) =
frost::preprocess::<_, sapling::SpendAuth>(1, u64::from(setup.signer1), &mut setup.rng);
let header = create_valid_header(setup.aggregator, setup.signer2);
let payload = Payload::SigningCommitments(SigningCommitments {
hiding: Commitment(jubjub::AffinePoint::from(commitment[0].hiding).to_bytes()),
binding: Commitment(jubjub::AffinePoint::from(commitment[0].binding).to_bytes()),
});
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeSigner));
// change the header
let header = create_valid_header(setup.signer1, setup.signer2);
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeAggregator));
// change the header to be valid
let header = create_valid_header(setup.signer1, setup.aggregator);
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_signingcommitments() {
let mut setup = basic_setup();
let (_nonce, commitment) =
frost::preprocess::<_, sapling::SpendAuth>(1, u64::from(setup.signer1), &mut setup.rng);
let header = create_valid_header(setup.aggregator, setup.signer1);
let hiding = Commitment(jubjub::AffinePoint::from(commitment[0].hiding).to_bytes());
let binding = Commitment(jubjub::AffinePoint::from(commitment[0].binding).to_bytes());
let payload = Payload::SigningCommitments(SigningCommitments { hiding, binding });
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.aggregator, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SigningCommitments);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// hiding is 32 bytes
let deserialized_hiding: Commitment =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// binding is 32 bytes
let deserialized_binding: Commitment =
bincode::deserialize(&payload_serialized_bytes[32..64]).unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 64);
assert_eq!(deserialized_hiding, hiding);
assert_eq!(deserialized_binding, binding);
}
#[test]
fn validate_signingpackage() {
let mut setup = basic_setup();
let (_nonce, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let header = create_valid_header(setup.signer1, setup.signer2);
// try with only 1 commitment
let commitments = vec![commitment1[0]];
let participants = vec![setup.signer1];
let signing_commitments = create_signing_commitments(commitments, participants);
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
let validate_payload = Validate::validate(&payload);
assert_eq!(
validate_payload,
Err(MsgErr::NotEnoughCommitments(constants::MIN_SIGNERS))
);
// add too many commitments
let mut big_signing_commitments = BTreeMap::<ParticipantId, SigningCommitments>::new();
for i in 0..constants::MAX_SIGNERS as u64 + 1 {
big_signing_commitments.insert(
ParticipantId::Signer(i),
signing_commitments[&setup.signer1].clone(),
);
}
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: big_signing_commitments,
message: "hola".as_bytes().to_vec(),
});
let validate_payload = Validate::validate(&payload);
assert_eq!(validate_payload, Err(MsgErr::TooManyCommitments));
// change to 2 commitments
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let big_message = [0u8; constants::ZCASH_MAX_PROTOCOL_MESSAGE_LEN + 1].to_vec();
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: big_message,
});
let validate_payload = Validate::validate(&payload);
assert_eq!(validate_payload, Err(MsgErr::MsgTooBig));
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeAggregator));
// change header
let header = create_valid_header(setup.aggregator, setup.dealer);
let message = Message {
header: header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeSigner));
let header = create_valid_header(setup.aggregator, setup.signer1);
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments,
message: "hola".as_bytes().to_vec(),
});
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_signingpackage() {
let mut setup = basic_setup();
let (_nonce, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let header = create_valid_header(setup.aggregator, setup.signer1);
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.aggregator, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SigningPackage);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// check the map len
let deserialized_map_len: u64 = bincode::deserialize(&payload_serialized_bytes[0..8]).unwrap();
assert_eq!(deserialized_map_len, 2);
// Each SigningCommitment is 64 bytes and the ParticipantId is 8 bytes.
// This is multiplied by the map len, plus the 8 bytes of the map len itself.
let deserialized_signing_commitments: BTreeMap<ParticipantId, SigningCommitments> =
bincode::deserialize(&payload_serialized_bytes[0..152]).unwrap();
// The message runs from the end of the map to the end of the payload.
let deserialized_message: Vec<u8> =
bincode::deserialize(&payload_serialized_bytes[152..payload_serialized_bytes.len()])
.unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 164);
assert_eq!(deserialized_signing_commitments, signing_commitments);
assert_eq!(deserialized_message, "hola".as_bytes().to_vec());
}
#[test]
fn validate_signatureshare() {
let mut setup = basic_setup();
// signers and aggregator should have this data from `SharePackage`
let (shares, _pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
// create a signing package; this is done on the aggregator side.
// the signers should have this data from `SigningPackage`
let (nonce1, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce2, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let signing_package = frost::SigningPackage::from(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
// here we get started with the `SignatureShare` message.
let signature_share = frost::sign(&signing_package, nonce1[0], &shares[0]).unwrap();
// this header is invalid
let header = create_valid_header(setup.aggregator, setup.signer1);
let payload = Payload::SignatureShare(SignatureShare {
signature: SignatureResponse(signature_share.signature.0.to_bytes()),
});
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeSigner));
// change the header, still invalid.
let header = create_valid_header(setup.signer1, setup.signer2);
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeAggregator));
// change the header to be valid
let header = create_valid_header(setup.signer1, setup.aggregator);
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_signatureshare() {
let mut setup = basic_setup();
// signers and aggregator should have this data from `SharePackage`
let (shares, _pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
// create a signing package; this is done on the aggregator side.
// the signers should have this data from `SigningPackage`
let (nonce1, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce2, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let signing_package = frost::SigningPackage::from(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
// here we get started with the `SignatureShare` message.
let signature_share = frost::sign(&signing_package, nonce1[0], &shares[0]).unwrap();
// valid header
let header = create_valid_header(setup.signer1, setup.aggregator);
let signature = SignatureResponse(signature_share.signature.0.to_bytes());
let payload = Payload::SignatureShare(SignatureShare { signature });
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.signer1, setup.aggregator);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SignatureShare);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// signature is 32 bytes
let deserialized_signature: SignatureResponse =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 32);
assert_eq!(deserialized_signature, signature);
}
#[test]
fn validate_aggregatesignature() {
let (setup, group_signature_res) = full_setup();
// this header is invalid
let header = create_valid_header(setup.signer1, setup.aggregator);
let payload = Payload::AggregateSignature(AggregateSignature {
group_commitment: GroupCommitment::from(group_signature_res),
schnorr_signature: SignatureResponse::from(group_signature_res),
});
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeAggregator));
// change the header, still invalid.
let header = create_valid_header(setup.aggregator, setup.dealer);
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeSigner));
// change the header to be valid
let header = create_valid_header(setup.aggregator, setup.signer1);
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_aggregatesignature() {
let (setup, group_signature_res) = full_setup();
let header = create_valid_header(setup.aggregator, setup.signer1);
let group_commitment = GroupCommitment::from(group_signature_res);
let schnorr_signature = SignatureResponse::from(group_signature_res);
let payload = Payload::AggregateSignature(AggregateSignature {
group_commitment,
schnorr_signature,
});
let message = Message {
header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.aggregator, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::AggregateSignature);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// group_commitment is 32 bytes
let deserialized_group_commiment: GroupCommitment =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// schnorr_signature is 32 bytes
let deserialized_schnorr_signature: SignatureResponse =
bincode::deserialize(&payload_serialized_bytes[32..64]).unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 64);
assert_eq!(deserialized_group_commiment, group_commitment);
assert_eq!(deserialized_schnorr_signature, schnorr_signature);
}
#[test]
fn btreemap() {
let mut setup = basic_setup();
let mut map = BTreeMap::new();
let (_nonce, commitment) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let commitments = vec![commitment[0]];
let participants = vec![setup.signer1];
let signing_commitments = create_signing_commitments(commitments, participants);
map.insert(ParticipantId::Signer(1), &signing_commitments);
map.insert(ParticipantId::Signer(2), &signing_commitments);
map.insert(ParticipantId::Signer(0), &signing_commitments);
// Check the ascending order
let mut map_iter = map.iter();
let (key, _) = map_iter.next().unwrap();
assert_eq!(*key, ParticipantId::Signer(0));
let (key, _) = map_iter.next().unwrap();
assert_eq!(*key, ParticipantId::Signer(1));
let (key, _) = map_iter.next().unwrap();
assert_eq!(*key, ParticipantId::Signer(2));
// Add a repeated key
map.insert(ParticipantId::Signer(1), &signing_commitments);
// The BTreeMap does not grow when a duplicate key is inserted
assert_eq!(map.len(), 3);
}
// utility functions
fn create_valid_header(sender: ParticipantId, receiver: ParticipantId) -> Header {
Validate::validate(&Header {
version: constants::BASIC_FROST_SERIALIZATION,
sender: sender,
receiver: receiver,
})
.expect("always a valid header")
.clone()
}
fn serialize_header(
header_serialized_bytes: Vec<u8>,
sender: ParticipantId,
receiver: ParticipantId,
) {
let deserialized_version: MsgVersion =
bincode::deserialize(&header_serialized_bytes[0..1]).unwrap();
let deserialized_sender: ParticipantId =
bincode::deserialize(&header_serialized_bytes[1..9]).unwrap();
let deserialized_receiver: ParticipantId =
bincode::deserialize(&header_serialized_bytes[9..17]).unwrap();
assert_eq!(deserialized_version, constants::BASIC_FROST_SERIALIZATION);
assert_eq!(deserialized_sender, sender);
assert_eq!(deserialized_receiver, receiver);
}
fn serialize_message(message: Message, sender: ParticipantId, receiver: ParticipantId) {
let serialized_bytes = bincode::serialize(&message).unwrap();
let deserialized_bytes: Message = bincode::deserialize(&serialized_bytes).unwrap();
assert_eq!(message, deserialized_bytes);
let serialized_json = serde_json::to_string(&message).unwrap();
let deserialized_json: Message = serde_json::from_str(serialized_json.as_str()).unwrap();
assert_eq!(message, deserialized_json);
let header_serialized_bytes = bincode::serialize(&message.header).unwrap();
serialize_header(header_serialized_bytes, sender, receiver);
// make sure the message fields are in the right order
let message_serialized_bytes = bincode::serialize(&message).unwrap();
let deserialized_header: Header =
bincode::deserialize(&message_serialized_bytes[0..17]).unwrap();
let deserialized_payload: Payload =
bincode::deserialize(&message_serialized_bytes[17..message_serialized_bytes.len()])
.unwrap();
assert_eq!(deserialized_header, message.header);
assert_eq!(deserialized_payload, message.payload);
}
struct Setup {
rng: rand::rngs::ThreadRng,
num_signers: u8,
threshold: u8,
dealer: ParticipantId,
aggregator: ParticipantId,
signer1: ParticipantId,
signer2: ParticipantId,
}
fn basic_setup() -> Setup {
Setup {
rng: thread_rng(),
num_signers: 3,
threshold: 2,
dealer: ParticipantId::Dealer,
aggregator: ParticipantId::Aggregator,
signer1: ParticipantId::Signer(0),
signer2: ParticipantId::Signer(1),
}
}
fn full_setup() -> (Setup, signature::Signature<sapling::SpendAuth>) {
let mut setup = basic_setup();
// aggregator creates the shares and pubkeys for this round
let (shares, pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
let mut nonces: std::collections::HashMap<u64, Vec<frost::SigningNonces<sapling::SpendAuth>>> =
std::collections::HashMap::with_capacity(setup.threshold as usize);
let mut commitments: Vec<frost::SigningCommitments<sapling::SpendAuth>> =
Vec::with_capacity(setup.threshold as usize);
// aggregator generates nonces and signing commitments for each participant.
for participant_index in 1..(setup.threshold + 1) {
let (nonce, commitment) = frost::preprocess(1, participant_index as u64, &mut setup.rng);
nonces.insert(participant_index as u64, nonce);
commitments.push(commitment[0]);
}
// aggregator generates a signing package
let mut signature_shares: Vec<frost::SignatureShare<sapling::SpendAuth>> =
Vec::with_capacity(setup.threshold as usize);
let message = "message to sign".as_bytes().to_vec();
let signing_package = frost::SigningPackage {
message: message.clone(),
signing_commitments: commitments,
};
// each participant generates their signature share
for (participant_index, nonce) in nonces {
let share_package = shares
.iter()
.find(|share| participant_index == share.index)
.unwrap();
let nonce_to_use = nonce[0];
let signature_share = frost::sign(&signing_package, nonce_to_use, share_package).unwrap();
signature_shares.push(signature_share);
}
// aggregator generates the final signature
let final_signature =
frost::aggregate(&signing_package, &signature_shares[..], &pubkeys).unwrap();
(setup, final_signature)
}
fn generate_share_commitment(
shares: &Vec<frost::SharePackage<sapling::SpendAuth>>,
participants: Vec<ParticipantId>,
) -> BTreeMap<ParticipantId, Commitment> {
assert_eq!(shares.len(), participants.len());
participants
.into_iter()
.zip(shares)
.map(|(participant_id, share)| {
(
participant_id,
Commitment::from(share.share.commitment.0[0].clone()),
)
})
.collect()
}
fn create_signing_commitments(
commitments: Vec<frost::SigningCommitments<sapling::SpendAuth>>,
participants: Vec<ParticipantId>,
) -> BTreeMap<ParticipantId, SigningCommitments> {
assert_eq!(commitments.len(), participants.len());
participants
.into_iter()
.zip(commitments)
.map(|(participant_id, commitment)| {
let signing_commitment = SigningCommitments {
hiding: Commitment(jubjub::AffinePoint::from(commitment.hiding).to_bytes()),
binding: Commitment(jubjub::AffinePoint::from(commitment.binding).to_bytes()),
};
(participant_id, signing_commitment)
})
.collect()
}

View File

@@ -1,15 +0,0 @@
use proptest::prelude::*;
use crate::messages::*;
proptest! {
#[test]
fn serialize_message(
message in any::<Message>(),
) {
let serialized = bincode::serialize(&message).unwrap();
let deserialized: Message = bincode::deserialize(serialized.as_slice()).unwrap();
prop_assert_eq!(message, deserialized);
}
}

View File

@@ -1,143 +0,0 @@
//! Validation rules specified in [RFC-001#rules]
//!
//! [RFC-001#rules]: https://github.com/ZcashFoundation/redjubjub/blob/main/rfcs/0001-messages.md#rules
use super::constants::{
BASIC_FROST_SERIALIZATION, MAX_SIGNERS, MIN_SIGNERS, MIN_THRESHOLD,
ZCASH_MAX_PROTOCOL_MESSAGE_LEN,
};
use super::*;
use thiserror::Error;
pub trait Validate {
fn validate(&self) -> Result<&Self, MsgErr>;
}
impl Validate for Message {
fn validate(&self) -> Result<&Self, MsgErr> {
match self.payload {
Payload::SharePackage(_) => {
if self.header.sender != ParticipantId::Dealer {
return Err(MsgErr::SenderMustBeDealer);
}
if !matches!(self.header.receiver, ParticipantId::Signer(_)) {
return Err(MsgErr::ReceiverMustBeSigner);
}
}
Payload::SigningCommitments(_) => {
if !matches!(self.header.sender, ParticipantId::Signer(_)) {
return Err(MsgErr::SenderMustBeSigner);
}
if self.header.receiver != ParticipantId::Aggregator {
return Err(MsgErr::ReceiverMustBeAggregator);
}
}
Payload::SigningPackage(_) => {
if self.header.sender != ParticipantId::Aggregator {
return Err(MsgErr::SenderMustBeAggregator);
}
if !matches!(self.header.receiver, ParticipantId::Signer(_)) {
return Err(MsgErr::ReceiverMustBeSigner);
}
}
Payload::SignatureShare(_) => {
if !matches!(self.header.sender, ParticipantId::Signer(_)) {
return Err(MsgErr::SenderMustBeSigner);
}
if self.header.receiver != ParticipantId::Aggregator {
return Err(MsgErr::ReceiverMustBeAggregator);
}
}
Payload::AggregateSignature(_) => {
if self.header.sender != ParticipantId::Aggregator {
return Err(MsgErr::SenderMustBeAggregator);
}
if !matches!(self.header.receiver, ParticipantId::Signer(_)) {
return Err(MsgErr::ReceiverMustBeSigner);
}
}
}
self.header.validate()?;
self.payload.validate()?;
Ok(self)
}
}
impl Validate for Header {
fn validate(&self) -> Result<&Self, MsgErr> {
// Validate the message version.
        // For now there is only one valid version, so we compare against that.
if self.version != BASIC_FROST_SERIALIZATION {
return Err(MsgErr::WrongVersion);
}
// Make sure the sender and the receiver are not the same.
if self.sender == self.receiver {
return Err(MsgErr::SameSenderAndReceiver);
}
Ok(self)
}
}
impl Validate for Payload {
fn validate(&self) -> Result<&Self, MsgErr> {
match self {
Payload::SharePackage(share_package) => {
if share_package.share_commitment.len() < MIN_SIGNERS {
return Err(MsgErr::NotEnoughCommitments(MIN_SIGNERS));
}
if share_package.share_commitment.len() > MAX_SIGNERS.into() {
return Err(MsgErr::TooManyCommitments);
}
}
Payload::SigningCommitments(_) => {}
Payload::SigningPackage(signing_package) => {
if signing_package.message.len() > ZCASH_MAX_PROTOCOL_MESSAGE_LEN {
return Err(MsgErr::MsgTooBig);
}
if signing_package.signing_commitments.len() < MIN_THRESHOLD {
return Err(MsgErr::NotEnoughCommitments(MIN_THRESHOLD));
}
if signing_package.signing_commitments.len() > MAX_SIGNERS.into() {
return Err(MsgErr::TooManyCommitments);
}
}
Payload::SignatureShare(_) => {}
Payload::AggregateSignature(_) => {}
}
Ok(self)
}
}
/// The error a message can produce if it fails validation.
#[derive(Error, Debug, PartialEq)]
pub enum MsgErr {
#[error("wrong version number")]
WrongVersion,
#[error("sender and receiver are the same")]
SameSenderAndReceiver,
#[error("the sender of this message must be the dealer")]
SenderMustBeDealer,
#[error("the receiver of this message must be a signer")]
ReceiverMustBeSigner,
#[error("the sender of this message must be a signer")]
SenderMustBeSigner,
#[error("the receiver of this message must be the aggregator")]
ReceiverMustBeAggregator,
#[error("the sender of this message must be the aggregator")]
SenderMustBeAggregator,
#[error("the number of signers must be at least {0}")]
NotEnoughCommitments(usize),
#[error("the number of signers can't be more than {}", MAX_SIGNERS)]
TooManyCommitments,
#[error(
"the message field can't be bigger than {}",
ZCASH_MAX_PROTOCOL_MESSAGE_LEN
)]
MsgTooBig,
}
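// A short usage sketch (an assumed consumer of this API, not code from the
// crate): validate an incoming `Message` before acting on it, surfacing the
// specific `MsgErr` to the caller on failure.
fn handle_incoming(message: &Message) -> Result<(), MsgErr> {
    // `validate` checks the header (version, distinct sender/receiver) and the
    // payload bounds, returning a reference to the message on success.
    message.validate()?;
    // ... dispatch on `message.payload` here ...
    Ok(())
}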

View File

@ -94,7 +94,7 @@ impl NonAdjacentForm for pallas::Scalar {
let mut naf = [0i8; 256];
let mut x_u64 = [0u64; 5];
LittleEndian::read_u64_into(&self.to_repr().as_ref(), &mut x_u64[0..4]);
LittleEndian::read_u64_into(self.to_repr().as_ref(), &mut x_u64[0..4]);
let width = 1 << w;
let window_mask = width - 1;
@ -105,14 +105,13 @@ impl NonAdjacentForm for pallas::Scalar {
// Construct a buffer of bits of the scalar, starting at bit `pos`
let u64_idx = pos / 64;
let bit_idx = pos % 64;
let bit_buf: u64;
if bit_idx < 64 - w {
let bit_buf = if bit_idx < 64 - w {
// This window's bits are contained in a single u64
bit_buf = x_u64[u64_idx] >> bit_idx;
x_u64[u64_idx] >> bit_idx
} else {
// Combine the current u64's bits with the bits from the next u64
bit_buf = (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx));
}
(x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
};
// Add the carry into the current window
let window = carry + (bit_buf & window_mask);
@ -148,7 +147,7 @@ impl<'a> From<&'a pallas::Point> for LookupTable5<pallas::Point> {
let mut Ai = [*A; 8];
let A2 = A.double();
for i in 0..7 {
Ai[i + 1] = &A2 + Ai[i];
Ai[i + 1] = A2 + Ai[i];
}
// Now Ai = [A, 3A, 5A, 7A, 9A, 11A, 13A, 15A]
LookupTable5(Ai)
@ -183,10 +182,11 @@ impl VartimeMultiscalarMul for pallas::Point {
let mut t = r.double();
for (naf, lookup_table) in nafs.iter().zip(lookup_tables.iter()) {
#[allow(clippy::comparison_chain)]
if naf[i] > 0 {
t = &t + &lookup_table.select(naf[i] as usize);
t += lookup_table.select(naf[i] as usize)
} else if naf[i] < 0 {
t = &t - &lookup_table.select(-naf[i] as usize);
t -= lookup_table.select(-naf[i] as usize);
}
}
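// A standalone sketch of the window extraction rewritten above (illustration
// only, not part of this crate): read a `w`-bit window starting at bit `pos`
// from a little-endian array of u64 limbs, splicing two limbs together when
// the window straddles a limb boundary. Assumes `w < 64` and a spare zero limb
// at the top, as in the 5-limb buffer used above.
fn window_at(x_u64: &[u64], pos: usize, w: usize) -> u64 {
    let window_mask = (1u64 << w) - 1;
    let u64_idx = pos / 64;
    let bit_idx = pos % 64;
    let bit_buf = if bit_idx < 64 - w {
        // All `w` bits live inside a single limb.
        x_u64[u64_idx] >> bit_idx
    } else {
        // Combine the current limb's high bits with the next limb's low bits.
        (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
    };
    // e.g. window_at(&[0b1011, 0, 0, 0, 0], 1, 3) == 0b101
    bit_buf & window_mask
}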

View File

@ -88,14 +88,13 @@ impl NonAdjacentForm for jubjub::Scalar {
// Construct a buffer of bits of the scalar, starting at bit `pos`
let u64_idx = pos / 64;
let bit_idx = pos % 64;
let bit_buf: u64;
if bit_idx < 64 - w {
let bit_buf: u64 = if bit_idx < 64 - w {
// This window's bits are contained in a single u64
bit_buf = x_u64[u64_idx] >> bit_idx;
x_u64[u64_idx] >> bit_idx
} else {
// Combine the current u64's bits with the bits from the next u64
bit_buf = (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx));
}
(x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx))
};
// Add the carry into the current window
let window = carry + (bit_buf & window_mask);
@ -150,7 +149,7 @@ impl<'a> From<&'a ExtendedPoint> for LookupTable5<ExtendedNielsPoint> {
let mut Ai = [A.to_niels(); 8];
let A2 = A.double();
for i in 0..7 {
Ai[i + 1] = (&A2 + &Ai[i]).to_niels();
Ai[i + 1] = (A2 + Ai[i]).to_niels();
}
// Now Ai = [A, 3A, 5A, 7A, 9A, 11A, 13A, 15A]
LookupTable5(Ai)
@ -184,10 +183,11 @@ impl VartimeMultiscalarMul for ExtendedPoint {
let mut t = r.double();
for (naf, lookup_table) in nafs.iter().zip(lookup_tables.iter()) {
#[allow(clippy::comparison_chain)]
if naf[i] > 0 {
t = &t + &lookup_table.select(naf[i] as usize);
t += lookup_table.select(naf[i] as usize);
} else if naf[i] < 0 {
t = &t - &lookup_table.select(-naf[i] as usize);
t -= lookup_table.select(-naf[i] as usize);
}
}
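// A small integer illustration (not crate code) of the lookup-table
// construction above: starting from A and repeatedly adding 2A yields the odd
// multiples [A, 3A, 5A, 7A, 9A, 11A, 13A, 15A] that the width-5 NAF loop
// selects from.
fn odd_multiples(a: i64) -> [i64; 8] {
    let mut ai = [a; 8];
    let a2 = a + a; // corresponds to A.double()
    for i in 0..7 {
        ai[i + 1] = a2 + ai[i];
    }
    ai // odd_multiples(1) == [1, 3, 5, 7, 9, 11, 13, 15]
}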

View File

@ -33,7 +33,7 @@ pub struct SigningKey<T: SigType> {
impl<'a, T: SigType> From<&'a SigningKey<T>> for VerificationKey<T> {
fn from(sk: &'a SigningKey<T>) -> VerificationKey<T> {
sk.pk.clone()
sk.pk
}
}
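// A brief usage sketch of the conversion above (`SigningKey::new` taking an
// RNG is an assumption, not shown in this diff): `VerificationKey` is `Copy`,
// which is why the `From` impl can return `sk.pk` by value instead of cloning.
//
// let sk = SigningKey::<sapling::SpendAuth>::new(rand::thread_rng());
// let vk: VerificationKey<sapling::SpendAuth> = (&sk).into();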

View File

@ -10,7 +10,7 @@
use core::{
convert::{TryFrom, TryInto},
hash::{Hash, Hasher},
hash::Hash,
marker::PhantomData,
};
@ -24,7 +24,7 @@ use crate::{Error, Randomizer, SigType, Signature, SpendAuth};
/// This is useful for representing a compressed verification key; the
/// [`VerificationKey`] type in this library holds other decompressed state
/// used in signature verification.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct VerificationKeyBytes<T: SigType> {
pub(crate) bytes: [u8; 32],
@ -46,13 +46,6 @@ impl<T: SigType> From<VerificationKeyBytes<T>> for [u8; 32] {
}
}
impl<T: SigType> Hash for VerificationKeyBytes<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.bytes.hash(state);
self._marker.hash(state);
}
}
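// A brief illustration (not code from this commit) of what deriving `Hash`
// keeps working: using `VerificationKeyBytes` as a key in hash-based
// collections. The derived impl hashes the same data as the removed manual
// impl, since `PhantomData`'s `Hash` contributes nothing.
//
// use std::collections::HashSet;
// let mut seen: HashSet<VerificationKeyBytes<sapling::SpendAuth>> = HashSet::new();
// seen.insert(vk_bytes); // `vk_bytes` is a hypothetical value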
/// A valid RedDSA verification key.
///
/// This type holds decompressed state used in signature verification; if the

View File

@ -1,66 +0,0 @@
#![cfg(all(feature = "std", feature = "serde"))]
use rand::thread_rng;
use std::collections::HashMap;
use reddsa::{frost, sapling};
#[test]
fn check_sign_with_dealer() {
let mut rng = thread_rng();
let numsigners = 5;
let threshold = 3;
let (shares, pubkeys) = frost::keygen_with_dealer(numsigners, threshold, &mut rng).unwrap();
let mut nonces: HashMap<u64, Vec<frost::SigningNonces<sapling::SpendAuth>>> =
HashMap::with_capacity(threshold as usize);
let mut commitments: Vec<frost::SigningCommitments<sapling::SpendAuth>> =
Vec::with_capacity(threshold as usize);
// Round 1, generating nonces and signing commitments for each participant.
for participant_index in 1..(threshold + 1) {
// Generate one (1) nonce and one SigningCommitments instance for each
// participant, up to _threshold_.
let (nonce, commitment) = frost::preprocess(1, participant_index as u64, &mut rng);
nonces.insert(participant_index as u64, nonce);
commitments.push(commitment[0]);
}
// This is what the signature aggregator / coordinator needs to do:
// - decide what message to sign
// - take one (unused) commitment per signing participant
let mut signature_shares: Vec<frost::SignatureShare<sapling::SpendAuth>> =
Vec::with_capacity(threshold as usize);
let message = "message to sign".as_bytes();
let signing_package = frost::SigningPackage {
message: message.to_vec(),
signing_commitments: commitments,
};
// Round 2: each participant generates their signature share
for (participant_index, nonce) in nonces {
let share_package = shares
.iter()
.find(|share| participant_index == share.index)
.unwrap();
let nonce_to_use = nonce[0];
// Each participant generates their signature share.
let signature_share = frost::sign(&signing_package, nonce_to_use, share_package).unwrap();
signature_shares.push(signature_share);
}
// The aggregator collects the signing shares from all participants and
// generates the final signature.
let group_signature_res = frost::aggregate(&signing_package, &signature_shares[..], &pubkeys);
assert!(group_signature_res.is_ok());
let group_signature = group_signature_res.unwrap();
// Check that the threshold signature can be verified by the group public
// key (aka verification key).
assert!(pubkeys
.group_public
.verify(&message, &group_signature)
.is_ok());
    // TODO: also check that SharePackage.group_public verifies the group signature.
}
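// A sketch of what the TODO above might look like, assuming (as the TODO
// implies) that each `SharePackage` carries a `group_public` verification key
// accepting the same `verify` call used above:
//
// for share in shares.iter() {
//     assert!(share
//         .group_public
//         .verify(&message, &group_signature)
//         .is_ok());
// }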

View File

@ -9,7 +9,7 @@ use reddsa::*;
fn verify_librustzcash_spendauth() {
for (msg, sig, pk_bytes) in LIBRUSTZCASH_SPENDAUTH_SIGS.iter() {
assert!(VerificationKey::try_from(*pk_bytes)
.and_then(|pk| pk.verify(&msg, &sig))
.and_then(|pk| pk.verify(msg, sig))
.is_ok());
}
}
@ -18,7 +18,7 @@ fn verify_librustzcash_spendauth() {
fn verify_librustzcash_binding() {
for (msg, sig, pk_bytes) in LIBRUSTZCASH_BINDING_SIGS.iter() {
assert!(VerificationKey::try_from(*pk_bytes)
.and_then(|pk| pk.verify(&msg, &sig))
.and_then(|pk| pk.verify(msg, sig))
.is_ok());
}
}

View File

@ -7,7 +7,7 @@ use reddsa::*;
#[test]
fn identity_publickey_passes() {
let identity = AffinePoint::identity();
assert_eq!(<bool>::from(identity.is_small_order()), true);
assert!(<bool>::from(identity.is_small_order()));
let bytes = identity.to_bytes();
let pk_bytes = VerificationKeyBytes::<sapling::SpendAuth>::from(bytes);
assert!(VerificationKey::<sapling::SpendAuth>::try_from(pk_bytes).is_ok());
@ -17,7 +17,7 @@ fn identity_publickey_passes() {
fn smallorder_publickey_passes() {
// (1,0) is a point of order 4 on any Edwards curve
let order4 = AffinePoint::from_raw_unchecked(Fq::one(), Fq::zero());
assert_eq!(<bool>::from(order4.is_small_order()), true);
assert!(<bool>::from(order4.is_small_order()));
let bytes = order4.to_bytes();
let pk_bytes = VerificationKeyBytes::<sapling::SpendAuth>::from(bytes);
assert!(VerificationKey::<sapling::SpendAuth>::try_from(pk_bytes).is_ok());