Remove `frost-redjubjub` (#246)

Remove frost-redjubjub
Pili Guerra 2023-02-16 23:44:12 +01:00 committed by GitHub
parent c39e95f973
commit a0eead7f13
30 changed files with 0 additions and 5029 deletions

@@ -2,7 +2,6 @@
resolver = "2"
members = [
"frost-core",
#"frost-redjubjub",
"frost-ed25519",
"frost-ed448",
"frost-p256",

@@ -1,46 +0,0 @@
[package]
name = "redjubjub"
edition = "2018"
# When releasing to crates.io:
# - Update html_root_url
# - Update CHANGELOG.md
# - Create git tag.
version = "0.4.0"
authors = ["Henry de Valence <hdevalence@hdevalence.ca>", "Deirdre Connolly <durumcrustulum@gmail.com>", "Chelsea Komlo <me@chelseakomlo.com>"]
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/ZcashFoundation/redjubjub"
categories = ["cryptography"]
keywords = ["cryptography", "crypto", "jubjub", "redjubjub", "zcash"]
description = "A standalone implementation of the RedJubjub signature scheme."
[package.metadata.docs.rs]
features = ["nightly"]
[dependencies]
blake2b_simd = "1.0"
byteorder = "1.4"
digest = "0.10"
jubjub = "0.8"
rand_core = "0.6"
serde = { version = "1", optional = true, features = ["derive"] }
thiserror = "1.0"
zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] }
[dev-dependencies]
bincode = "1"
criterion = "0.3"
proptest-derive = "0.3"
lazy_static = "1.4"
proptest = "1.0"
rand = "0.8"
rand_chacha = "0.3"
serde_json = "1.0"
[features]
nightly = []
default = ["serde"]
[[bench]]
name = "bench"
harness = false

@@ -1,52 +0,0 @@
A minimal [RedJubjub][redjubjub] implementation for use in [Zebra][zebra].
Two parameterizations of RedJubjub are used in Zcash, one for
`BindingSig` and one for `SpendAuthSig`. This library distinguishes
these in the type system, using the [sealed] `SigType` trait as a
type-level enum.
In addition to the `Signature`, `SigningKey`, `VerificationKey` types,
the library also provides `VerificationKeyBytes`, a [refinement] of a
`[u8; 32]` indicating that bytes represent an encoding of a RedJubjub
verification key. This allows the `VerificationKey` type to cache
verification checks related to the verification key encoding.
## Examples
Creating a `BindingSig`, serializing and deserializing it, and
verifying the signature:
```rust
# use std::convert::TryFrom;
use rand::thread_rng;
use redjubjub::*;
let msg = b"Hello!";
// Generate a secret key and sign the message
let sk = SigningKey::<Binding>::new(thread_rng());
let sig = sk.sign(thread_rng(), msg);
// Types can be converted to raw byte arrays using From/Into
let sig_bytes: [u8; 64] = sig.into();
let pk_bytes: [u8; 32] = VerificationKey::from(&sk).into();
// Deserialize and verify the signature.
let sig: Signature<Binding> = sig_bytes.into();
assert!(
VerificationKey::try_from(pk_bytes)
.and_then(|pk| pk.verify(msg, &sig))
.is_ok()
);
```
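The `SpendAuth` parameterization works the same way. The sketch below is an
illustrative snippet (not part of the original example set) using the same API,
routing the verification key through the `VerificationKeyBytes` refinement type
instead of a raw `[u8; 32]`:
```rust
use std::convert::TryFrom;
use rand::thread_rng;
use redjubjub::*;
let msg = b"Hello!";
// Generate a SpendAuth secret key and sign the message.
let sk = SigningKey::<SpendAuth>::new(thread_rng());
let sig = sk.sign(thread_rng(), msg);
// Convert the verification key into its byte-oriented refinement type.
let vk_bytes: VerificationKeyBytes<SpendAuth> = VerificationKey::from(&sk).into();
// Re-validate the encoding and verify the signature.
assert!(
    VerificationKey::try_from(vk_bytes)
        .and_then(|vk| vk.verify(msg, &sig))
        .is_ok()
);
```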
## docs
```shell,no_run
cargo doc --features "nightly" --open
```
[redjubjub]: https://zips.z.cash/protocol/protocol.pdf#concretereddsa
[zebra]: https://github.com/ZcashFoundation/zebra
[refinement]: https://en.wikipedia.org/wiki/Refinement_type
[sealed]: https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed

@@ -1,95 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use rand::{thread_rng, Rng};
use redjubjub::*;
use std::convert::TryFrom;
enum Item {
SpendAuth {
vk_bytes: VerificationKeyBytes<SpendAuth>,
sig: Signature<SpendAuth>,
},
Binding {
vk_bytes: VerificationKeyBytes<Binding>,
sig: Signature<Binding>,
},
}
fn sigs_with_distinct_keys() -> impl Iterator<Item = Item> {
std::iter::repeat_with(|| {
let mut rng = thread_rng();
let msg = b"Bench";
match rng.gen::<u8>() % 2 {
0 => {
let sk = SigningKey::<SpendAuth>::new(thread_rng());
let vk_bytes = VerificationKey::from(&sk).into();
let sig = sk.sign(thread_rng(), &msg[..]);
Item::SpendAuth { vk_bytes, sig }
}
1 => {
let sk = SigningKey::<Binding>::new(thread_rng());
let vk_bytes = VerificationKey::from(&sk).into();
let sig = sk.sign(thread_rng(), &msg[..]);
Item::Binding { vk_bytes, sig }
}
_ => panic!(),
}
})
}
fn bench_batch_verify(c: &mut Criterion) {
let mut group = c.benchmark_group("Batch Verification");
for &n in [8usize, 16, 24, 32, 40, 48, 56, 64].iter() {
group.throughput(Throughput::Elements(n as u64));
let sigs = sigs_with_distinct_keys().take(n).collect::<Vec<_>>();
group.bench_with_input(
BenchmarkId::new("Unbatched verification", n),
&sigs,
|b, sigs| {
b.iter(|| {
for item in sigs.iter() {
let msg = b"Bench";
match item {
Item::SpendAuth { vk_bytes, sig } => {
let _ = VerificationKey::try_from(*vk_bytes)
.and_then(|vk| vk.verify(msg, sig));
}
Item::Binding { vk_bytes, sig } => {
let _ = VerificationKey::try_from(*vk_bytes)
.and_then(|vk| vk.verify(msg, sig));
}
}
}
})
},
);
group.bench_with_input(
BenchmarkId::new("Batched verification", n),
&sigs,
|b, sigs| {
b.iter(|| {
let mut batch = batch::Verifier::new();
for item in sigs.iter() {
let msg = b"Bench";
match item {
Item::SpendAuth { vk_bytes, sig } => {
batch.queue((*vk_bytes, *sig, msg));
}
Item::Binding { vk_bytes, sig } => {
batch.queue((*vk_bytes, *sig, msg));
}
}
}
batch.verify(thread_rng())
})
},
);
}
group.finish();
}
criterion_group!(benches, bench_batch_verify);
criterion_main!(benches);

@@ -1,264 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Deirdre Connolly <deirdre@zfnd.org>
// - Henry de Valence <hdevalence@hdevalence.ca>
//! Performs batch RedJubjub signature verification.
//!
//! Batch verification asks whether *all* signatures in some set are valid,
//! rather than asking whether *each* of them is valid. This allows sharing
//! computations among all signature verifications, performing less work overall
//! at the cost of higher latency (the entire batch must complete), complexity of
//! caller code (which must assemble a batch of signatures across work-items),
//! and loss of the ability to easily pinpoint failing signatures.
//!
use std::convert::TryFrom;
use jubjub::*;
use rand_core::{CryptoRng, RngCore};
use crate::{private::Sealed, scalar_mul::VartimeMultiscalarMul, *};
// Shim to generate a random 128-bit value in a [u64; 4], without
// importing `rand`.
fn gen_128_bits<R: RngCore + CryptoRng>(mut rng: R) -> [u64; 4] {
let mut bytes = [0u64; 4];
bytes[0] = rng.next_u64();
bytes[1] = rng.next_u64();
bytes
}
#[derive(Clone, Debug)]
enum Inner {
SpendAuth {
vk_bytes: VerificationKeyBytes<SpendAuth>,
sig: Signature<SpendAuth>,
c: Scalar,
},
Binding {
vk_bytes: VerificationKeyBytes<Binding>,
sig: Signature<Binding>,
c: Scalar,
},
}
/// A batch verification item.
///
/// This struct exists to allow batch processing to be decoupled from the
/// lifetime of the message. This is useful when using the batch verification API
/// in an async context.
#[derive(Clone, Debug)]
pub struct Item {
inner: Inner,
}
impl<'msg, M: AsRef<[u8]>>
From<(
VerificationKeyBytes<SpendAuth>,
Signature<SpendAuth>,
&'msg M,
)> for Item
{
fn from(
(vk_bytes, sig, msg): (
VerificationKeyBytes<SpendAuth>,
Signature<SpendAuth>,
&'msg M,
),
) -> Self {
// Compute c now to avoid dependency on the msg lifetime.
let c = HStar::default()
.update(&sig.r_bytes[..])
.update(&vk_bytes.bytes[..])
.update(msg)
.finalize();
Self {
inner: Inner::SpendAuth { vk_bytes, sig, c },
}
}
}
impl<'msg, M: AsRef<[u8]>> From<(VerificationKeyBytes<Binding>, Signature<Binding>, &'msg M)>
for Item
{
fn from(
(vk_bytes, sig, msg): (VerificationKeyBytes<Binding>, Signature<Binding>, &'msg M),
) -> Self {
// Compute c now to avoid dependency on the msg lifetime.
let c = HStar::default()
.update(&sig.r_bytes[..])
.update(&vk_bytes.bytes[..])
.update(msg)
.finalize();
Self {
inner: Inner::Binding { vk_bytes, sig, c },
}
}
}
impl Item {
/// Perform non-batched verification of this `Item`.
///
/// This is useful (in combination with `Item::clone`) for implementing fallback
/// logic when batch verification fails. In contrast to
/// [`VerificationKey::verify`](crate::VerificationKey::verify), which requires
/// borrowing the message data, the `Item` type is unlinked from the lifetime of
/// the message.
#[allow(non_snake_case)]
pub fn verify_single(self) -> Result<(), Error> {
match self.inner {
Inner::Binding { vk_bytes, sig, c } => VerificationKey::<Binding>::try_from(vk_bytes)
.and_then(|vk| vk.verify_prehashed(&sig, c)),
Inner::SpendAuth { vk_bytes, sig, c } => {
VerificationKey::<SpendAuth>::try_from(vk_bytes)
.and_then(|vk| vk.verify_prehashed(&sig, c))
}
}
}
}
#[derive(Default)]
/// A batch verification context.
pub struct Verifier {
/// Signature data queued for verification.
signatures: Vec<Item>,
}
impl Verifier {
/// Construct a new batch verifier.
pub fn new() -> Verifier {
Verifier::default()
}
/// Queue an Item for verification.
pub fn queue<I: Into<Item>>(&mut self, item: I) {
self.signatures.push(item.into());
}
/// Perform batch verification, returning `Ok(())` if all signatures were
/// valid and `Err` otherwise.
///
/// The batch verification equation is:
///
/// h_G * -[sum(z_i * s_i)]P_G + sum(\[z_i\]R_i + [z_i * c_i]VK_i) = 0_G
///
/// which we split out into:
///
/// h_G * -[sum(z_i * s_i)]P_G + sum(\[z_i\]R_i) + sum([z_i * c_i]VK_i) = 0_G
///
/// so that we can use multiscalar multiplication speedups.
///
/// where for each signature i,
/// - VK_i is the verification key;
/// - R_i is the signature's R value;
/// - s_i is the signature's s value;
/// - c_i is the hash of the message and other data;
/// - z_i is a random 128-bit Scalar;
/// - h_G is the cofactor of the group;
/// - P_G is the generator of the subgroup;
///
/// Since RedJubjub uses different subgroups for different types
/// of signatures, SpendAuth's and Binding's, we need to have yet
/// another point and associated scalar accumulator for all the
/// signatures of each type in our batch, but we can still
/// amortize computation nicely in one multiscalar multiplication:
///
/// h_G * ( [-sum(z_i * s_i): i_type == SpendAuth]P_SpendAuth + [-sum(z_i * s_i): i_type == Binding]P_Binding + sum(\[z_i\]R_i) + sum([z_i * c_i]VK_i) ) = 0_G
///
/// Following the elliptic curve scalar multiplication convention,
/// scalar variables are lowercase and group point variables
/// are uppercase. This does not exactly match the RedDSA
/// notation in the [protocol specification §B.1][ps].
///
/// [ps]: https://zips.z.cash/protocol/protocol.pdf#reddsabatchverify
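///
/// # Example
///
/// A minimal usage sketch, mirroring the benchmark code in this crate. `rand`
/// is only a dev-dependency, so the snippet is marked `ignore`:
///
/// ```ignore
/// use rand::thread_rng;
/// use redjubjub::*;
///
/// let mut batch = batch::Verifier::new();
/// for _ in 0..4 {
///     let msg = b"BatchVerifyTest";
///     let sk = SigningKey::<Binding>::new(thread_rng());
///     let vk_bytes: VerificationKeyBytes<Binding> = VerificationKey::from(&sk).into();
///     let sig = sk.sign(thread_rng(), &msg[..]);
///     batch.queue((vk_bytes, sig, msg));
/// }
/// assert!(batch.verify(thread_rng()).is_ok());
/// ```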
#[allow(non_snake_case)]
pub fn verify<R: RngCore + CryptoRng>(self, mut rng: R) -> Result<(), Error> {
let n = self.signatures.len();
let mut VK_coeffs = Vec::with_capacity(n);
let mut VKs = Vec::with_capacity(n);
let mut R_coeffs = Vec::with_capacity(self.signatures.len());
let mut Rs = Vec::with_capacity(self.signatures.len());
let mut P_spendauth_coeff = Scalar::zero();
let mut P_binding_coeff = Scalar::zero();
for item in self.signatures.iter() {
let (s_bytes, r_bytes, c) = match item.inner {
Inner::SpendAuth { sig, c, .. } => (sig.s_bytes, sig.r_bytes, c),
Inner::Binding { sig, c, .. } => (sig.s_bytes, sig.r_bytes, c),
};
let s = {
// XXX-jubjub: should not use CtOption here
let maybe_scalar = Scalar::from_bytes(&s_bytes);
if maybe_scalar.is_some().into() {
maybe_scalar.unwrap()
} else {
return Err(Error::InvalidSignature);
}
};
let R = {
// XXX-jubjub: should not use CtOption here
// XXX-jubjub: inconsistent ownership in from_bytes
let maybe_point = AffinePoint::from_bytes(r_bytes);
if maybe_point.is_some().into() {
jubjub::ExtendedPoint::from(maybe_point.unwrap())
} else {
return Err(Error::InvalidSignature);
}
};
let VK = match item.inner {
Inner::SpendAuth { vk_bytes, .. } => {
VerificationKey::<SpendAuth>::try_from(vk_bytes.bytes)?.point
}
Inner::Binding { vk_bytes, .. } => {
VerificationKey::<Binding>::try_from(vk_bytes.bytes)?.point
}
};
let z = Scalar::from_raw(gen_128_bits(&mut rng));
let P_coeff = z * s;
match item.inner {
Inner::SpendAuth { .. } => {
P_spendauth_coeff -= P_coeff;
}
Inner::Binding { .. } => {
P_binding_coeff -= P_coeff;
}
};
R_coeffs.push(z);
Rs.push(R);
VK_coeffs.push(Scalar::zero() + (z * c));
VKs.push(VK);
}
use std::iter::once;
let scalars = once(&P_spendauth_coeff)
.chain(once(&P_binding_coeff))
.chain(VK_coeffs.iter())
.chain(R_coeffs.iter());
let basepoints = [SpendAuth::basepoint(), Binding::basepoint()];
let points = basepoints.iter().chain(VKs.iter()).chain(Rs.iter());
let check = ExtendedPoint::vartime_multiscalar_mul(scalars, points);
if check.is_small_order().into() {
Ok(())
} else {
Err(Error::InvalidSignature)
}
}
}

@@ -1,24 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Henry de Valence <hdevalence@hdevalence.ca>
/// The byte-encoding of the basepoint for `SpendAuthSig`.
// Extracted ad-hoc from librustzcash
// XXX add tests for this value.
pub const SPENDAUTHSIG_BASEPOINT_BYTES: [u8; 32] = [
48, 181, 242, 170, 173, 50, 86, 48, 188, 221, 219, 206, 77, 103, 101, 109, 5, 253, 28, 194,
208, 55, 187, 83, 117, 182, 233, 109, 158, 1, 161, 215,
];
/// The byte-encoding of the basepoint for `BindingSig`.
// Extracted ad-hoc from librustzcash
// XXX add tests for this value.
pub const BINDINGSIG_BASEPOINT_BYTES: [u8; 32] = [
139, 106, 11, 56, 185, 250, 174, 60, 59, 128, 59, 71, 176, 241, 70, 173, 80, 171, 34, 30, 110,
42, 251, 230, 219, 222, 69, 203, 169, 211, 129, 237,
];

@@ -1,25 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Deirdre Connolly <deirdre@zfnd.org>
// - Henry de Valence <hdevalence@hdevalence.ca>
use thiserror::Error;
/// An error related to RedJubJub signatures.
#[derive(Error, Debug, Copy, Clone, Eq, PartialEq)]
pub enum Error {
/// The encoding of a signing key was malformed.
#[error("Malformed signing key encoding.")]
MalformedSigningKey,
/// The encoding of a verification key was malformed.
#[error("Malformed verification key encoding.")]
MalformedVerificationKey,
/// Signature verification failed.
#[error("Invalid signature.")]
InvalidSignature,
}

@@ -1,733 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2020-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Chelsea H. Komlo <me@chelseakomlo.com>
// - Deirdre Connolly <deirdre@zfnd.org>
// - isis agora lovecruft <isis@patternsinthevoid.net>
//! An implementation of FROST (Flexible Round-Optimized Schnorr Threshold)
//! signatures.
//!
//! This implementation has been [independently
//! audited](https://github.com/ZcashFoundation/redjubjub/blob/main/zcash-frost-audit-report-20210323.pdf)
//! as of commit 76ba4ef / March 2021. If you are interested in deploying
//! FROST, please do not hesitate to consult the FROST authors.
//!
//! This implementation currently only supports key generation using a central
//! dealer. In the future, we will add support for key generation via a DKG,
//! as specified in the FROST paper.
//! Internally, keygen_with_dealer generates keys using Verifiable Secret
//! Sharing, where shares are generated using Shamir Secret Sharing.
use std::{collections::HashMap, convert::TryFrom, marker::PhantomData};
use jubjub::Scalar;
use rand_core::{CryptoRng, RngCore};
use zeroize::DefaultIsZeroes;
use crate::private::Sealed;
use crate::{HStar, Signature, SpendAuth, VerificationKey};
/// A secret scalar value representing a single signer's secret key.
#[derive(Clone, Copy, Default, PartialEq)]
pub struct Secret(pub(crate) Scalar);
// Zeroizes `Secret` to be the `Default` value on drop (when it goes out of
// scope). Luckily the derived `Default` includes the `Default` impl of
// jubjub::Fr/Scalar, which is four 0u64's under the hood.
impl DefaultIsZeroes for Secret {}
impl From<Scalar> for Secret {
fn from(source: Scalar) -> Secret {
Secret(source)
}
}
/// A public group element that represents a single signer's public key.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct Public(jubjub::ExtendedPoint);
impl From<jubjub::ExtendedPoint> for Public {
fn from(source: jubjub::ExtendedPoint) -> Public {
Public(source)
}
}
/// A share generated by performing a (t-out-of-n) secret sharing scheme where
/// n is the total number of shares and t is the threshold required to
/// reconstruct the secret; in this case we use Shamir's secret sharing.
#[derive(Clone)]
pub struct Share {
receiver_index: u64,
/// Secret Key.
pub(crate) value: Secret,
/// The commitments to be distributed among signers.
pub(crate) commitment: ShareCommitment,
}
/// A Jubjub point that is a commitment to one coefficient of our secret
/// polynomial.
///
/// This is a (public) commitment to one coefficient of a secret polynomial used
/// for performing verifiable secret sharing for a Shamir secret share.
#[derive(Clone, PartialEq)]
pub(crate) struct Commitment(pub(crate) jubjub::AffinePoint);
/// Contains the commitments to the coefficients for our secret polynomial _f_,
/// used to generate participants' key shares.
///
/// [`ShareCommitment`] contains a set of commitments to the coefficients (which
/// themselves are scalars) for a secret polynomial f, where f is used to
/// generate each ith participant's key share f(i). Participants use this set of
/// commitments to perform verifiable secret sharing.
///
/// Note that participants MUST be assured that they have the *same*
/// [`ShareCommitment`], either by performing pairwise comparison, or by using
/// some agreed-upon public location for publication, where each participant can
/// ensure that they received the correct (and same) value.
#[derive(Clone)]
pub struct ShareCommitment(pub(crate) Vec<Commitment>);
/// The product of all signers' individual commitments, published as part of the
/// final signature.
#[derive(PartialEq)]
pub struct GroupCommitment(pub(crate) jubjub::AffinePoint);
/// Secret and public key material generated by a dealer performing
/// [`keygen_with_dealer`].
///
/// To derive a FROST keypair, the receiver of the [`SharePackage`] *must* call
/// `.try_into()`, which under the hood also performs validation.
pub struct SharePackage {
/// The public signing key that represents the entire group.
pub(crate) group_public: VerificationKey<SpendAuth>,
/// Denotes the participant index each share is owned by.
pub index: u64,
/// This participant's public key.
pub(crate) public: Public,
/// This participant's share.
pub(crate) share: Share,
}
impl TryFrom<SharePackage> for KeyPackage {
type Error = &'static str;
/// Tries to verify a share and construct a [`KeyPackage`] from it.
///
/// When participants receive a [`SharePackage`] from the dealer, they
/// *MUST* verify the integrity of the share before continuing on to
/// transform it into a signing/verification keypair. Here, we assume that
/// every participant has the same view of the commitment issued by the
/// dealer, but implementations *MUST* make sure that all participants have
/// a consistent view of this commitment in practice.
fn try_from(sharepackage: SharePackage) -> Result<Self, &'static str> {
verify_share(&sharepackage.share)?;
Ok(KeyPackage {
index: sharepackage.index,
secret_share: sharepackage.share.value,
public: sharepackage.public,
group_public: sharepackage.group_public,
})
}
}
/// A FROST keypair, which can be generated either by a trusted dealer or using
/// a DKG.
///
/// When using a central dealer, [`SharePackage`]s are distributed to
/// participants, who then perform verification, before deriving
/// [`KeyPackage`]s, which they store to later use during signing.
#[allow(dead_code)]
pub struct KeyPackage {
index: u64,
secret_share: Secret,
public: Public,
group_public: VerificationKey<SpendAuth>,
}
/// Public data that contains all the signer's public keys as well as the
/// group public key.
///
/// Used for verification purposes before publishing a signature.
pub struct PublicKeyPackage {
/// When performing signing, the coordinator must ensure that they have the
/// correct view of participant's public keys to perform verification before
/// publishing a signature. signer_pubkeys represents all signers for a
/// signing operation.
pub(crate) signer_pubkeys: HashMap<u64, Public>,
/// group_public represents the joint public key for the entire group.
pub group_public: VerificationKey<SpendAuth>,
}
/// Allows all participants' keys to be generated using a central, trusted
/// dealer.
///
/// Under the hood, this performs verifiable secret sharing, which itself uses
/// Shamir secret sharing, from which each share becomes a participant's secret
/// key. The output from this function is a set of shares along with one single
/// commitment that participants use to verify the integrity of the share. The
/// number of signers is limited to 255.
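///
/// # Example
///
/// A dealer-side sketch (`rand` is only a dev-dependency, so the snippet is
/// marked `ignore`): generate shares for 5 signers with a threshold of 3, then
/// distribute one [`SharePackage`] to each participant.
///
/// ```ignore
/// use rand::thread_rng;
///
/// let (share_packages, pubkey_package) =
///     frost::keygen_with_dealer(5, 3, thread_rng()).unwrap();
/// assert_eq!(share_packages.len(), 5);
/// ```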
pub fn keygen_with_dealer<R: RngCore + CryptoRng>(
num_signers: u8,
threshold: u8,
mut rng: R,
) -> Result<(Vec<SharePackage>, PublicKeyPackage), &'static str> {
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
let secret = Secret(Scalar::from_bytes_wide(&bytes));
let group_public = VerificationKey::from(&secret.0);
let shares = generate_shares(&secret, num_signers, threshold, rng)?;
let mut sharepackages: Vec<SharePackage> = Vec::with_capacity(num_signers as usize);
let mut signer_pubkeys: HashMap<u64, Public> = HashMap::with_capacity(num_signers as usize);
for share in shares {
let signer_public = Public(SpendAuth::basepoint() * share.value.0);
sharepackages.push(SharePackage {
index: share.receiver_index,
share: share.clone(),
public: signer_public,
group_public,
});
signer_pubkeys.insert(share.receiver_index, signer_public);
}
Ok((
sharepackages,
PublicKeyPackage {
signer_pubkeys,
group_public,
},
))
}
/// Verifies that a share is consistent with a commitment.
///
/// This ensures that this participant's share has been generated using the same
/// mechanism as all other signing participants. Note that participants *MUST*
/// ensure that they have the same view as all other participants of the
/// commitment!
fn verify_share(share: &Share) -> Result<(), &'static str> {
let f_result = SpendAuth::basepoint() * share.value.0;
let x = Scalar::from(share.receiver_index as u64);
let (_, result) = share.commitment.0.iter().fold(
(Scalar::one(), jubjub::ExtendedPoint::identity()),
|(x_to_the_i, sum_so_far), comm_i| (x_to_the_i * x, sum_so_far + comm_i.0 * x_to_the_i),
);
if !(f_result == result) {
return Err("Share is invalid.");
}
Ok(())
}
/// Creates secret shares for a given secret.
///
/// This function accepts a secret from which shares are generated. While in
/// FROST this secret should always be generated randomly, we allow this secret
/// to be specified for this internal function for testability.
///
/// Internally, [`generate_shares`] performs verifiable secret sharing, which
/// generates shares via Shamir Secret Sharing, and then generates public
/// commitments to those shares.
///
/// More specifically, [`generate_shares`]:
/// - Randomly samples coefficients [a, b, c]; these represent a secret
/// polynomial f
/// - For each participant i, their secret share is f(i)
/// - The commitment to the secret polynomial f is [g^a, g^b, g^c]
fn generate_shares<R: RngCore + CryptoRng>(
secret: &Secret,
numshares: u8,
threshold: u8,
mut rng: R,
) -> Result<Vec<Share>, &'static str> {
if threshold < 1 {
return Err("Threshold cannot be 0");
}
if numshares < 1 {
return Err("Number of shares cannot be 0");
}
if threshold > numshares {
return Err("Threshold cannot exceed numshares");
}
let numcoeffs = threshold - 1;
let mut coefficients: Vec<Scalar> = Vec::with_capacity(threshold as usize);
let mut shares: Vec<Share> = Vec::with_capacity(numshares as usize);
let mut commitment: ShareCommitment = ShareCommitment(Vec::with_capacity(threshold as usize));
for _ in 0..numcoeffs {
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
coefficients.push(Scalar::from_bytes_wide(&bytes));
}
// Verifiable secret sharing, to make sure that participants can ensure their secret is consistent
// with every other participant's.
commitment.0.push(Commitment(jubjub::AffinePoint::from(
SpendAuth::basepoint() * secret.0,
)));
for c in &coefficients {
commitment.0.push(Commitment(jubjub::AffinePoint::from(
SpendAuth::basepoint() * c,
)));
}
// Evaluate the polynomial with `secret` as the constant term
// and `coeffs` as the other coefficients at the point x=share_index,
// using Horner's method.
for index in 1..numshares + 1 {
let scalar_index = Scalar::from(index as u64);
let mut value = Scalar::zero();
// Polynomial evaluation, for this index
for i in (0..numcoeffs).rev() {
value += &coefficients[i as usize];
value *= scalar_index;
}
value += secret.0;
shares.push(Share {
receiver_index: index as u64,
value: Secret(value),
commitment: commitment.clone(),
});
}
Ok(shares)
}
/// Comprised of hiding and binding nonces.
///
/// Note that [`SigningNonces`] must be used *only once* for a signing
/// operation; re-using nonces will result in leakage of a signer's long-lived
/// signing key.
#[derive(Clone, Copy, Default)]
pub struct SigningNonces {
hiding: Scalar,
binding: Scalar,
}
// Zeroizes `SigningNonces` to be the `Default` value on drop (when it goes out
// of scope). Luckily the derived `Default` includes the `Default` impl of the
// `jubjub::Fr/Scalar`'s, which is four 0u64's under the hood.
impl DefaultIsZeroes for SigningNonces {}
impl SigningNonces {
/// Generates a new signing nonce.
///
/// Each participant generates signing nonces before performing a signing
/// operation.
pub fn new<R>(rng: &mut R) -> Self
where
R: CryptoRng + RngCore,
{
fn random_nonzero_bytes<R>(rng: &mut R) -> [u8; 64]
where
R: CryptoRng + RngCore,
{
let mut bytes = [0; 64];
loop {
rng.fill_bytes(&mut bytes);
if bytes != [0; 64] {
return bytes;
}
}
}
// The values of 'hiding' and 'binding' must be non-zero so that commitments are not the
// identity.
let hiding = Scalar::from_bytes_wide(&random_nonzero_bytes(rng));
let binding = Scalar::from_bytes_wide(&random_nonzero_bytes(rng));
Self { hiding, binding }
}
}
/// Published by each participant in the first round of the signing protocol.
///
/// This step can be batched if desired by the implementation. Each
/// SigningCommitment can be used for exactly *one* signature.
#[derive(Copy, Clone)]
pub struct SigningCommitments {
/// The participant index
pub(crate) index: u64,
/// The hiding point.
pub(crate) hiding: jubjub::ExtendedPoint,
/// The binding point.
pub(crate) binding: jubjub::ExtendedPoint,
}
impl From<(u64, &SigningNonces)> for SigningCommitments {
/// For SpendAuth signatures only, not Binding signatures, in RedJubjub/Zcash.
fn from((index, nonces): (u64, &SigningNonces)) -> Self {
Self {
index,
hiding: SpendAuth::basepoint() * nonces.hiding,
binding: SpendAuth::basepoint() * nonces.binding,
}
}
}
/// Generated by the coordinator of the signing operation and distributed to
/// each signing party.
pub struct SigningPackage {
/// The set of commitments participants published in the first round of the
/// protocol.
pub signing_commitments: Vec<SigningCommitments>,
/// Message which each participant will sign.
///
/// Each signer should perform protocol-specific verification on the message.
pub message: Vec<u8>,
}
/// A representation of a single signature used in FROST structures and messages.
#[derive(Clone, Copy, Default, PartialEq)]
pub struct SignatureResponse(pub(crate) Scalar);
/// A participant's signature share, which the coordinator will use to aggregate
/// with all other signer's shares into the joint signature.
#[derive(Clone, Copy, Default)]
pub struct SignatureShare {
/// Represents the participant index.
pub(crate) index: u64,
/// This participant's signature over the message.
pub(crate) signature: SignatureResponse,
}
// Zeroizes `SignatureShare` to be the `Default` value on drop (when it goes out
// of scope). Luckily the derived `Default` includes the `Default` impl of
// jubjub::Fr/Scalar, which is four 0u64's under the hood, and u32, which is
// 0u32.
impl DefaultIsZeroes for SignatureShare {}
impl SignatureShare {
/// Tests if a signature share issued by a participant is valid before
/// aggregating it into a final joint signature to publish.
pub fn check_is_valid(
&self,
pubkey: &Public,
lambda_i: Scalar,
commitment: jubjub::ExtendedPoint,
challenge: Scalar,
) -> Result<(), &'static str> {
if (SpendAuth::basepoint() * self.signature.0)
!= (commitment + pubkey.0 * challenge * lambda_i)
{
return Err("Invalid signature share");
}
Ok(())
}
}
/// Done once by each participant, to generate _their_ nonces and commitments
/// that are then used during signing.
///
/// When performing signing using two rounds, num_nonces would equal 1, to
/// perform the first round. Batching entails generating more than one
/// nonce/commitment pair at a time. Nonces should be stored in secret storage
/// for later use, whereas the commitments are published.
/// The number of nonces is limited to 255. This limit can be increased if it
/// turns out to be too conservative.
// TODO: Make sure the above is a correct statement, fix if needed in:
// https://github.com/ZcashFoundation/redjubjub/issues/111
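///
/// # Example
///
/// A sketch of a single participant generating one nonce/commitment pair
/// (`rand` is only a dev-dependency, so the snippet is marked `ignore`):
///
/// ```ignore
/// use rand::thread_rng;
///
/// let participant_index = 1;
/// let (nonces, commitments) = frost::preprocess(1, participant_index, &mut thread_rng());
/// // `nonces` must stay secret; `commitments` are published to the coordinator.
/// assert_eq!(nonces.len(), 1);
/// assert_eq!(commitments.len(), 1);
/// ```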
pub fn preprocess<R>(
num_nonces: u8,
participant_index: u64,
rng: &mut R,
) -> (Vec<SigningNonces>, Vec<SigningCommitments>)
where
R: CryptoRng + RngCore,
{
let mut signing_nonces: Vec<SigningNonces> = Vec::with_capacity(num_nonces as usize);
let mut signing_commitments: Vec<SigningCommitments> = Vec::with_capacity(num_nonces as usize);
for _ in 0..num_nonces {
let nonces = SigningNonces::new(rng);
signing_commitments.push(SigningCommitments::from((participant_index, &nonces)));
signing_nonces.push(nonces);
}
(signing_nonces, signing_commitments)
}
/// Generates the binding factor that ensures each signature share is strongly
/// bound to a signing set, specific set of commitments, and a specific message.
fn gen_rho_i(index: u64, signing_package: &SigningPackage) -> Scalar {
// Hash signature message with HStar before deriving the binding factor.
//
// To avoid a collision with other inputs to the hash that generates the
// binding factor, we should hash our input message first. Our 'standard'
// hash is HStar, which uses a domain separator already, and is the same one
// that generates the binding factor.
let message_hash = HStar::default()
.update(signing_package.message.as_slice())
.finalize();
let mut hasher = HStar::default();
hasher
.update("FROST_rho".as_bytes())
.update(index.to_be_bytes())
.update(message_hash.to_bytes());
for item in signing_package.signing_commitments.iter() {
hasher.update(item.index.to_be_bytes());
let hiding_bytes = jubjub::AffinePoint::from(item.hiding).to_bytes();
hasher.update(hiding_bytes);
let binding_bytes = jubjub::AffinePoint::from(item.binding).to_bytes();
hasher.update(binding_bytes);
}
hasher.finalize()
}
/// Generates the group commitment which is published as part of the joint
/// Schnorr signature.
fn gen_group_commitment(
signing_package: &SigningPackage,
bindings: &HashMap<u64, Scalar>,
) -> Result<GroupCommitment, &'static str> {
let identity = jubjub::ExtendedPoint::identity();
let mut accumulator = identity;
for commitment in signing_package.signing_commitments.iter() {
// The following check prevents a party from accidentally revealing their share.
// Note that the '&&' operator would be sufficient.
if identity == commitment.binding || identity == commitment.hiding {
return Err("Commitment equals the identity.");
}
let rho_i = bindings
.get(&commitment.index)
.ok_or("No matching commitment index")?;
accumulator += commitment.hiding + (commitment.binding * rho_i)
}
Ok(GroupCommitment(jubjub::AffinePoint::from(accumulator)))
}
/// Generates the challenge as is required for Schnorr signatures.
fn gen_challenge(
signing_package: &SigningPackage,
group_commitment: &GroupCommitment,
group_public: &VerificationKey<SpendAuth>,
) -> Scalar {
let group_commitment_bytes = jubjub::AffinePoint::from(group_commitment.0).to_bytes();
HStar::default()
.update(group_commitment_bytes)
.update(group_public.bytes.bytes)
.update(signing_package.message.as_slice())
.finalize()
}
/// Generates the lagrange coefficient for the i'th participant.
fn gen_lagrange_coeff(
signer_index: u64,
signing_package: &SigningPackage,
) -> Result<Scalar, &'static str> {
let mut num = Scalar::one();
let mut den = Scalar::one();
for commitment in signing_package.signing_commitments.iter() {
if commitment.index == signer_index {
continue;
}
num *= Scalar::from(commitment.index as u64);
den *= Scalar::from(commitment.index as u64) - Scalar::from(signer_index as u64);
}
if den == Scalar::zero() {
return Err("Duplicate shares provided");
}
// TODO: handle this unwrap better like other CtOption's
let lagrange_coeff = num * den.invert().unwrap();
Ok(lagrange_coeff)
}
/// Performed once by each participant selected for the signing operation.
///
/// Receives the message to be signed and a set of signing commitments and a set
/// of randomizing commitments to be used in that signing operation, including
/// that for this participant.
///
/// Assumes the participant has already determined which nonce corresponds with
/// the commitment that was assigned by the coordinator in the SigningPackage.
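///
/// # Example
///
/// A participant-side sketch, with `signing_package` received from the
/// coordinator, `nonces` from an earlier [`preprocess`] call, and
/// `share_package` from key generation (hypothetical bindings, so the snippet
/// is marked `ignore`):
///
/// ```ignore
/// let signature_share = frost::sign(&signing_package, nonces[0], &share_package).unwrap();
/// // Send `signature_share` back to the coordinator for aggregation.
/// ```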
pub fn sign(
signing_package: &SigningPackage,
participant_nonces: SigningNonces,
share_package: &SharePackage,
) -> Result<SignatureShare, &'static str> {
let mut bindings: HashMap<u64, Scalar> =
HashMap::with_capacity(signing_package.signing_commitments.len());
for comm in signing_package.signing_commitments.iter() {
let rho_i = gen_rho_i(comm.index, &signing_package);
bindings.insert(comm.index, rho_i);
}
let lambda_i = gen_lagrange_coeff(share_package.index, &signing_package)?;
let group_commitment = gen_group_commitment(&signing_package, &bindings)?;
let challenge = gen_challenge(
&signing_package,
&group_commitment,
&share_package.group_public,
);
let participant_rho_i = bindings
.get(&share_package.index)
.ok_or("No matching binding!")?;
// The Schnorr signature share
let signature: Scalar = participant_nonces.hiding
+ (participant_nonces.binding * participant_rho_i)
+ (lambda_i * share_package.share.value.0 * challenge);
Ok(SignatureShare {
index: share_package.index,
signature: SignatureResponse(signature),
})
}
/// Verifies each participant's signature share, and if all are valid,
/// aggregates the shares into a signature to publish.
///
/// Resulting signature is compatible with verification of a plain SpendAuth
/// signature.
///
/// This operation is performed by a coordinator that can communicate with all
/// the signing participants before publishing the final signature. The
/// coordinator can be one of the participants or a semi-trusted third party
/// (who is trusted to not perform denial of service attacks, but does not learn
/// any secret information). Note that because the coordinator is trusted to
/// report misbehaving parties in order to avoid publishing an invalid
/// signature, if the coordinator themselves is a signer and misbehaves, they
/// can avoid that step. However, at worst, this results in a denial of
/// service attack due to publishing an invalid signature.
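///
/// # Example
///
/// A coordinator-side sketch, with `signing_package` as sent to the
/// participants, `signature_shares` collected from them, and `pubkeys` from
/// key generation (hypothetical bindings, so the snippet is marked `ignore`):
///
/// ```ignore
/// let group_signature =
///     frost::aggregate(&signing_package, &signature_shares, &pubkeys).unwrap();
/// // The result verifies like a plain SpendAuth signature.
/// assert!(pubkeys
///     .group_public
///     .verify(signing_package.message.as_slice(), &group_signature)
///     .is_ok());
/// ```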
pub fn aggregate(
signing_package: &SigningPackage,
signing_shares: &[SignatureShare],
pubkeys: &PublicKeyPackage,
) -> Result<Signature<SpendAuth>, &'static str> {
let mut bindings: HashMap<u64, Scalar> =
HashMap::with_capacity(signing_package.signing_commitments.len());
for comm in signing_package.signing_commitments.iter() {
let rho_i = gen_rho_i(comm.index, &signing_package);
bindings.insert(comm.index, rho_i);
}
let group_commitment = gen_group_commitment(&signing_package, &bindings)?;
let challenge = gen_challenge(&signing_package, &group_commitment, &pubkeys.group_public);
for signing_share in signing_shares {
let signer_pubkey = pubkeys.signer_pubkeys[&signing_share.index];
let lambda_i = gen_lagrange_coeff(signing_share.index, &signing_package)?;
let signer_commitment = signing_package
.signing_commitments
.iter()
.find(|comm| comm.index == signing_share.index)
.ok_or("No matching signing commitment for signer")?;
let commitment_i =
signer_commitment.hiding + (signer_commitment.binding * bindings[&signing_share.index]);
signing_share.check_is_valid(&signer_pubkey, lambda_i, commitment_i, challenge)?;
}
// The aggregation of the signature shares by summing them up, resulting in
// a plain Schnorr signature.
let mut z = Scalar::zero();
for signature_share in signing_shares {
z += signature_share.signature.0;
}
Ok(Signature {
r_bytes: jubjub::AffinePoint::from(group_commitment.0).to_bytes(),
s_bytes: z.to_bytes(),
_marker: PhantomData,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::thread_rng;
fn reconstruct_secret(shares: Vec<Share>) -> Result<Scalar, &'static str> {
let numshares = shares.len();
if numshares < 1 {
return Err("No shares provided");
}
let mut lagrange_coeffs: Vec<Scalar> = Vec::with_capacity(numshares as usize);
for i in 0..numshares {
let mut num = Scalar::one();
let mut den = Scalar::one();
for j in 0..numshares {
if j == i {
continue;
}
num *= Scalar::from(shares[j].receiver_index as u64);
den *= Scalar::from(shares[j].receiver_index as u64)
- Scalar::from(shares[i].receiver_index as u64);
}
if den == Scalar::zero() {
return Err("Duplicate shares provided");
}
lagrange_coeffs.push(num * den.invert().unwrap());
}
let mut secret = Scalar::zero();
for i in 0..numshares {
secret += lagrange_coeffs[i] * shares[i].value.0;
}
Ok(secret)
}
/// This tests that using Shamir's secret sharing to compute an arbitrary
/// value works.
#[test]
fn check_share_generation() {
let mut rng = thread_rng();
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
let secret = Secret(Scalar::from_bytes_wide(&bytes));
let _ = SpendAuth::basepoint() * secret.0;
let shares = generate_shares(&secret, 5, 3, rng).unwrap();
for share in shares.iter() {
assert_eq!(verify_share(&share), Ok(()));
}
assert_eq!(reconstruct_secret(shares).unwrap(), secret.0)
}
}

@@ -1,40 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Deirdre Connolly <deirdre@zfnd.org>
// - Henry de Valence <hdevalence@hdevalence.ca>
use blake2b_simd::{Params, State};
use jubjub::Scalar;
/// Provides H^star, the hash-to-scalar function used by RedJubjub.
pub struct HStar {
state: State,
}
impl Default for HStar {
fn default() -> Self {
let state = Params::new()
.hash_length(64)
.personal(b"Zcash_RedJubjubH")
.to_state();
Self { state }
}
}
impl HStar {
/// Add `data` to the hash, and return `Self` for chaining.
pub fn update(&mut self, data: impl AsRef<[u8]>) -> &mut Self {
self.state.update(data.as_ref());
self
}
/// Consume `self` to compute the hash output.
pub fn finalize(&self) -> Scalar {
Scalar::from_bytes_wide(self.state.finalize().as_array())
}
}

@@ -1,78 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Deirdre Connolly <deirdre@zfnd.org>
// - Henry de Valence <hdevalence@hdevalence.ca>
#![deny(missing_docs)]
#![doc = include_str!("../README.md")]
pub mod batch;
mod constants;
mod error;
pub mod frost;
mod hash;
mod messages;
mod scalar_mul;
pub(crate) mod signature;
mod signing_key;
mod verification_key;
/// An element of the JubJub scalar field used for randomization of public and secret keys.
pub type Randomizer = jubjub::Scalar;
use hash::HStar;
pub use error::Error;
pub use signature::Signature;
pub use signing_key::SigningKey;
pub use verification_key::{VerificationKey, VerificationKeyBytes};
/// Abstracts over different RedJubJub parameter choices, [`Binding`]
/// and [`SpendAuth`].
///
/// As described [at the end of §5.4.6][concretereddsa] of the Zcash
/// protocol specification, the generator used in RedJubjub is left as
/// an unspecified parameter, chosen differently for each of
/// `BindingSig` and `SpendAuthSig`.
///
/// To handle this, we encode the parameter choice as a genuine type
/// parameter.
///
/// [concretereddsa]: https://zips.z.cash/protocol/protocol.pdf#concretereddsa
pub trait SigType: private::Sealed {}
/// A type variable corresponding to Zcash's `BindingSig`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Binding {}
impl SigType for Binding {}
/// A type variable corresponding to Zcash's `SpendAuthSig`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum SpendAuth {}
impl SigType for SpendAuth {}
pub(crate) mod private {
use super::*;
pub trait Sealed: Copy + Clone + Eq + PartialEq + std::fmt::Debug {
fn basepoint() -> jubjub::ExtendedPoint;
}
impl Sealed for Binding {
fn basepoint() -> jubjub::ExtendedPoint {
jubjub::AffinePoint::from_bytes(constants::BINDINGSIG_BASEPOINT_BYTES)
.unwrap()
.into()
}
}
impl Sealed for SpendAuth {
fn basepoint() -> jubjub::ExtendedPoint {
jubjub::AffinePoint::from_bytes(constants::SPENDAUTHSIG_BASEPOINT_BYTES)
.unwrap()
.into()
}
}
}

@@ -1,269 +0,0 @@
//! The FROST communication messages specified in [RFC-001]
//!
//! [RFC-001]: https://github.com/ZcashFoundation/redjubjub/blob/main/rfcs/0001-messages.md
use crate::{frost, signature, verification_key, SpendAuth};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
#[cfg(test)]
use proptest_derive::Arbitrary;
#[cfg(test)]
mod arbitrary;
mod constants;
mod serialize;
#[cfg(test)]
mod tests;
mod validate;
/// Define our own `Secret` type instead of using [`frost::Secret`].
///
/// The serialization design specifies that `Secret` is a [`jubjub::Scalar`] that uses:
/// "a 32-byte little-endian canonical representation".
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Copy)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct Secret([u8; 32]);
/// Define our own `Commitment` type instead of using [`frost::Commitment`].
///
/// The serialization design specifies that `Commitment` is a [`jubjub::AffinePoint`] that uses:
/// "a 32-byte little-endian canonical representation".
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Copy)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct Commitment([u8; 32]);
impl From<frost::Commitment> for Commitment {
fn from(value: frost::Commitment) -> Commitment {
Commitment(jubjub::AffinePoint::from(value.0).to_bytes())
}
}
/// Define our own `GroupCommitment` type instead of using [`frost::GroupCommitment`].
///
/// The serialization design specifies that `GroupCommitment` is a [`jubjub::AffinePoint`] that uses:
/// "a 32-byte little-endian canonical representation".
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Copy)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct GroupCommitment([u8; 32]);
/// Define our own `SignatureResponse` type instead of using [`frost::SignatureResponse`].
///
/// The serialization design specifies that `SignatureResponse` is a [`jubjub::Scalar`] that uses:
/// "a 32-byte little-endian canonical representation".
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Copy)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct SignatureResponse([u8; 32]);
impl From<signature::Signature<SpendAuth>> for SignatureResponse {
fn from(value: signature::Signature<SpendAuth>) -> SignatureResponse {
SignatureResponse(value.s_bytes)
}
}
impl From<signature::Signature<SpendAuth>> for GroupCommitment {
fn from(value: signature::Signature<SpendAuth>) -> GroupCommitment {
GroupCommitment(value.r_bytes)
}
}
/// Define our own `VerificationKey` type instead of using [`verification_key::VerificationKey<SpendAuth>`].
///
/// The serialization design specifies that `VerificationKey` is a [`verification_key::VerificationKeyBytes`] that uses:
/// "a 32-byte little-endian canonical representation".
#[derive(Serialize, Deserialize, PartialEq, Debug, Copy, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct VerificationKey([u8; 32]);
impl From<verification_key::VerificationKey<SpendAuth>> for VerificationKey {
fn from(value: verification_key::VerificationKey<SpendAuth>) -> VerificationKey {
VerificationKey(<[u8; 32]>::from(value))
}
}
/// The data required to serialize a frost message.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct Message {
header: Header,
payload: Payload,
}
/// The data required to serialize the common header fields for every message.
///
/// Note: the `msg_type` is derived from the `payload` enum variant.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Copy)]
pub struct Header {
version: MsgVersion,
sender: ParticipantId,
receiver: ParticipantId,
}
/// The data required to serialize the payload for a message.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub enum Payload {
SharePackage(SharePackage),
SigningCommitments(SigningCommitments),
SigningPackage(SigningPackage),
SignatureShare(SignatureShare),
AggregateSignature(AggregateSignature),
}
/// The numeric values used to identify each [`Payload`] variant during serialization.
// TODO: the spec says `#[repr(u8)]`, but that is incompatible with `bincode`;
// manual serialization and deserialization would be needed.
#[repr(u32)]
#[non_exhaustive]
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum MsgType {
SharePackage,
SigningCommitments,
SigningPackage,
SignatureShare,
AggregateSignature,
}
/// The numeric values used to identify the protocol version during serialization.
#[derive(PartialEq, Serialize, Deserialize, Debug, Clone, Copy)]
pub struct MsgVersion(u8);
/// The numeric values used to identify each participant during serialization.
///
/// In the `frost` module, participant ID `0` should be invalid.
/// But in serialization, we want participants to be indexed from `0..n`,
/// where `n` is the number of participants.
/// This helps us look up their shares and commitments in serialized arrays.
/// So in serialization, we assign the dealer and aggregator the highest IDs,
/// and mark those IDs as invalid for signers.
///
/// "When performing Shamir secret sharing, a polynomial `f(x)` is used to generate
/// each party's share of the secret. The actual secret is `f(0)` and the party with
/// ID `i` will be given a share with value `f(i)`.
/// Since a DKG may be implemented in the future, we recommend that the ID `0` be declared invalid."
/// https://raw.githubusercontent.com/ZcashFoundation/redjubjub/main/zcash-frost-audit-report-20210323.pdf#d
#[derive(PartialEq, Eq, Hash, PartialOrd, Debug, Copy, Clone, Ord)]
pub enum ParticipantId {
/// A serialized participant ID for a signer.
///
/// Must be less than or equal to [`constants::MAX_SIGNER_PARTICIPANT_ID`].
Signer(u64),
/// The fixed participant ID for the dealer as defined in [`constants::DEALER_PARTICIPANT_ID`].
Dealer,
/// The fixed participant ID for the aggregator as defined in [`constants::AGGREGATOR_PARTICIPANT_ID`].
Aggregator,
}
impl From<ParticipantId> for u64 {
fn from(value: ParticipantId) -> u64 {
match value {
// An id of `0` is invalid in frost.
ParticipantId::Signer(id) => id + 1,
ParticipantId::Dealer => constants::DEALER_PARTICIPANT_ID,
ParticipantId::Aggregator => constants::AGGREGATOR_PARTICIPANT_ID,
}
}
}
/// The data required to serialize [`frost::SharePackage`].
///
/// The dealer sends this message to each signer for this round.
/// With this, the signer should be able to build a [`SharePackage`] and use
/// the [`frost::sign()`] function.
///
/// Note: [`frost::SharePackage::public`] can be calculated from [`SharePackage::secret_share`].
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct SharePackage {
/// The public signing key that represents the entire group:
/// [`frost::SharePackage::group_public`].
group_public: VerificationKey,
/// This participant's secret key share: [`frost::SharePackage::share`].
secret_share: Secret,
/// The commitments to the coefficients for our secret polynomial _f_,
/// used to generate participants' key shares. Participants use these to perform
/// verifiable secret sharing.
/// Share packages that contain duplicate or missing [`ParticipantId`]s are invalid.
/// [`ParticipantId`]s must be serialized in ascending numeric order.
share_commitment: BTreeMap<ParticipantId, Commitment>,
}
/// The data required to serialize [`frost::SigningCommitments`].
///
/// Each signer must send this message to the aggregator.
/// A signing commitment from the first round of the signing protocol.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct SigningCommitments {
/// The hiding point: [`frost::SigningCommitments::hiding`]
hiding: Commitment,
/// The binding point: [`frost::SigningCommitments::binding`]
binding: Commitment,
}
/// The data required to serialize [`frost::SigningPackage`].
///
/// The aggregator decides what message is going to be signed and
/// sends it to each signer with all the commitments collected.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct SigningPackage {
/// The collected commitments for each signer as a hashmap of
/// unique participant identifiers: [`frost::SigningPackage::signing_commitments`]
///
/// Signing packages that contain duplicate or missing [`ParticipantId`]s are invalid.
signing_commitments: BTreeMap<ParticipantId, SigningCommitments>,
/// The message to be signed: [`frost::SigningPackage::message`].
///
/// Each signer should perform protocol-specific verification on the message.
message: Vec<u8>,
}
impl From<SigningPackage> for frost::SigningPackage {
fn from(value: SigningPackage) -> frost::SigningPackage {
let mut signing_commitments = Vec::new();
for (participant_id, commitment) in &value.signing_commitments {
let s = frost::SigningCommitments {
index: u64::from(*participant_id),
// TODO: The `from_bytes()` response is a `CtOption` so we have to `unwrap()`
hiding: jubjub::ExtendedPoint::from(
jubjub::AffinePoint::from_bytes(commitment.hiding.0).unwrap(),
),
binding: jubjub::ExtendedPoint::from(
jubjub::AffinePoint::from_bytes(commitment.binding.0).unwrap(),
),
};
signing_commitments.push(s);
}
frost::SigningPackage {
signing_commitments,
message: value.message,
}
}
}
/// The data required to serialize [`frost::SignatureShare`].
///
/// Each signer sends their signatures to the aggregator who is going to collect them
/// and generate a final spend signature.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct SignatureShare {
/// This participant's signature over the message: [`frost::SignatureShare::signature`]
signature: SignatureResponse,
}
/// The data required to serialize a successful output from [`frost::aggregate()`].
///
/// The final signature is broadcasted by the aggregator to all signers.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[cfg_attr(test, derive(Arbitrary))]
pub struct AggregateSignature {
/// The aggregated group commitment: [`signature::Signature::r_bytes`] returned by [`frost::aggregate()`]
group_commitment: GroupCommitment,
/// A plain Schnorr signature created by summing all the signature shares:
/// [`signature::Signature::s_bytes`] returned by [`frost::aggregate()`]
schnorr_signature: SignatureResponse,
}

@@ -1,55 +0,0 @@
use proptest::{
arbitrary::{any, Arbitrary},
prelude::*,
};
use super::*;
impl Arbitrary for Header {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
(
any::<MsgVersion>(),
any::<ParticipantId>(),
any::<ParticipantId>(),
)
.prop_filter(
"Sender and receiver participant IDs can not be the same",
|(_, sender, receiver)| sender != receiver,
)
.prop_map(|(version, sender, receiver)| Header {
version: version,
sender: sender,
receiver: receiver,
})
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}
impl Arbitrary for MsgVersion {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
Just(constants::BASIC_FROST_SERIALIZATION).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
impl Arbitrary for ParticipantId {
type Parameters = ();
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
prop_oneof![
(u64::MIN..=constants::MAX_SIGNER_PARTICIPANT_ID).prop_map(ParticipantId::Signer),
Just(ParticipantId::Dealer),
Just(ParticipantId::Aggregator),
]
.boxed()
}
type Strategy = BoxedStrategy<Self>;
}

@@ -1,31 +0,0 @@
//! Definitions of constants.
use super::MsgVersion;
/// The first version of FROST messages
pub const BASIC_FROST_SERIALIZATION: MsgVersion = MsgVersion(0);
/// The fixed participant ID for the dealer.
pub const DEALER_PARTICIPANT_ID: u64 = u64::MAX - 1;
/// The fixed participant ID for the aggregator.
pub const AGGREGATOR_PARTICIPANT_ID: u64 = u64::MAX;
/// The maximum `ParticipantId::Signer` in this serialization format.
///
/// We reserve two participant IDs for the dealer and aggregator.
pub const MAX_SIGNER_PARTICIPANT_ID: u64 = u64::MAX - 2;
/// The maximum number of signers
///
/// By protocol, the number of signers can't be more than 255.
pub const MAX_SIGNERS: u8 = 255;
/// The maximum length of a Zcash message, in bytes.
pub const ZCASH_MAX_PROTOCOL_MESSAGE_LEN: usize = 2 * 1024 * 1024;
/// The minimum number of signers of any FROST setup.
pub const MIN_SIGNERS: usize = 2;
/// The minimum number of signers that must sign.
pub const MIN_THRESHOLD: usize = 2;

@@ -1,68 +0,0 @@
//! Serialization rules specified in [RFC-001#Serialize-Deserialize]
//!
//! We automatically serialize and deserialize using serde derivations where possible.
//! Sometimes we need to implement ourselves, this file holds that code.
//!
//! [RFC-001#Serialize-Deserialize]: https://github.com/ZcashFoundation/redjubjub/blob/main/rfcs/0001-messages.md#serializationdeserialization
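//!
//! For example, a [`ParticipantId`] round-trips through `bincode` as a plain
//! little-endian `u64` (an illustrative sketch from inside the crate; `bincode`
//! is only a dev-dependency, so the snippet is marked `ignore`):
//!
//! ```ignore
//! let id = ParticipantId::Signer(7);
//! let bytes = bincode::serialize(&id).unwrap();
//! assert_eq!(bytes, 7u64.to_le_bytes());
//! let round_trip: ParticipantId = bincode::deserialize(&bytes).unwrap();
//! assert_eq!(round_trip, id);
//! ```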
use serde::ser::{Serialize, Serializer};
use serde::de::{self, Deserialize, Deserializer, Visitor};
use super::constants::{
AGGREGATOR_PARTICIPANT_ID, DEALER_PARTICIPANT_ID, MAX_SIGNER_PARTICIPANT_ID,
};
use super::*;
use std::fmt;
impl Serialize for ParticipantId {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ParticipantId::Signer(id) => {
assert!(id <= MAX_SIGNER_PARTICIPANT_ID);
serializer.serialize_u64(id)
}
ParticipantId::Dealer => serializer.serialize_u64(DEALER_PARTICIPANT_ID),
ParticipantId::Aggregator => serializer.serialize_u64(AGGREGATOR_PARTICIPANT_ID),
}
}
}
struct ParticipantIdVisitor;
impl<'de> Visitor<'de> for ParticipantIdVisitor {
type Value = ParticipantId;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(
format!("an integer between {} and {}", std::u64::MIN, std::u64::MAX).as_str(),
)
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
// Note: deserialization can't fail, because all values are valid.
if value == DEALER_PARTICIPANT_ID {
return Ok(ParticipantId::Dealer);
} else if value == AGGREGATOR_PARTICIPANT_ID {
return Ok(ParticipantId::Aggregator);
} else {
return Ok(ParticipantId::Signer(value));
}
}
}
impl<'de> Deserialize<'de> for ParticipantId {
fn deserialize<D>(deserializer: D) -> Result<ParticipantId, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_u64(ParticipantIdVisitor)
}
}

@@ -1,2 +0,0 @@
mod integration;
mod prop;

@@ -1,805 +0,0 @@
use crate::{
frost,
messages::{
validate::{MsgErr, Validate},
*,
},
verification_key,
};
use rand::thread_rng;
use serde_json;
use std::convert::TryFrom;
#[test]
fn validate_version() {
// A version number that we expect to be always invalid
const INVALID_VERSION: u8 = u8::MAX;
let setup = basic_setup();
let header = Header {
version: MsgVersion(INVALID_VERSION),
sender: setup.dealer,
receiver: setup.signer1,
};
let validate = Validate::validate(&header);
assert_eq!(validate, Err(MsgErr::WrongVersion));
let validate = Validate::validate(&Header {
version: constants::BASIC_FROST_SERIALIZATION,
sender: setup.dealer,
receiver: setup.signer1,
})
.err();
assert_eq!(validate, None);
}
#[test]
fn validate_sender_receiver() {
let setup = basic_setup();
let header = Header {
version: constants::BASIC_FROST_SERIALIZATION,
sender: setup.signer1,
receiver: setup.signer1,
};
let validate = Validate::validate(&header);
assert_eq!(validate, Err(MsgErr::SameSenderAndReceiver));
}
#[test]
fn validate_sharepackage() {
let setup = basic_setup();
let (mut shares, _pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
let header = create_valid_header(setup.signer1, setup.signer2);
let group_public = VerificationKey::from(
verification_key::VerificationKey::try_from(shares[0].group_public.bytes).unwrap(),
);
let secret_share = Secret(shares[0].share.value.0.to_bytes());
let participants = vec![setup.signer1, setup.signer2];
shares.truncate(2);
let share_commitment = generate_share_commitment(&shares, participants);
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share: secret_share,
share_commitment: share_commitment,
});
let validate_payload = Validate::validate(&payload);
let valid_payload = validate_payload.expect("a valid payload").clone();
let message = Message {
header,
payload: valid_payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeDealer));
// change the header
let header = create_valid_header(setup.dealer, setup.aggregator);
let message = Message {
header,
payload: valid_payload,
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeSigner));
let participants = vec![setup.signer1];
shares.truncate(1);
let mut share_commitment = generate_share_commitment(&shares, participants);
// change the payload to have only 1 commitment
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share: secret_share,
share_commitment: share_commitment.clone(),
});
let validate_payload = Validate::validate(&payload);
assert_eq!(
validate_payload,
Err(MsgErr::NotEnoughCommitments(constants::MIN_SIGNERS))
);
// build and use too many commitments
for i in 2..constants::MAX_SIGNERS as u64 + 2 {
share_commitment.insert(
ParticipantId::Signer(i),
share_commitment.clone()[&setup.signer1],
);
}
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share,
share_commitment,
});
let validate_payload = Validate::validate(&payload);
assert_eq!(validate_payload, Err(MsgErr::TooManyCommitments));
}
#[test]
fn serialize_sharepackage() {
let setup = basic_setup();
let (mut shares, _pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
let header = create_valid_header(setup.dealer, setup.signer1);
let group_public = VerificationKey::from(
verification_key::VerificationKey::try_from(shares[0].group_public.bytes).unwrap(),
);
let secret_share = Secret(shares[0].share.value.0.to_bytes());
let participants = vec![setup.signer1];
shares.truncate(1);
let share_commitment = generate_share_commitment(&shares, participants);
let payload = Payload::SharePackage(SharePackage {
group_public,
secret_share,
share_commitment: share_commitment.clone(),
});
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure and header serialization/deserialization
serialize_message(message, setup.dealer, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SharePackage);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// group_public is 32 bytes
let deserialized_group_public: VerificationKey =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// secret share is 32 bytes
let deserialized_secret_share: Secret =
bincode::deserialize(&payload_serialized_bytes[32..64]).unwrap();
// rest of the payload is the map: 8 (map.len()) + 8 (ParticipantId) + 32 (Commitment) = 48 bytes
let deserialized_share_commitment: BTreeMap<ParticipantId, Commitment> =
bincode::deserialize(&payload_serialized_bytes[64..112]).unwrap();
// check the map len
let deserialized_map_len: u64 =
bincode::deserialize(&payload_serialized_bytes[64..72]).unwrap();
assert_eq!(deserialized_map_len, 1);
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 112);
assert_eq!(deserialized_group_public, group_public);
assert_eq!(deserialized_secret_share, secret_share);
assert_eq!(deserialized_share_commitment, share_commitment);
}
#[test]
fn validate_signingcommitments() {
let mut setup = basic_setup();
let (_nonce, commitment) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let header = create_valid_header(setup.aggregator, setup.signer2);
let payload = Payload::SigningCommitments(SigningCommitments {
hiding: Commitment(jubjub::AffinePoint::from(commitment[0].hiding).to_bytes()),
binding: Commitment(jubjub::AffinePoint::from(commitment[0].binding).to_bytes()),
});
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeSigner));
// change the header
let header = create_valid_header(setup.signer1, setup.signer2);
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeAggregator));
// change the header to be valid
let header = create_valid_header(setup.signer1, setup.aggregator);
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_signingcommitments() {
let mut setup = basic_setup();
let (_nonce, commitment) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let header = create_valid_header(setup.aggregator, setup.signer1);
let hiding = Commitment(jubjub::AffinePoint::from(commitment[0].hiding).to_bytes());
let binding = Commitment(jubjub::AffinePoint::from(commitment[0].binding).to_bytes());
let payload = Payload::SigningCommitments(SigningCommitments { hiding, binding });
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.aggregator, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SigningCommitments);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// hiding is 32 bytes
let deserialized_hiding: Commitment =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// binding is 32 bytes
let deserialized_binding: Commitment =
bincode::deserialize(&payload_serialized_bytes[32..64]).unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 64);
assert_eq!(deserialized_hiding, hiding);
assert_eq!(deserialized_binding, binding);
}
#[test]
fn validate_signingpackage() {
let mut setup = basic_setup();
let (_nonce, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let header = create_valid_header(setup.signer1, setup.signer2);
// try with only 1 commitment
let commitments = vec![commitment1[0]];
let participants = vec![setup.signer1];
let signing_commitments = create_signing_commitments(commitments, participants);
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
let validate_payload = Validate::validate(&payload);
assert_eq!(
validate_payload,
Err(MsgErr::NotEnoughCommitments(constants::MIN_SIGNERS))
);
// add too many commitments
let mut big_signing_commitments = BTreeMap::<ParticipantId, SigningCommitments>::new();
for i in 0..constants::MAX_SIGNERS as u64 + 1 {
big_signing_commitments.insert(
ParticipantId::Signer(i),
signing_commitments[&setup.signer1].clone(),
);
}
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: big_signing_commitments,
message: "hola".as_bytes().to_vec(),
});
let validate_payload = Validate::validate(&payload);
assert_eq!(validate_payload, Err(MsgErr::TooManyCommitments));
// change to 2 commitments
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let big_message = [0u8; constants::ZCASH_MAX_PROTOCOL_MESSAGE_LEN + 1].to_vec();
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: big_message,
});
let validate_payload = Validate::validate(&payload);
assert_eq!(validate_payload, Err(MsgErr::MsgTooBig));
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeAggregator));
// change header
let header = create_valid_header(setup.aggregator, setup.dealer);
let message = Message {
header: header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeSigner));
let header = create_valid_header(setup.aggregator, setup.signer1);
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments,
message: "hola".as_bytes().to_vec(),
});
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_signingpackage() {
let mut setup = basic_setup();
let (_nonce, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let header = create_valid_header(setup.aggregator, setup.signer1);
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let payload = Payload::SigningPackage(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.aggregator, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SigningPackage);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// check the map len
let deserialized_map_len: u64 = bincode::deserialize(&payload_serialized_bytes[0..8]).unwrap();
assert_eq!(deserialized_map_len, 2);
// Each SigningCommitments entry is 64 bytes and each ParticipantId is 8 bytes.
// With the 8-byte map length prefix, two entries take 8 + 2 * (8 + 64) = 152 bytes.
let deserialized_signing_commitments: BTreeMap<ParticipantId, SigningCommitments> =
bincode::deserialize(&payload_serialized_bytes[0..152]).unwrap();
// The message field occupies the rest of the payload, after the map.
let deserialized_message: Vec<u8> =
bincode::deserialize(&payload_serialized_bytes[152..payload_serialized_bytes.len()])
.unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 164);
assert_eq!(deserialized_signing_commitments, signing_commitments);
assert_eq!(deserialized_message, "hola".as_bytes().to_vec());
}
#[test]
fn validate_signatureshare() {
let mut setup = basic_setup();
// signers and aggregator should have this data from `SharePackage`
let (shares, _pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
// create a signing package; this is done on the aggregator side.
// the signers should have this data from `SigningPackage`
let (nonce1, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce2, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let signing_package = frost::SigningPackage::from(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
// here we get started with the `SignatureShare` message.
let signature_share = frost::sign(&signing_package, nonce1[0], &shares[0]).unwrap();
// this header is invalid
let header = create_valid_header(setup.aggregator, setup.signer1);
let payload = Payload::SignatureShare(SignatureShare {
signature: SignatureResponse(signature_share.signature.0.to_bytes()),
});
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeSigner));
// change the header, still invalid.
let header = create_valid_header(setup.signer1, setup.signer2);
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeAggregator));
// change the header to be valid
let header = create_valid_header(setup.signer1, setup.aggregator);
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_signatureshare() {
let mut setup = basic_setup();
// signers and aggregator should have this data from `SharePackage`
let (shares, _pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
// create a signing package; this is done on the aggregator side.
// the signers should have this data from `SigningPackage`
let (nonce1, commitment1) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let (_nonce2, commitment2) = frost::preprocess(1, u64::from(setup.signer2), &mut setup.rng);
let commitments = vec![commitment1[0], commitment2[0]];
let participants = vec![setup.signer1, setup.signer2];
let signing_commitments = create_signing_commitments(commitments, participants);
let signing_package = frost::SigningPackage::from(SigningPackage {
signing_commitments: signing_commitments.clone(),
message: "hola".as_bytes().to_vec(),
});
// here we get started with the `SignatureShare` message.
let signature_share = frost::sign(&signing_package, nonce1[0], &shares[0]).unwrap();
// valid header
let header = create_valid_header(setup.signer1, setup.aggregator);
let signature = SignatureResponse(signature_share.signature.0.to_bytes());
let payload = Payload::SignatureShare(SignatureShare { signature });
let message = Message {
header: header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.signer1, setup.aggregator);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::SignatureShare);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// signature is 32 bytes
let deserialized_signature: SignatureResponse =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 32);
assert_eq!(deserialized_signature, signature);
}
#[test]
fn validate_aggregatesignature() {
let (setup, group_signature_res) = full_setup();
// this header is invalid
let header = create_valid_header(setup.signer1, setup.aggregator);
let payload = Payload::AggregateSignature(AggregateSignature {
group_commitment: GroupCommitment::from(group_signature_res),
schnorr_signature: SignatureResponse::from(group_signature_res),
});
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::SenderMustBeAggregator));
// change the header, still invalid.
let header = create_valid_header(setup.aggregator, setup.dealer);
let message = Message {
header,
payload: payload.clone(),
};
let validate_message = Validate::validate(&message);
assert_eq!(validate_message, Err(MsgErr::ReceiverMustBeSigner));
// change the header to be valid
let header = create_valid_header(setup.aggregator, setup.signer1);
let validate_message = Validate::validate(&Message { header, payload }).err();
assert_eq!(validate_message, None);
}
#[test]
fn serialize_aggregatesignature() {
let (setup, group_signature_res) = full_setup();
let header = create_valid_header(setup.aggregator, setup.signer1);
let group_commitment = GroupCommitment::from(group_signature_res);
let schnorr_signature = SignatureResponse::from(group_signature_res);
let payload = Payload::AggregateSignature(AggregateSignature {
group_commitment,
schnorr_signature,
});
let message = Message {
header,
payload: payload.clone(),
};
// check general structure serialization/deserialization
serialize_message(message, setup.aggregator, setup.signer1);
// check payload serialization/deserialization
let mut payload_serialized_bytes = bincode::serialize(&payload).unwrap();
// check the message type is correct
let deserialized_msg_type: MsgType =
bincode::deserialize(&payload_serialized_bytes[0..4]).unwrap();
assert_eq!(deserialized_msg_type, MsgType::AggregateSignature);
// remove the msg_type from the payload
payload_serialized_bytes =
(&payload_serialized_bytes[4..payload_serialized_bytes.len()]).to_vec();
// group_commitment is 32 bytes
let deserialized_group_commitment: GroupCommitment =
bincode::deserialize(&payload_serialized_bytes[0..32]).unwrap();
// schnorr_signature is 32 bytes
let deserialized_schnorr_signature: SignatureResponse =
bincode::deserialize(&payload_serialized_bytes[32..64]).unwrap();
// no leftover bytes
assert_eq!(payload_serialized_bytes.len(), 64);
assert_eq!(deserialized_group_commitment, group_commitment);
assert_eq!(deserialized_schnorr_signature, schnorr_signature);
}
#[test]
fn btreemap() {
let mut setup = basic_setup();
let mut map = BTreeMap::new();
let (_nonce, commitment) = frost::preprocess(1, u64::from(setup.signer1), &mut setup.rng);
let commitments = vec![commitment[0]];
let participants = vec![setup.signer1];
let signing_commitments = create_signing_commitments(commitments, participants);
map.insert(ParticipantId::Signer(1), &signing_commitments);
map.insert(ParticipantId::Signer(2), &signing_commitments);
map.insert(ParticipantId::Signer(0), &signing_commitments);
// Check the ascending order
let mut map_iter = map.iter();
let (key, _) = map_iter.next().unwrap();
assert_eq!(*key, ParticipantId::Signer(0));
let (key, _) = map_iter.next().unwrap();
assert_eq!(*key, ParticipantId::Signer(1));
let (key, _) = map_iter.next().unwrap();
assert_eq!(*key, ParticipantId::Signer(2));
// Add a repeated key
map.insert(ParticipantId::Signer(1), &signing_commitments);
// the map length does not grow when an existing key is re-inserted
assert_eq!(map.len(), 3);
}
// utility functions
fn create_valid_header(sender: ParticipantId, receiver: ParticipantId) -> Header {
Validate::validate(&Header {
version: constants::BASIC_FROST_SERIALIZATION,
sender: sender,
receiver: receiver,
})
.expect("always a valid header")
.clone()
}
fn serialize_header(
header_serialized_bytes: Vec<u8>,
sender: ParticipantId,
receiver: ParticipantId,
) {
let deserialized_version: MsgVersion =
bincode::deserialize(&header_serialized_bytes[0..1]).unwrap();
let deserialized_sender: ParticipantId =
bincode::deserialize(&header_serialized_bytes[1..9]).unwrap();
let deserialized_receiver: ParticipantId =
bincode::deserialize(&header_serialized_bytes[9..17]).unwrap();
assert_eq!(deserialized_version, constants::BASIC_FROST_SERIALIZATION);
assert_eq!(deserialized_sender, sender);
assert_eq!(deserialized_receiver, receiver);
}
fn serialize_message(message: Message, sender: ParticipantId, receiver: ParticipantId) {
let serialized_bytes = bincode::serialize(&message).unwrap();
let deserialized_bytes: Message = bincode::deserialize(&serialized_bytes).unwrap();
assert_eq!(message, deserialized_bytes);
let serialized_json = serde_json::to_string(&message).unwrap();
let deserialized_json: Message = serde_json::from_str(serialized_json.as_str()).unwrap();
assert_eq!(message, deserialized_json);
let header_serialized_bytes = bincode::serialize(&message.header).unwrap();
serialize_header(header_serialized_bytes, sender, receiver);
// make sure the message fields are in the right order
let message_serialized_bytes = bincode::serialize(&message).unwrap();
let deserialized_header: Header =
bincode::deserialize(&message_serialized_bytes[0..17]).unwrap();
let deserialized_payload: Payload =
bincode::deserialize(&message_serialized_bytes[17..message_serialized_bytes.len()])
.unwrap();
assert_eq!(deserialized_header, message.header);
assert_eq!(deserialized_payload, message.payload);
}
struct Setup {
rng: rand::rngs::ThreadRng,
num_signers: u8,
threshold: u8,
dealer: ParticipantId,
aggregator: ParticipantId,
signer1: ParticipantId,
signer2: ParticipantId,
}
fn basic_setup() -> Setup {
Setup {
rng: thread_rng(),
num_signers: 3,
threshold: 2,
dealer: ParticipantId::Dealer,
aggregator: ParticipantId::Aggregator,
signer1: ParticipantId::Signer(0),
signer2: ParticipantId::Signer(1),
}
}
fn full_setup() -> (Setup, signature::Signature<SpendAuth>) {
let mut setup = basic_setup();
// aggregator creates the shares and pubkeys for this round
let (shares, pubkeys) =
frost::keygen_with_dealer(setup.num_signers, setup.threshold, setup.rng.clone()).unwrap();
let mut nonces: std::collections::HashMap<u64, Vec<frost::SigningNonces>> =
std::collections::HashMap::with_capacity(setup.threshold as usize);
let mut commitments: Vec<frost::SigningCommitments> =
Vec::with_capacity(setup.threshold as usize);
// aggregator generates nonces and signing commitments for each participant.
for participant_index in 1..(setup.threshold + 1) {
let (nonce, commitment) = frost::preprocess(1, participant_index as u64, &mut setup.rng);
nonces.insert(participant_index as u64, nonce);
commitments.push(commitment[0]);
}
// aggregator generates a signing package
let mut signature_shares: Vec<frost::SignatureShare> =
Vec::with_capacity(setup.threshold as usize);
let message = "message to sign".as_bytes().to_vec();
let signing_package = frost::SigningPackage {
message: message.clone(),
signing_commitments: commitments,
};
// each participant generates their signature share
for (participant_index, nonce) in nonces {
let share_package = shares
.iter()
.find(|share| participant_index == share.index)
.unwrap();
let nonce_to_use = nonce[0];
let signature_share = frost::sign(&signing_package, nonce_to_use, share_package).unwrap();
signature_shares.push(signature_share);
}
// aggregator generates the final signature
let final_signature =
frost::aggregate(&signing_package, &signature_shares[..], &pubkeys).unwrap();
(setup, final_signature)
}
fn generate_share_commitment(
shares: &Vec<frost::SharePackage>,
participants: Vec<ParticipantId>,
) -> BTreeMap<ParticipantId, Commitment> {
assert_eq!(shares.len(), participants.len());
participants
.into_iter()
.zip(shares)
.map(|(participant_id, share)| {
(
participant_id,
Commitment::from(share.share.commitment.0[0].clone()),
)
})
.collect()
}
fn create_signing_commitments(
commitments: Vec<frost::SigningCommitments>,
participants: Vec<ParticipantId>,
) -> BTreeMap<ParticipantId, SigningCommitments> {
assert_eq!(commitments.len(), participants.len());
participants
.into_iter()
.zip(commitments)
.map(|(participant_id, commitment)| {
let signing_commitment = SigningCommitments {
hiding: Commitment(jubjub::AffinePoint::from(commitment.hiding).to_bytes()),
binding: Commitment(jubjub::AffinePoint::from(commitment.binding).to_bytes()),
};
(participant_id, signing_commitment)
})
.collect()
}

View File

@ -1,15 +0,0 @@
use proptest::prelude::*;
use crate::messages::*;
proptest! {
#[test]
fn serialize_message(
message in any::<Message>(),
) {
let serialized = bincode::serialize(&message).unwrap();
let deserialized: Message = bincode::deserialize(serialized.as_slice()).unwrap();
prop_assert_eq!(message, deserialized);
}
}

View File

@ -1,143 +0,0 @@
//! Validation rules specified in [RFC-001#rules]
//!
//! [RFC-001#rules]: https://github.com/ZcashFoundation/redjubjub/blob/main/rfcs/0001-messages.md#rules
use super::constants::{
BASIC_FROST_SERIALIZATION, MAX_SIGNERS, MIN_SIGNERS, MIN_THRESHOLD,
ZCASH_MAX_PROTOCOL_MESSAGE_LEN,
};
use super::*;
use thiserror::Error;
pub trait Validate {
fn validate(&self) -> Result<&Self, MsgErr>;
}
impl Validate for Message {
fn validate(&self) -> Result<&Self, MsgErr> {
match self.payload {
Payload::SharePackage(_) => {
if self.header.sender != ParticipantId::Dealer {
return Err(MsgErr::SenderMustBeDealer);
}
if !matches!(self.header.receiver, ParticipantId::Signer(_)) {
return Err(MsgErr::ReceiverMustBeSigner);
}
}
Payload::SigningCommitments(_) => {
if !matches!(self.header.sender, ParticipantId::Signer(_)) {
return Err(MsgErr::SenderMustBeSigner);
}
if self.header.receiver != ParticipantId::Aggregator {
return Err(MsgErr::ReceiverMustBeAggregator);
}
}
Payload::SigningPackage(_) => {
if self.header.sender != ParticipantId::Aggregator {
return Err(MsgErr::SenderMustBeAggregator);
}
if !matches!(self.header.receiver, ParticipantId::Signer(_)) {
return Err(MsgErr::ReceiverMustBeSigner);
}
}
Payload::SignatureShare(_) => {
if !matches!(self.header.sender, ParticipantId::Signer(_)) {
return Err(MsgErr::SenderMustBeSigner);
}
if self.header.receiver != ParticipantId::Aggregator {
return Err(MsgErr::ReceiverMustBeAggregator);
}
}
Payload::AggregateSignature(_) => {
if self.header.sender != ParticipantId::Aggregator {
return Err(MsgErr::SenderMustBeAggregator);
}
if !matches!(self.header.receiver, ParticipantId::Signer(_)) {
return Err(MsgErr::ReceiverMustBeSigner);
}
}
}
self.header.validate()?;
self.payload.validate()?;
Ok(self)
}
}
impl Validate for Header {
fn validate(&self) -> Result<&Self, MsgErr> {
// Validate the message version.
// For now we only have one valid version, so we compare against that.
if self.version != BASIC_FROST_SERIALIZATION {
return Err(MsgErr::WrongVersion);
}
// Make sure the sender and the receiver are not the same.
if self.sender == self.receiver {
return Err(MsgErr::SameSenderAndReceiver);
}
Ok(self)
}
}
impl Validate for Payload {
fn validate(&self) -> Result<&Self, MsgErr> {
match self {
Payload::SharePackage(share_package) => {
if share_package.share_commitment.len() < MIN_SIGNERS {
return Err(MsgErr::NotEnoughCommitments(MIN_SIGNERS));
}
if share_package.share_commitment.len() > MAX_SIGNERS.into() {
return Err(MsgErr::TooManyCommitments);
}
}
Payload::SigningCommitments(_) => {}
Payload::SigningPackage(signing_package) => {
if signing_package.message.len() > ZCASH_MAX_PROTOCOL_MESSAGE_LEN {
return Err(MsgErr::MsgTooBig);
}
if signing_package.signing_commitments.len() < MIN_THRESHOLD {
return Err(MsgErr::NotEnoughCommitments(MIN_THRESHOLD));
}
if signing_package.signing_commitments.len() > MAX_SIGNERS.into() {
return Err(MsgErr::TooManyCommitments);
}
}
Payload::SignatureShare(_) => {}
Payload::AggregateSignature(_) => {}
}
Ok(self)
}
}
/// The error a message can produce if it fails validation.
#[derive(Error, Debug, PartialEq)]
pub enum MsgErr {
#[error("wrong version number")]
WrongVersion,
#[error("sender and receiver are the same")]
SameSenderAndReceiver,
#[error("the sender of this message must be the dealer")]
SenderMustBeDealer,
#[error("the receiver of this message must be a signer")]
ReceiverMustBeSigner,
#[error("the sender of this message must be a signer")]
SenderMustBeSigner,
#[error("the receiver of this message must be the aggregator")]
ReceiverMustBeAggregator,
#[error("the sender of this message must be the aggregator")]
SenderMustBeAggregator,
#[error("the number of signers must be at least {0}")]
NotEnoughCommitments(usize),
#[error("the number of signers can't be more than {}", MAX_SIGNERS)]
TooManyCommitments,
#[error(
"the message field can't be bigger than {}",
ZCASH_MAX_PROTOCOL_MESSAGE_LEN
)]
MsgTooBig,
}
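As a hedged sketch of how these rules surface to callers (mirroring the message tests above and assuming the same imports they use), a header whose sender and receiver coincide fails validation, while a well-formed dealer-to-signer header passes:

```rust
// Sender and receiver must differ.
let bad = Header {
    version: constants::BASIC_FROST_SERIALIZATION,
    sender: ParticipantId::Signer(0),
    receiver: ParticipantId::Signer(0),
};
assert_eq!(Validate::validate(&bad), Err(MsgErr::SameSenderAndReceiver));

// A dealer-to-signer header with the known version is accepted.
let good = Header {
    version: constants::BASIC_FROST_SERIALIZATION,
    sender: ParticipantId::Dealer,
    receiver: ParticipantId::Signer(0),
};
assert!(Validate::validate(&good).is_ok());
```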

View File

@ -1,195 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// Copyright (c) 2017-2021 isis agora lovecruft, Henry de Valence
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <isis@patternsinthevoid.net>
// - Henry de Valence <hdevalence@hdevalence.ca>
// - Deirdre Connolly <deirdre@zfnd.org>
use std::{borrow::Borrow, fmt::Debug};
use jubjub::*;
pub trait NonAdjacentForm {
fn non_adjacent_form(&self, w: usize) -> [i8; 256];
}
/// A trait for variable-time multiscalar multiplication without precomputation.
pub trait VartimeMultiscalarMul {
/// The type of point being multiplied, e.g., `AffinePoint`.
type Point;
/// Given an iterator of public scalars and an iterator of
/// `Option`s of points, compute either `Some(Q)`, where
/// $$
/// Q = c\_1 P\_1 + \cdots + c\_n P\_n,
/// $$
/// if all points were `Some(P_i)`, or else return `None`.
fn optional_multiscalar_mul<I, J>(scalars: I, points: J) -> Option<Self::Point>
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
J: IntoIterator<Item = Option<Self::Point>>;
/// Given an iterator of public scalars and an iterator of
/// public points, compute
/// $$
/// Q = c\_1 P\_1 + \cdots + c\_n P\_n,
/// $$
/// using variable-time operations.
///
/// It is an error to call this function with two iterators of different lengths.
fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self::Point
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
J: IntoIterator,
J::Item: Borrow<Self::Point>,
Self::Point: Clone,
{
Self::optional_multiscalar_mul(
scalars,
points.into_iter().map(|p| Some(p.borrow().clone())),
)
.unwrap()
}
}
impl NonAdjacentForm for Scalar {
/// Compute a width-\\(w\\) "Non-Adjacent Form" of this scalar.
///
/// Thanks to curve25519-dalek
fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
// required by the NAF definition
debug_assert!(w >= 2);
// required so that the NAF digits fit in i8
debug_assert!(w <= 8);
use byteorder::{ByteOrder, LittleEndian};
let mut naf = [0i8; 256];
let mut x_u64 = [0u64; 5];
LittleEndian::read_u64_into(&self.to_bytes(), &mut x_u64[0..4]);
let width = 1 << w;
let window_mask = width - 1;
let mut pos = 0;
let mut carry = 0;
while pos < 256 {
// Construct a buffer of bits of the scalar, starting at bit `pos`
let u64_idx = pos / 64;
let bit_idx = pos % 64;
let bit_buf: u64;
if bit_idx < 64 - w {
// This window's bits are contained in a single u64
bit_buf = x_u64[u64_idx] >> bit_idx;
} else {
// Combine the current u64's bits with the bits from the next u64
bit_buf = (x_u64[u64_idx] >> bit_idx) | (x_u64[1 + u64_idx] << (64 - bit_idx));
}
// Add the carry into the current window
let window = carry + (bit_buf & window_mask);
if window & 1 == 0 {
// If the window value is even, preserve the carry and continue.
// Why is the carry preserved?
// If carry == 0 and window & 1 == 0, then the next carry should be 0
// If carry == 1 and window & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
pos += 1;
continue;
}
if window < width / 2 {
carry = 0;
naf[pos] = window as i8;
} else {
carry = 1;
naf[pos] = (window as i8).wrapping_sub(width as i8);
}
pos += w;
}
naf
}
}
/// Holds odd multiples 1A, 3A, ..., 15A of a point A.
#[derive(Copy, Clone)]
pub(crate) struct LookupTable5<T>(pub(crate) [T; 8]);
impl<T: Copy> LookupTable5<T> {
/// Given public, odd \\( x \\) with \\( 0 < x < 2^4 \\), return \\(xA\\).
pub fn select(&self, x: usize) -> T {
debug_assert_eq!(x & 1, 1);
debug_assert!(x < 16);
self.0[x / 2]
}
}
impl<T: Debug> Debug for LookupTable5<T> {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "LookupTable5({:?})", self.0)
}
}
impl<'a> From<&'a ExtendedPoint> for LookupTable5<ExtendedNielsPoint> {
#[allow(non_snake_case)]
fn from(A: &'a ExtendedPoint) -> Self {
let mut Ai = [A.to_niels(); 8];
let A2 = A.double();
for i in 0..7 {
Ai[i + 1] = (&A2 + &Ai[i]).to_niels();
}
// Now Ai = [A, 3A, 5A, 7A, 9A, 11A, 13A, 15A]
LookupTable5(Ai)
}
}
impl VartimeMultiscalarMul for ExtendedPoint {
type Point = ExtendedPoint;
#[allow(non_snake_case)]
fn optional_multiscalar_mul<I, J>(scalars: I, points: J) -> Option<ExtendedPoint>
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
J: IntoIterator<Item = Option<ExtendedPoint>>,
{
let nafs: Vec<_> = scalars
.into_iter()
.map(|c| c.borrow().non_adjacent_form(5))
.collect();
let lookup_tables = points
.into_iter()
.map(|P_opt| P_opt.map(|P| LookupTable5::<ExtendedNielsPoint>::from(&P)))
.collect::<Option<Vec<_>>>()?;
let mut r = ExtendedPoint::identity();
for i in (0..256).rev() {
let mut t = r.double();
for (naf, lookup_table) in nafs.iter().zip(lookup_tables.iter()) {
if naf[i] > 0 {
t = &t + &lookup_table.select(naf[i] as usize);
} else if naf[i] < 0 {
t = &t - &lookup_table.select(-naf[i] as usize);
}
}
r = t;
}
Some(r)
}
}
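A crate-internal usage sketch of `VartimeMultiscalarMul` (illustrative only; it assumes this module and the sealed `basepoint()` helper are reachable from the call site, and that `jubjub::Scalar` provides a `From<u64>` conversion): a two-term combination should match the naive evaluation.

```rust
// Hedged sketch: compare the NAF-based multiscalar mul against a*B + b*(2B).
use crate::private::Sealed;
use crate::SpendAuth;

let b1 = SpendAuth::basepoint();
let b2 = b1.double();
let a = Scalar::from(7u64);
let b = Scalar::from(11u64);

let fast = ExtendedPoint::vartime_multiscalar_mul([a, b].iter(), [b1, b2].iter());
let naive = &(&b1 * &a) + &(&b2 * &b);
assert_eq!(fast, naive);
```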

View File

@ -1,45 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Henry de Valence <hdevalence@hdevalence.ca>
//! RedJubJub signatures.
use std::marker::PhantomData;
use crate::SigType;
/// A RedJubJub signature.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Signature<T: SigType> {
pub(crate) r_bytes: [u8; 32],
pub(crate) s_bytes: [u8; 32],
pub(crate) _marker: PhantomData<T>,
}
impl<T: SigType> From<[u8; 64]> for Signature<T> {
fn from(bytes: [u8; 64]) -> Signature<T> {
let mut r_bytes = [0; 32];
r_bytes.copy_from_slice(&bytes[0..32]);
let mut s_bytes = [0; 32];
s_bytes.copy_from_slice(&bytes[32..64]);
Signature {
r_bytes,
s_bytes,
_marker: PhantomData,
}
}
}
impl<T: SigType> From<Signature<T>> for [u8; 64] {
fn from(sig: Signature<T>) -> [u8; 64] {
let mut bytes = [0; 64];
bytes[0..32].copy_from_slice(&sig.r_bytes[..]);
bytes[32..64].copy_from_slice(&sig.s_bytes[..]);
bytes
}
}
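One property worth noting, shown here as a small sketch: unlike key decoding, these conversions are infallible, so any 64 bytes parse into a `Signature`; validity is only decided later by verification.

```rust
use redjubjub::{Binding, Signature};

// Any 64-byte array converts into a Signature and back unchanged.
let garbage = [0x42u8; 64];
let sig: Signature<Binding> = garbage.into();
let round_trip: [u8; 64] = sig.into();
assert_eq!(round_trip, garbage);
```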

View File

@ -1,133 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Deirdre Connolly <deirdre@zfnd.org>
// - Henry de Valence <hdevalence@hdevalence.ca>
use std::{
convert::{TryFrom, TryInto},
marker::PhantomData,
};
use crate::{Error, Randomizer, SigType, Signature, SpendAuth, VerificationKey};
use jubjub::Scalar;
use rand_core::{CryptoRng, RngCore};
/// A RedJubJub signing key.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(try_from = "SerdeHelper"))]
#[cfg_attr(feature = "serde", serde(into = "SerdeHelper"))]
#[cfg_attr(feature = "serde", serde(bound = "T: SigType"))]
pub struct SigningKey<T: SigType> {
sk: Scalar,
pk: VerificationKey<T>,
}
impl<'a, T: SigType> From<&'a SigningKey<T>> for VerificationKey<T> {
fn from(sk: &'a SigningKey<T>) -> VerificationKey<T> {
sk.pk.clone()
}
}
impl<T: SigType> From<SigningKey<T>> for [u8; 32] {
fn from(sk: SigningKey<T>) -> [u8; 32] {
sk.sk.to_bytes()
}
}
impl<T: SigType> TryFrom<[u8; 32]> for SigningKey<T> {
type Error = Error;
fn try_from(bytes: [u8; 32]) -> Result<Self, Self::Error> {
// XXX-jubjub: this should not use CtOption
let maybe_sk = Scalar::from_bytes(&bytes);
if maybe_sk.is_some().into() {
let sk = maybe_sk.unwrap();
let pk = VerificationKey::from(&sk);
Ok(SigningKey { sk, pk })
} else {
Err(Error::MalformedSigningKey)
}
}
}
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
struct SerdeHelper([u8; 32]);
impl<T: SigType> TryFrom<SerdeHelper> for SigningKey<T> {
type Error = Error;
fn try_from(helper: SerdeHelper) -> Result<Self, Self::Error> {
helper.0.try_into()
}
}
impl<T: SigType> From<SigningKey<T>> for SerdeHelper {
fn from(sk: SigningKey<T>) -> Self {
Self(sk.into())
}
}
impl SigningKey<SpendAuth> {
/// Randomize this signing key with the given `randomizer`.
pub fn randomize(&self, randomizer: &Randomizer) -> SigningKey<SpendAuth> {
let sk = &self.sk + randomizer;
let pk = VerificationKey::from(&sk);
SigningKey { sk, pk }
}
}
impl<T: SigType> SigningKey<T> {
/// Generate a new signing key.
pub fn new<R: RngCore + CryptoRng>(mut rng: R) -> SigningKey<T> {
let sk = {
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes);
Scalar::from_bytes_wide(&bytes)
};
let pk = VerificationKey::from(&sk);
SigningKey { sk, pk }
}
/// Create a signature of type `T` on `msg` using this `SigningKey`.
// Similar to signature::Signer but without boxed errors.
pub fn sign<R: RngCore + CryptoRng>(&self, mut rng: R, msg: &[u8]) -> Signature<T> {
use crate::HStar;
// Choose a byte sequence uniformly at random of length
// (\ell_H + 128)/8 bytes. For RedJubjub this is (512 + 128)/8 = 80.
let random_bytes = {
let mut bytes = [0; 80];
rng.fill_bytes(&mut bytes);
bytes
};
let nonce = HStar::default()
.update(&random_bytes[..])
.update(&self.pk.bytes.bytes[..]) // XXX ugly
.update(msg)
.finalize();
let r_bytes = jubjub::AffinePoint::from(&T::basepoint() * &nonce).to_bytes();
let c = HStar::default()
.update(&r_bytes[..])
.update(&self.pk.bytes.bytes[..]) // XXX ugly
.update(msg)
.finalize();
let s_bytes = (&nonce + &(&c * &self.sk)).to_bytes();
Signature {
r_bytes,
s_bytes,
_marker: PhantomData,
}
}
}
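A small hedged sketch of the byte-level API above: a non-canonical 32-byte string is rejected by `TryFrom`, and a freshly generated key round-trips through its encoding.

```rust
use std::convert::TryFrom;
use rand::thread_rng;
use redjubjub::{SigningKey, SpendAuth, VerificationKey};

// 2^256 - 1 exceeds the Jubjub scalar field order, so this encoding
// is non-canonical and decoding fails.
assert!(SigningKey::<SpendAuth>::try_from([0xFFu8; 32]).is_err());

// A valid key round-trips through its 32-byte encoding and yields
// the same verification key.
let sk = SigningKey::<SpendAuth>::new(thread_rng());
let bytes: [u8; 32] = sk.into();
let sk2 = SigningKey::<SpendAuth>::try_from(bytes).expect("canonical encoding");
assert_eq!(VerificationKey::from(&sk), VerificationKey::from(&sk2));
```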

View File

@ -1,202 +0,0 @@
// -*- mode: rust; -*-
//
// This file is part of redjubjub.
// Copyright (c) 2019-2021 Zcash Foundation
// See LICENSE for licensing information.
//
// Authors:
// - Deirdre Connolly <deirdre@zfnd.org>
// - Henry de Valence <hdevalence@hdevalence.ca>
use std::{
convert::TryFrom,
hash::{Hash, Hasher},
marker::PhantomData,
};
use jubjub::Scalar;
use crate::{Error, Randomizer, SigType, Signature, SpendAuth};
/// A refinement type for `[u8; 32]` indicating that the bytes represent
/// an encoding of a RedJubJub verification key.
///
/// This is useful for representing a compressed verification key; the
/// [`VerificationKey`] type in this library holds other decompressed state
/// used in signature verification.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct VerificationKeyBytes<T: SigType> {
pub(crate) bytes: [u8; 32],
pub(crate) _marker: PhantomData<T>,
}
impl<T: SigType> From<[u8; 32]> for VerificationKeyBytes<T> {
fn from(bytes: [u8; 32]) -> VerificationKeyBytes<T> {
VerificationKeyBytes {
bytes,
_marker: PhantomData,
}
}
}
impl<T: SigType> From<VerificationKeyBytes<T>> for [u8; 32] {
fn from(refined: VerificationKeyBytes<T>) -> [u8; 32] {
refined.bytes
}
}
impl<T: SigType> Hash for VerificationKeyBytes<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.bytes.hash(state);
self._marker.hash(state);
}
}
/// A valid RedJubJub verification key.
///
/// This type holds decompressed state used in signature verification; if the
/// verification key may not be used immediately, it is probably better to use
/// [`VerificationKeyBytes`], which is a refinement type for `[u8; 32]`.
///
/// ## Consensus properties
///
/// The `TryFrom<VerificationKeyBytes>` conversion performs the following Zcash
/// consensus rule check:
///
/// 1. The check that the bytes are a canonical encoding of a verification key.
///
/// Small-order points (including the identity) are *not* rejected here, as the
/// RedDSA specification allows them; any rejection required by consensus rules
/// happens separately (see the note in the `TryFrom` implementation below).
#[derive(PartialEq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(try_from = "VerificationKeyBytes<T>"))]
#[cfg_attr(feature = "serde", serde(into = "VerificationKeyBytes<T>"))]
#[cfg_attr(feature = "serde", serde(bound = "T: SigType"))]
pub struct VerificationKey<T: SigType> {
// XXX-jubjub: this should just be Point
pub(crate) point: jubjub::ExtendedPoint,
pub(crate) bytes: VerificationKeyBytes<T>,
}
impl<T: SigType> From<VerificationKey<T>> for VerificationKeyBytes<T> {
fn from(pk: VerificationKey<T>) -> VerificationKeyBytes<T> {
pk.bytes
}
}
impl<T: SigType> From<VerificationKey<T>> for [u8; 32] {
fn from(pk: VerificationKey<T>) -> [u8; 32] {
pk.bytes.bytes
}
}
impl<T: SigType> TryFrom<VerificationKeyBytes<T>> for VerificationKey<T> {
type Error = Error;
fn try_from(bytes: VerificationKeyBytes<T>) -> Result<Self, Self::Error> {
// XXX-jubjub: this should not use CtOption
// XXX-jubjub: this takes ownership of bytes, while Fr doesn't.
// This checks that the encoding is canonical...
let maybe_point = jubjub::AffinePoint::from_bytes(bytes.bytes);
if maybe_point.is_some().into() {
let point: jubjub::ExtendedPoint = maybe_point.unwrap().into();
// Note that small-order verification keys (including the identity) are not
// rejected here. Previously they were rejected, but this was a bug as the
// RedDSA specification allows them. Zcash Sapling rejects small-order points
// for the RedJubjub spend authorization key rk; this now occurs separately.
// Meanwhile, Zcash Orchard uses a prime-order group, so the only small-order
// point would be the identity, which is allowed in Orchard.
Ok(VerificationKey { point, bytes })
} else {
Err(Error::MalformedVerificationKey)
}
}
}
impl<T: SigType> TryFrom<[u8; 32]> for VerificationKey<T> {
type Error = Error;
fn try_from(bytes: [u8; 32]) -> Result<Self, Self::Error> {
use std::convert::TryInto;
VerificationKeyBytes::from(bytes).try_into()
}
}
impl VerificationKey<SpendAuth> {
/// Randomize this verification key with the given `randomizer`.
///
/// Randomization is only supported for `SpendAuth` keys.
pub fn randomize(&self, randomizer: &Randomizer) -> VerificationKey<SpendAuth> {
use crate::private::Sealed;
let point = &self.point + &(&SpendAuth::basepoint() * randomizer);
let bytes = VerificationKeyBytes {
bytes: jubjub::AffinePoint::from(&point).to_bytes(),
_marker: PhantomData,
};
VerificationKey { bytes, point }
}
}
impl<T: SigType> VerificationKey<T> {
pub(crate) fn from(s: &Scalar) -> VerificationKey<T> {
let point = &T::basepoint() * s;
let bytes = VerificationKeyBytes {
bytes: jubjub::AffinePoint::from(&point).to_bytes(),
_marker: PhantomData,
};
VerificationKey { bytes, point }
}
/// Verify a purported `signature` over `msg` made by this verification key.
// This is similar to impl signature::Verifier but without boxed errors
pub fn verify(&self, msg: &[u8], signature: &Signature<T>) -> Result<(), Error> {
use crate::HStar;
let c = HStar::default()
.update(&signature.r_bytes[..])
.update(&self.bytes.bytes[..]) // XXX ugly
.update(msg)
.finalize();
self.verify_prehashed(signature, c)
}
/// Verify a purported `signature` with a prehashed challenge.
#[allow(non_snake_case)]
pub(crate) fn verify_prehashed(
&self,
signature: &Signature<T>,
c: Scalar,
) -> Result<(), Error> {
let r = {
// XXX-jubjub: should not use CtOption here
// XXX-jubjub: inconsistent ownership in from_bytes
let maybe_point = jubjub::AffinePoint::from_bytes(signature.r_bytes);
if maybe_point.is_some().into() {
jubjub::ExtendedPoint::from(maybe_point.unwrap())
} else {
return Err(Error::InvalidSignature);
}
};
let s = {
// XXX-jubjub: should not use CtOption here
let maybe_scalar = Scalar::from_bytes(&signature.s_bytes);
if maybe_scalar.is_some().into() {
maybe_scalar.unwrap()
} else {
return Err(Error::InvalidSignature);
}
};
// XXX rewrite as normal double scalar mul
// Verify check is h * ( - s * B + R + c * A) == 0, or equivalently
// h * ( s * B - c * A - R) == 0
let sB = &T::basepoint() * &s;
let cA = &self.point * &c;
let check = sB - cA - r;
if check.is_small_order().into() {
Ok(())
} else {
Err(Error::InvalidSignature)
}
}
}

View File

@ -1,99 +0,0 @@
use rand::thread_rng;
use redjubjub::*;
#[test]
fn spendauth_batch_verify() {
let mut rng = thread_rng();
let mut batch = batch::Verifier::new();
for _ in 0..32 {
let sk = SigningKey::<SpendAuth>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let msg = b"BatchVerifyTest";
let sig = sk.sign(&mut rng, &msg[..]);
batch.queue((vk.into(), sig, msg));
}
assert!(batch.verify(rng).is_ok());
}
#[test]
fn binding_batch_verify() {
let mut rng = thread_rng();
let mut batch = batch::Verifier::new();
for _ in 0..32 {
let sk = SigningKey::<Binding>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let msg = b"BatchVerifyTest";
let sig = sk.sign(&mut rng, &msg[..]);
batch.queue((vk.into(), sig, msg));
}
assert!(batch.verify(rng).is_ok());
}
#[test]
fn alternating_batch_verify() {
let mut rng = thread_rng();
let mut batch = batch::Verifier::new();
for i in 0..32 {
let item: batch::Item = match i % 2 {
0 => {
let sk = SigningKey::<SpendAuth>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let msg = b"BatchVerifyTest";
let sig = sk.sign(&mut rng, &msg[..]);
(vk.into(), sig, msg).into()
}
1 => {
let sk = SigningKey::<Binding>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let msg = b"BatchVerifyTest";
let sig = sk.sign(&mut rng, &msg[..]);
(vk.into(), sig, msg).into()
}
_ => unreachable!(),
};
batch.queue(item);
}
assert!(batch.verify(rng).is_ok());
}
#[test]
fn bad_batch_verify() {
let mut rng = thread_rng();
let bad_index = 4; // must be even
let mut batch = batch::Verifier::new();
let mut items = Vec::new();
for i in 0..32 {
let item: batch::Item = match i % 2 {
0 => {
let sk = SigningKey::<SpendAuth>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let msg = b"BatchVerifyTest";
let sig = if i != bad_index {
sk.sign(&mut rng, &msg[..])
} else {
sk.sign(&mut rng, b"bad")
};
(vk.into(), sig, msg).into()
}
1 => {
let sk = SigningKey::<Binding>::new(&mut rng);
let vk = VerificationKey::from(&sk);
let msg = b"BatchVerifyTest";
let sig = sk.sign(&mut rng, &msg[..]);
(vk.into(), sig, msg).into()
}
_ => unreachable!(),
};
items.push(item.clone());
batch.queue(item);
}
assert!(batch.verify(rng).is_err());
for (i, item) in items.drain(..).enumerate() {
if i != bad_index {
assert!(item.verify_single().is_ok());
} else {
assert!(item.verify_single().is_err());
}
}
}

View File

@ -1,111 +0,0 @@
use std::convert::TryFrom;
use proptest::prelude::*;
use redjubjub::*;
proptest! {
#[test]
fn secretkey_serialization(
bytes in prop::array::uniform32(any::<u8>()),
) {
let sk_result_from = SigningKey::<SpendAuth>::try_from(bytes);
let sk_result_bincode: Result<SigningKey::<SpendAuth>, _>
= bincode::deserialize(&bytes[..]);
// Check 1: both decoding methods should agree
match (sk_result_from, sk_result_bincode) {
// Both agree on success
(Ok(sk_from), Ok(sk_bincode)) => {
let pk_bytes_from = VerificationKeyBytes::from(VerificationKey::from(&sk_from));
let pk_bytes_bincode = VerificationKeyBytes::from(VerificationKey::from(&sk_bincode));
assert_eq!(pk_bytes_from, pk_bytes_bincode);
// Check 2: bincode encoding should match original bytes.
let bytes_bincode = bincode::serialize(&sk_from).unwrap();
assert_eq!(&bytes[..], &bytes_bincode[..]);
// Check 3: From encoding should match original bytes.
let bytes_from: [u8; 32] = sk_bincode.into();
assert_eq!(&bytes[..], &bytes_from[..]);
}
// Both agree on failure
(Err(_), Err(_)) => {},
_ => panic!("bincode and try_from do not agree"),
}
}
#[test]
fn publickeybytes_serialization(
bytes in prop::array::uniform32(any::<u8>()),
) {
let pk_bytes_from = VerificationKeyBytes::<SpendAuth>::from(bytes);
let pk_bytes_bincode: VerificationKeyBytes::<SpendAuth>
= bincode::deserialize(&bytes[..]).unwrap();
// Check 1: both decoding methods should have the same result.
assert_eq!(pk_bytes_from, pk_bytes_bincode);
// Check 2: bincode encoding should match original bytes.
let bytes_bincode = bincode::serialize(&pk_bytes_from).unwrap();
assert_eq!(&bytes[..], &bytes_bincode[..]);
// Check 3: From encoding should match original bytes.
let bytes_from: [u8; 32] = pk_bytes_bincode.into();
assert_eq!(&bytes[..], &bytes_from[..]);
}
#[test]
fn publickey_serialization(
bytes in prop::array::uniform32(any::<u8>()),
) {
let pk_result_try_from = VerificationKey::<SpendAuth>::try_from(bytes);
let pk_result_bincode: Result<VerificationKey::<SpendAuth>, _>
= bincode::deserialize(&bytes[..]);
// Check 1: both decoding methods should have the same result
match (pk_result_try_from, pk_result_bincode) {
// Both agree on success
(Ok(pk_try_from), Ok(pk_bincode)) => {
// Check 2: bincode encoding should match original bytes
let bytes_bincode = bincode::serialize(&pk_try_from).unwrap();
assert_eq!(&bytes[..], &bytes_bincode[..]);
// Check 3: From encoding should match original bytes
let bytes_from: [u8; 32] = pk_bincode.into();
assert_eq!(&bytes[..], &bytes_from[..]);
},
// Both agree on failure
(Err(_), Err(_)) => {},
_ => panic!("bincode and try_from do not agree"),
}
}
#[test]
fn signature_serialization(
lo in prop::array::uniform32(any::<u8>()),
hi in prop::array::uniform32(any::<u8>()),
) {
// array length hack
let bytes = {
let mut bytes = [0; 64];
bytes[0..32].copy_from_slice(&lo[..]);
bytes[32..64].copy_from_slice(&hi[..]);
bytes
};
let sig_bytes_from = Signature::<SpendAuth>::from(bytes);
let sig_bytes_bincode: Signature::<SpendAuth>
= bincode::deserialize(&bytes[..]).unwrap();
// Check 1: both decoding methods should have the same result.
assert_eq!(sig_bytes_from, sig_bytes_bincode);
// Check 2: bincode encoding should match original bytes.
let bytes_bincode = bincode::serialize(&sig_bytes_from).unwrap();
assert_eq!(&bytes[..], &bytes_bincode[..]);
// Check 3: From encoding should match original bytes.
let bytes_from: [u8; 64] = sig_bytes_bincode.into();
assert_eq!(&bytes[..], &bytes_from[..]);
}
}

View File

@ -1,62 +0,0 @@
use rand::thread_rng;
use std::collections::HashMap;
use redjubjub::frost;
#[test]
fn check_sign_with_dealer() {
let mut rng = thread_rng();
let numsigners = 5;
let threshold = 3;
let (shares, pubkeys) = frost::keygen_with_dealer(numsigners, threshold, &mut rng).unwrap();
let mut nonces: HashMap<u64, Vec<frost::SigningNonces>> =
HashMap::with_capacity(threshold as usize);
let mut commitments: Vec<frost::SigningCommitments> = Vec::with_capacity(threshold as usize);
// Round 1, generating nonces and signing commitments for each participant.
for participant_index in 1..(threshold + 1) {
// Generate one (1) nonce and one SigningCommitments instance for each
// participant, up to _threshold_.
let (nonce, commitment) = frost::preprocess(1, participant_index as u64, &mut rng);
nonces.insert(participant_index as u64, nonce);
commitments.push(commitment[0]);
}
// This is what the signature aggregator / coordinator needs to do:
// - decide what message to sign
// - take one (unused) commitment per signing participant
let mut signature_shares: Vec<frost::SignatureShare> = Vec::with_capacity(threshold as usize);
let message = "message to sign".as_bytes();
let signing_package = frost::SigningPackage {
message: message.to_vec(),
signing_commitments: commitments,
};
// Round 2: each participant generates their signature share
for (participant_index, nonce) in nonces {
let share_package = shares
.iter()
.find(|share| participant_index == share.index)
.unwrap();
let nonce_to_use = nonce[0];
// Each participant generates their signature share.
let signature_share = frost::sign(&signing_package, nonce_to_use, share_package).unwrap();
signature_shares.push(signature_share);
}
// The aggregator collects the signing shares from all participants and
// generates the final signature.
let group_signature_res = frost::aggregate(&signing_package, &signature_shares[..], &pubkeys);
assert!(group_signature_res.is_ok());
let group_signature = group_signature_res.unwrap();
// Check that the threshold signature can be verified by the group public
// key (aka verification key).
assert!(pubkeys
.group_public
.verify(&message, &group_signature)
.is_ok());
// TODO: also check that the SharePackage.group_public also verifies the group signature.
}

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc 25716e9dc4549b01b395fca1fc076fc34300ad972ac59c5e098f7ec90a03446b # shrinks to tweaks = [ChangePubkey], rng_seed = 946433020594646748
cc ddb674b23f131d33cdbe34f3959f7fc076f4815a12ad5d6f741ae38d2689d1c2 # shrinks to tweaks = [ChangePubkey, ChangePubkey, ChangePubkey, ChangePubkey], rng_seed = 8346595811973717667

View File

@ -1,153 +0,0 @@
use std::convert::TryFrom;
use proptest::prelude::*;
use rand_core::{CryptoRng, RngCore};
use redjubjub::*;
/// A signature test-case, containing signature data and expected validity.
#[derive(Clone, Debug)]
struct SignatureCase<T: SigType> {
msg: Vec<u8>,
sig: Signature<T>,
pk_bytes: VerificationKeyBytes<T>,
invalid_pk_bytes: VerificationKeyBytes<T>,
is_valid: bool,
}
/// A modification to a test-case.
#[derive(Copy, Clone, Debug)]
enum Tweak {
/// No-op, used to check that unchanged cases verify.
None,
/// Change the message the signature is defined for, invalidating the signature.
ChangeMessage,
/// Change the public key the signature is defined for, invalidating the signature.
ChangePubkey,
/* XXX implement this -- needs to regenerate a custom signature because the
nonce commitment is fed into the hash, so it has to have torsion at signing
time.
/// Change the case to have a torsion component in the signature's `r` value.
AddTorsion,
*/
/* XXX implement this -- needs custom handling of field arithmetic.
/// Change the signature's `s` scalar to be unreduced (mod L), invalidating the signature.
UnreducedScalar,
*/
}
impl<T: SigType> SignatureCase<T> {
fn new<R: RngCore + CryptoRng>(mut rng: R, msg: Vec<u8>) -> Self {
let sk = SigningKey::new(&mut rng);
let sig = sk.sign(&mut rng, &msg);
let pk_bytes = VerificationKey::from(&sk).into();
let invalid_pk_bytes = VerificationKey::from(&SigningKey::new(&mut rng)).into();
Self {
msg,
sig,
pk_bytes,
invalid_pk_bytes,
is_valid: true,
}
}
// Check that signature verification succeeds or fails, as expected.
fn check(&self) -> bool {
// The signature data is stored in (refined) byte types, but do a round trip
// conversion to raw bytes to exercise those code paths.
let sig = {
let bytes: [u8; 64] = self.sig.into();
Signature::<T>::from(bytes)
};
let pk_bytes = {
let bytes: [u8; 32] = self.pk_bytes.into();
VerificationKeyBytes::<T>::from(bytes)
};
// Check that the verification key is a valid RedJubjub verification key.
let pub_key = VerificationKey::try_from(pk_bytes)
.expect("The test verification key to be well-formed.");
// Check that signature validation has the expected result.
self.is_valid == pub_key.verify(&self.msg, &sig).is_ok()
}
fn apply_tweak(&mut self, tweak: &Tweak) {
match tweak {
Tweak::None => {}
Tweak::ChangeMessage => {
// Changing the message makes the signature invalid.
self.msg.push(90);
self.is_valid = false;
}
Tweak::ChangePubkey => {
// Changing the public key makes the signature invalid.
self.pk_bytes = self.invalid_pk_bytes;
self.is_valid = false;
}
}
}
}
fn tweak_strategy() -> impl Strategy<Value = Tweak> {
prop_oneof![
10 => Just(Tweak::None),
1 => Just(Tweak::ChangeMessage),
1 => Just(Tweak::ChangePubkey),
]
}
use rand_chacha::ChaChaRng;
use rand_core::SeedableRng;
proptest! {
#[test]
fn tweak_signature(
tweaks in prop::collection::vec(tweak_strategy(), (0,5)),
rng_seed in prop::array::uniform32(any::<u8>()),
) {
// Use a deterministic RNG so that test failures can be reproduced.
// Seeding from a test-controlled value is INSECURE and this code should
// not be copied outside of this test!
let mut rng = ChaChaRng::from_seed(rng_seed);
// Create a test case for each signature type.
let msg = b"test message for proptests";
let mut binding = SignatureCase::<Binding>::new(&mut rng, msg.to_vec());
let mut spendauth = SignatureCase::<SpendAuth>::new(&mut rng, msg.to_vec());
// Apply tweaks to each case.
for t in &tweaks {
binding.apply_tweak(t);
spendauth.apply_tweak(t);
}
assert!(binding.check());
assert!(spendauth.check());
}
#[test]
fn randomization_commutes_with_pubkey_homomorphism(rng_seed in prop::array::uniform32(any::<u8>())) {
// Use a deterministic RNG so that test failures can be reproduced.
let mut rng = ChaChaRng::from_seed(rng_seed);
let r = {
// XXX-jubjub: better API for this
let mut bytes = [0; 64];
rng.fill_bytes(&mut bytes[..]);
Randomizer::from_bytes_wide(&bytes)
};
let sk = SigningKey::<SpendAuth>::new(&mut rng);
let pk = VerificationKey::from(&sk);
let sk_r = sk.randomize(&r);
let pk_r = pk.randomize(&r);
let pk_r_via_sk_rand: [u8; 32] = VerificationKeyBytes::from(VerificationKey::from(&sk_r)).into();
let pk_r_via_pk_rand: [u8; 32] = VerificationKeyBytes::from(pk_r).into();
assert_eq!(pk_r_via_pk_rand, pk_r_via_sk_rand);
}
}

View File

@ -1,24 +0,0 @@
use std::convert::TryFrom;
use jubjub::{AffinePoint, Fq};
use redjubjub::*;
#[test]
fn identity_publickey_passes() {
let identity = AffinePoint::identity();
assert_eq!(<bool>::from(identity.is_small_order()), true);
let bytes = identity.to_bytes();
let pk_bytes = VerificationKeyBytes::<SpendAuth>::from(bytes);
assert!(VerificationKey::<SpendAuth>::try_from(pk_bytes).is_ok());
}
#[test]
fn smallorder_publickey_passes() {
// (1,0) is a point of order 4 on any Edwards curve
let order4 = AffinePoint::from_raw_unchecked(Fq::one(), Fq::zero());
assert_eq!(<bool>::from(order4.is_small_order()), true);
let bytes = order4.to_bytes();
let pk_bytes = VerificationKeyBytes::<SpendAuth>::from(bytes);
assert!(VerificationKey::<SpendAuth>::try_from(pk_bytes).is_ok());
}