Merge pull request #111 from zcash/transcript-api-2

New Transcript API (and modified commitment scheme)
ebfull 2021-01-13 16:50:47 -07:00 committed by GitHub
commit ccca639591
30 changed files with 1564 additions and 1759 deletions


@@ -42,6 +42,7 @@ metrics = "=0.13.0-alpha.13"
metrics-macros = "=0.1.0-alpha.9"
num_cpus = "1.13"
rand = "0.7"
blake2b_simd = "0.5"
[features]
sanity-checks = []


@@ -3,9 +3,8 @@ extern crate criterion;
extern crate halo2;
use crate::arithmetic::{small_multiexp, FieldExt};
use crate::pasta::{EqAffine, Fp, Fq};
use crate::pasta::{EqAffine, Fp};
use crate::poly::commitment::Params;
use crate::transcript::DummyHash;
use halo2::*;
use criterion::{black_box, Criterion};
@@ -13,7 +12,7 @@ use criterion::{black_box, Criterion};
fn criterion_benchmark(c: &mut Criterion) {
// small multiexp
{
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(5);
let params: Params<EqAffine> = Params::new(5);
let g = &mut params.get_g();
let len = g.len() / 2;
let (g_lo, g_hi) = g.split_at_mut(len);


@@ -6,7 +6,7 @@ use halo2::arithmetic::FieldExt;
use halo2::pasta::{EqAffine, Fp, Fq};
use halo2::plonk::*;
use halo2::poly::commitment::Params;
use halo2::transcript::DummyHash;
use halo2::transcript::{DummyHashRead, DummyHashWrite};
use std::marker::PhantomData;
@@ -18,7 +18,7 @@ fn bench_with_k(name: &str, k: u32, c: &mut Criterion) {
pub struct Variable(Column<Advice>, usize);
// Initialize the polynomial commitment parameters
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(k);
let params: Params<EqAffine> = Params::new(k);
struct PLONKConfig {
a: Column<Advice>,
@@ -239,7 +239,8 @@ fn bench_with_k(name: &str, k: u32, c: &mut Criterion) {
};
// Create a proof
Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(&params, &pk, &circuit, &[])
let mut transcript = DummyHashWrite::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[], &mut transcript)
.expect("proof generation should not fail")
});
});
@@ -250,15 +251,16 @@ fn bench_with_k(name: &str, k: u32, c: &mut Criterion) {
};
// Create a proof
let proof = Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(&params, &pk, &circuit, &[])
let mut transcript = DummyHashWrite::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[], &mut transcript)
.expect("proof generation should not fail");
let proof = transcript.finalize();
c.bench_function(&verifier_name, |b| {
b.iter(|| {
let msm = params.empty_msm();
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, &[])
.unwrap();
let mut transcript = DummyHashRead::init(&proof[..], Fq::one());
let guard = verify_proof(&params, pk.get_vk(), msm, &[], &mut transcript).unwrap();
let msm = guard.clone().use_challenges();
assert!(msm.eval());
});
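The two hunks above capture the shape of the new API: instead of `Proof::create` returning a `Proof<C>` struct and `Proof::verify` consuming one, the caller now constructs an explicit transcript and threads it through free functions; the serialized proof is simply the bytes the write transcript accumulated. A minimal sketch of the round trip, using the `DummyHashWrite`/`DummyHashRead` test transcripts from this commit:

```rust
// Prover side: the write transcript wraps a byte sink (here a Vec<u8>) and a
// hash state initialized with a base-field element.
let mut transcript = DummyHashWrite::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[], &mut transcript)
    .expect("proof generation should not fail");
let proof: Vec<u8> = transcript.finalize(); // the proof is the written bytes

// Verifier side: replay the same bytes through a read transcript, which
// recomputes every Fiat-Shamir challenge as it reads.
let msm = params.empty_msm();
let mut transcript = DummyHashRead::init(&proof[..], Fq::one());
let guard = verify_proof(&params, pk.get_vk(), msm, &[], &mut transcript).unwrap();
assert!(guard.use_challenges().eval());
```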


@@ -4,7 +4,7 @@ use halo2::{
pasta::{EqAffine, Fp, Fq},
plonk::*,
poly::commitment::{Blind, Params},
transcript::DummyHash,
transcript::{DummyHashRead, DummyHashWrite},
};
use std::marker::PhantomData;
@@ -249,7 +249,7 @@ fn main() {
let k = 11;
// Initialize the polynomial commitment parameters
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(k);
let params: Params<EqAffine> = Params::new(k);
let empty_circuit: MyCircuit<Fp> = MyCircuit { a: None, k };
@@ -273,18 +273,18 @@ fn main() {
};
// Create a proof
let proof =
Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(&params, &pk, &circuit, &[pubinputs])
.expect("proof generation should not fail");
let mut transcript = DummyHashWrite::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[pubinputs], &mut transcript)
.expect("proof generation should not fail");
let proof: Vec<u8> = transcript.finalize();
println!("[Prover] {}", recorder);
recorder.clear();
let pubinput_slice = &[pubinput];
let msm = params.empty_msm();
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, pubinput_slice)
.unwrap();
let mut transcript = DummyHashRead::init(&proof[..], Fq::one());
let guard = verify_proof(&params, pk.get_vk(), msm, pubinput_slice, &mut transcript).unwrap();
let msm = guard.clone().use_challenges();
assert!(msm.eval());
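`DummyHashRead` and `DummyHashWrite` implement the commit's two new transcript traits, whose definitions live in `src/transcript.rs` and are not part of this excerpt. Inferred from the call sites in this diff (the `io::Result` error type is an assumption), they look roughly like:

```rust
use std::io;

// Verifier side: pull each prover message out of the proof stream, absorbing
// it into the Fiat-Shamir state as it is read.
pub trait TranscriptRead<C: CurveAffine> {
    fn read_point(&mut self) -> io::Result<C>;
    fn read_scalar(&mut self) -> io::Result<C::Scalar>;
}

// Prover side: every written point or scalar is simultaneously absorbed, so
// both sides squeeze identical challenges.
pub trait TranscriptWrite<C: CurveAffine> {
    fn write_point(&mut self, point: C) -> io::Result<()>;
    fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()>;
}
```

Values known to both sides ahead of time, such as the aux commitments in `create_proof` below, are absorbed via a separate `common_point` method rather than written into the proof.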


@@ -122,6 +122,10 @@ pub trait CurveAffine:
/// The base field over which this elliptic curve is constructed.
type Base: FieldExt;
/// Personalization of BLAKE2b hasher used to generate the uniform
/// random string.
const BLAKE2B_PERSONALIZATION: &'static [u8; 16];
/// Obtains the additive identity.
fn zero() -> Self;


@@ -16,15 +16,6 @@ pub trait FieldExt:
/// Inverse of `ROOT_OF_UNITY`
const ROOT_OF_UNITY_INV: Self;
/// The value $(2^S)^{-1} \mod t$.
const UNROLL_T_EXPONENT: [u64; 4];
/// Represents $t$ where $2^S \cdot t = p - 1$ with $t$ odd.
const T_EXPONENT: [u64; 4];
/// The value $t^{-1} \mod 2^S$.
const UNROLL_S_EXPONENT: u64;
/// Generator of the $t-order$ multiplicative subgroup
const DELTA: Self;
@@ -67,68 +58,6 @@ pub trait FieldExt:
/// byte representation of an integer.
fn from_bytes_wide(bytes: &[u8; 64]) -> Self;
/// Returns a square root of this element, if it exists and this element is
/// nonzero. Always returns the same square root, and it is efficient to
/// check that it has done so using `extract_radix2_vartime`.
fn deterministic_sqrt(&self) -> Option<Self> {
let sqrt = self.sqrt();
if bool::from(sqrt.is_none()) {
return None;
}
let sqrt = sqrt.unwrap();
let extracted = sqrt.extract_radix2_vartime()?;
if extracted.1 >> (Self::S - 1) == 1 {
Some(-sqrt)
} else {
Some(sqrt)
}
}
/// Returns an element $a$ of multiplicative order $t$ together with an
/// integer `s` such that `self` is the square of $a \cdot \omega^{s}$ if
/// indeed `self` is a square.
fn extract_radix2_vartime(&self) -> Option<(Self, u64)> {
if bool::from(self.ct_is_zero()) {
return None;
}
// TODO: these can probably be simplified
let t = self.pow_vartime(&[1 << Self::S, 0, 0, 0]);
let t = t.pow_vartime(&Self::UNROLL_T_EXPONENT);
let t = t.pow_vartime(&Self::UNROLL_T_EXPONENT);
let s = self.pow_vartime(&Self::T_EXPONENT);
let mut s = s.pow_vartime(&[Self::UNROLL_S_EXPONENT, 0, 0, 0]);
let mut m = Self::S;
let mut c = Self::ROOT_OF_UNITY_INV;
let mut extract: u64 = 0;
let mut cur = 1;
while s != Self::one() {
let mut i = 1;
{
let mut s2i = s;
s2i = s2i.square();
while s2i != Self::one() {
i += 1;
s2i = s2i.square();
}
}
for _ in 0..(m - i) {
c = c.square();
cur <<= 1;
}
extract |= cur;
s *= c;
m = i;
}
Some((t, extract))
}
/// Exponentiates `self` by `by`, where `by` is a little-endian order
/// integer exponent.
fn pow(&self, by: &[u64; 4]) -> Self {


@@ -11,7 +11,7 @@ use super::{Fp, Fq};
use crate::arithmetic::{Curve, CurveAffine, FieldExt, Group};
macro_rules! new_curve_impl {
($name:ident, $name_affine:ident, $base:ident, $scalar:ident) => {
($name:ident, $name_affine:ident, $base:ident, $scalar:ident, $blake2b_personalization:literal) => {
/// Represents a point in the projective coordinate space.
#[derive(Copy, Clone, Debug)]
pub struct $name {
@@ -497,6 +497,8 @@ macro_rules! new_curve_impl {
type Scalar = $scalar;
type Base = $base;
const BLAKE2B_PERSONALIZATION: &'static [u8; 16] = $blake2b_personalization;
fn zero() -> Self {
Self {
x: $base::zero(),
@@ -700,5 +702,5 @@ macro_rules! new_curve_impl {
};
}
new_curve_impl!(Ep, EpAffine, Fp, Fq);
new_curve_impl!(Eq, EqAffine, Fq, Fp);
new_curve_impl!(Ep, EpAffine, Fp, Fq, b"halo2_____pallas");
new_curve_impl!(Eq, EqAffine, Fq, Fp, b"halo2______vesta");
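These 16-byte tags personalize the BLAKE2b hasher (via the `blake2b_simd = "0.5"` dependency added in `Cargo.toml` above) that generates the uniform random string, which is presumably why `Params::new` no longer takes a hasher type parameter: the domain separation now hangs off the curve itself. A hedged sketch of a personalized BLAKE2b invocation with that crate (the actual string-generation code is not in this excerpt):

```rust
use blake2b_simd::Params as Blake2bParams;

// Two curves hashing identical input still produce unrelated digests,
// because each uses its own BLAKE2B_PERSONALIZATION tag.
let hash = Blake2bParams::new()
    .hash_length(64)
    .personal(b"halo2_____pallas") // EpAffine::BLAKE2B_PERSONALIZATION
    .hash(b"example input");
let digest: &[u8; 64] = hash.as_array();
```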


@@ -6,21 +6,3 @@ mod fq;
pub use fp::*;
pub use fq::*;
#[cfg(test)]
use ff::{Field, PrimeField};
#[cfg(test)]
use crate::arithmetic::FieldExt;
#[test]
fn test_extract() {
let a = Fq::rand();
let a = a.square();
let (t, s) = a.extract_radix2_vartime().unwrap();
assert_eq!(
t.pow_vartime(&[1 << Fq::S, 0, 0, 0]) * Fq::ROOT_OF_UNITY.pow_vartime(&[s, 0, 0, 0]),
a
);
assert_eq!(a.deterministic_sqrt().unwrap().square(), a);
}


@@ -643,20 +643,7 @@ impl FieldExt for Fp {
0xb4ed8e647196dad1,
0x2cd5282c53116b5c,
]);
const UNROLL_T_EXPONENT: [u64; 4] = [
0x955a0a417453113c,
0x0000000022016b89,
0xc000000000000000,
0x3f7ed4c6,
];
const T_EXPONENT: [u64; 4] = [
0x094cf91b992d30ed,
0x00000000224698fc,
0x0000000000000000,
0x40000000,
];
const DELTA: Self = DELTA;
const UNROLL_S_EXPONENT: u64 = 0x204ace5;
const TWO_INV: Self = Fp::from_raw([
0xcc96987680000001,
0x11234c7e04a67c8d,
@@ -791,13 +778,6 @@ fn test_sqrt() {
assert!(v == Fp::TWO_INV || (-v) == Fp::TWO_INV);
}
#[test]
fn test_deterministic_sqrt() {
// NB: TWO_INV is standing in as a "random" field element
let v = (Fp::TWO_INV).square().deterministic_sqrt().unwrap();
assert!(v == Fp::TWO_INV || (-v) == Fp::TWO_INV);
}
#[test]
fn test_zeta() {
assert_eq!(


@@ -643,20 +643,7 @@ impl FieldExt for Fq {
0xf4c8f353124086c1,
0x2235e1a7415bf936,
]);
const UNROLL_T_EXPONENT: [u64; 4] = [
0xcc771cc2ac1e1664,
0x00000000062dfe9e,
0xc000000000000000,
0xb89e9c7,
];
const T_EXPONENT: [u64; 4] = [
0x0994a8dd8c46eb21,
0x00000000224698fc,
0x0000000000000000,
0x40000000,
];
const DELTA: Self = DELTA;
const UNROLL_S_EXPONENT: u64 = 0xd1d858e1;
const TWO_INV: Self = Fq::from_raw([
0xc623759080000001,
0x11234c7e04ca546e,
@@ -791,13 +778,6 @@ fn test_sqrt() {
assert!(v == Fq::TWO_INV || (-v) == Fq::TWO_INV);
}
#[test]
fn test_deterministic_sqrt() {
// NB: TWO_INV is standing in as a "random" field element
let v = (Fq::TWO_INV).square().deterministic_sqrt().unwrap();
assert!(v == Fq::TWO_INV || (-v) == Fq::TWO_INV);
}
#[test]
fn test_zeta() {
assert_eq!(


@@ -6,9 +6,7 @@
//! [plonk]: https://eprint.iacr.org/2019/953
use crate::arithmetic::CurveAffine;
use crate::poly::{
multiopen, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial,
};
use crate::poly::{Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial};
use crate::transcript::ChallengeScalar;
mod circuit;
@@ -48,20 +46,6 @@ pub struct ProvingKey<C: CurveAffine> {
permutations: Vec<permutation::ProvingKey<C>>,
}
/// This is an object which represents a (Turbo)PLONK proof.
// This structure must never allow points at infinity.
#[derive(Debug, Clone)]
pub struct Proof<C: CurveAffine> {
advice_commitments: Vec<C>,
permutations: Vec<permutation::Proof<C>>,
lookups: Vec<lookup::Proof<C>>,
advice_evals: Vec<C::Scalar>,
aux_evals: Vec<C::Scalar>,
fixed_evals: Vec<C::Scalar>,
vanishing: vanishing::Proof<C>,
multiopening: multiopen::Proof<C>,
}
/// This is an error that could occur during proving or circuit synthesis.
// TODO: these errors need to be cleaned up
#[derive(Debug)]
@@ -122,7 +106,7 @@ fn test_proving() {
use crate::dev::MockProver;
use crate::pasta::{EqAffine, Fp, Fq};
use crate::poly::commitment::{Blind, Params};
use crate::transcript::DummyHash;
use crate::transcript::{DummyHashRead, DummyHashWrite};
use circuit::{Advice, Column, Fixed};
use std::marker::PhantomData;
const K: u32 = 5;
@@ -132,7 +116,7 @@ fn test_proving() {
pub struct Variable(Column<Advice>, usize);
// Initialize the polynomial commitment parameters
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(K);
let params: Params<EqAffine> = Params::new(K);
struct PLONKConfig {
a: Column<Advice>,
@@ -471,20 +455,23 @@ fn test_proving() {
assert_eq!(prover.verify(), Ok(()));
for _ in 0..100 {
let mut transcript = DummyHashWrite::init(vec![], Fq::one());
// Create a proof
let proof = Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(
create_proof(
&params,
&pk,
&circuit,
&[pubinputs.clone()],
&mut transcript,
)
.expect("proof generation should not fail");
let proof: Vec<u8> = transcript.finalize();
let pubinput_slice = &[pubinput];
let msm = params.empty_msm();
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, pubinput_slice)
.unwrap();
let mut transcript = DummyHashRead::init(&proof[..], Fq::one());
let guard =
verify_proof(&params, pk.get_vk(), msm, pubinput_slice, &mut transcript).unwrap();
{
let msm = guard.clone().use_challenges();
assert!(msm.eval());
@@ -496,9 +483,9 @@ fn test_proving() {
}
let msm = guard.clone().use_challenges();
assert!(msm.clone().eval());
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, pubinput_slice)
.unwrap();
let mut transcript = DummyHashRead::init(&proof[..], Fq::one());
let guard =
verify_proof(&params, pk.get_vk(), msm, pubinput_slice, &mut transcript).unwrap();
{
let msm = guard.clone().use_challenges();
assert!(msm.eval());


@@ -1,5 +1,4 @@
use super::circuit::{Any, Column};
use crate::arithmetic::CurveAffine;
mod prover;
mod verifier;
@@ -37,15 +36,3 @@ impl Argument {
3
}
}
#[derive(Clone, Debug)]
pub(crate) struct Proof<C: CurveAffine> {
product_commitment: C,
product_eval: C::Scalar,
product_inv_eval: C::Scalar,
permuted_input_commitment: C,
permuted_table_commitment: C,
permuted_input_eval: C::Scalar,
permuted_input_inv_eval: C::Scalar,
permuted_table_eval: C::Scalar,
}
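With the monolithic `Proof<C>` removed, the lookup verifier (see `lookup::verifier` below) reads each message from the transcript at the point in the protocol where the prover wrote it, and encodes that progress in the type system. The resulting call chain, sketched with hypothetical binding names:

```rust
// Each step consumes the previous state, so messages cannot be read out of
// order; the verifier squeezes challenges between steps exactly as the
// prover wrote them.
let permuted = lookup.read_permuted_commitments(&mut transcript)?; // A', S'
// ... beta and gamma are squeezed here ...
let committed = permuted.read_product_commitment(&mut transcript)?; // Z
// ... later, after the evaluation point x is squeezed ...
let evaluated = committed.evaluate(&mut transcript)?; // five evaluations
```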


@@ -2,7 +2,7 @@ use super::super::{
circuit::{Any, Column},
ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, ProvingKey,
};
use super::{Argument, Proof};
use super::Argument;
use crate::{
arithmetic::{eval_polynomial, parallelize, BatchInvert, Curve, CurveAffine, FieldExt},
poly::{
@@ -10,7 +10,7 @@ use crate::{
multiopen::ProverQuery,
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation,
},
transcript::{Hasher, Transcript},
transcript::TranscriptWrite,
};
use ff::Field;
use std::{collections::BTreeMap, iter};
@@ -47,13 +47,10 @@ pub(in crate::plonk) struct Committed<'a, C: CurveAffine> {
pub(in crate::plonk) struct Constructed<C: CurveAffine> {
permuted_input_poly: Polynomial<C::Scalar, Coeff>,
permuted_input_blind: Blind<C::Scalar>,
permuted_input_commitment: C,
permuted_table_poly: Polynomial<C::Scalar, Coeff>,
permuted_table_blind: Blind<C::Scalar>,
permuted_table_commitment: C,
product_poly: Polynomial<C::Scalar, Coeff>,
product_blind: Blind<C::Scalar>,
product_commitment: C,
}
pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
@@ -75,24 +72,19 @@ impl Argument {
/// - constructs Permuted<C> struct using permuted_input_value = A', and
/// permuted_table_column = S'.
/// The Permuted<C> struct is used to update the Lookup, and is then returned.
pub(in crate::plonk) fn commit_permuted<
'a,
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
pub(in crate::plonk) fn commit_permuted<'a, C: CurveAffine, T: TranscriptWrite<C>>(
&self,
pk: &ProvingKey<C>,
params: &Params<C>,
domain: &EvaluationDomain<C::Scalar>,
theta: ChallengeTheta<C::Scalar>,
theta: ChallengeTheta<C>,
advice_values: &'a [Polynomial<C::Scalar, LagrangeCoeff>],
fixed_values: &'a [Polynomial<C::Scalar, LagrangeCoeff>],
aux_values: &'a [Polynomial<C::Scalar, LagrangeCoeff>],
advice_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
fixed_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
aux_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
transcript: &mut Transcript<C, HBase, HScalar>,
transcript: &mut T,
) -> Result<Permuted<'a, C>, Error> {
// Closure to get values of columns and compress them
let compress_columns = |columns: &[Column<Any>]| {
@@ -150,12 +142,12 @@ impl Argument {
// Hash permuted input commitment
transcript
.absorb_point(&permuted_input_commitment)
.write_point(permuted_input_commitment)
.map_err(|_| Error::TranscriptError)?;
// Hash permuted table commitment
transcript
.absorb_point(&permuted_table_commitment)
.write_point(permuted_table_commitment)
.map_err(|_| Error::TranscriptError)?;
let permuted_input_coset = pk
@@ -197,14 +189,14 @@ impl<'a, C: CurveAffine> Permuted<'a, C> {
/// grand product polynomial over the lookup. The grand product polynomial
/// is used to populate the Product<C> struct. The Product<C> struct is
/// added to the Lookup and finally returned by the method.
pub(in crate::plonk) fn commit_product<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn commit_product<T: TranscriptWrite<C>>(
self,
pk: &ProvingKey<C>,
params: &Params<C>,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
theta: ChallengeTheta<C>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
transcript: &mut T,
) -> Result<Committed<'a, C>, Error> {
// Goal is to compute the products of fractions
//
@@ -331,7 +323,7 @@ impl<'a, C: CurveAffine> Permuted<'a, C> {
// Hash product commitment
transcript
.absorb_point(&product_commitment)
.write_point(product_commitment)
.map_err(|_| Error::TranscriptError)?;
Ok(Committed::<'a, C> {
@@ -354,9 +346,9 @@ impl<'a, C: CurveAffine> Committed<'a, C> {
pub(in crate::plonk) fn construct(
self,
pk: &'a ProvingKey<C>,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
theta: ChallengeTheta<C>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
) -> Result<
(
Constructed<C>,
@@ -434,13 +426,10 @@ impl<'a, C: CurveAffine> Committed<'a, C> {
Constructed {
permuted_input_poly: permuted.permuted_input_poly,
permuted_input_blind: permuted.permuted_input_blind,
permuted_input_commitment: permuted.permuted_input_commitment,
permuted_table_poly: permuted.permuted_table_poly,
permuted_table_blind: permuted.permuted_table_blind,
permuted_table_commitment: permuted.permuted_table_commitment,
product_poly: self.product_poly,
product_blind: self.product_blind,
product_commitment: self.product_commitment,
},
expressions,
))
@@ -448,12 +437,12 @@ impl<'a, C: CurveAffine> Committed<'a, C> {
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn evaluate<T: TranscriptWrite<C>>(
self,
pk: &ProvingKey<C>,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
x: ChallengeX<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let domain = &pk.vk.domain;
let x_inv = domain.rotate_omega(*x, Rotation(-1));
@@ -471,17 +460,19 @@ impl<C: CurveAffine> Constructed<C> {
.chain(Some(permuted_input_inv_eval))
.chain(Some(permuted_table_eval))
{
transcript.absorb_scalar(eval);
transcript
.write_scalar(eval)
.map_err(|_| Error::TranscriptError)?;
}
Evaluated {
Ok(Evaluated {
constructed: self,
product_eval,
product_inv_eval,
permuted_input_eval,
permuted_input_inv_eval,
permuted_table_eval,
}
})
}
}
@@ -489,7 +480,7 @@ impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn open<'a>(
&'a self,
pk: &'a ProvingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
let x_inv = pk.vk.domain.rotate_omega(*x, Rotation(-1));
@@ -520,29 +511,16 @@ impl<C: CurveAffine> Evaluated<C> {
point: x_inv,
poly: &self.constructed.permuted_input_poly,
blind: self.constructed.permuted_input_blind,
eval: self.permuted_input_eval,
eval: self.permuted_input_inv_eval,
}))
// Open lookup product commitments at x_inv
.chain(Some(ProverQuery {
point: x_inv,
poly: &self.constructed.product_poly,
blind: self.constructed.product_blind,
eval: self.product_eval,
eval: self.product_inv_eval,
}))
}
pub(crate) fn build(self) -> Proof<C> {
Proof {
product_commitment: self.constructed.product_commitment,
product_eval: self.product_eval,
product_inv_eval: self.product_inv_eval,
permuted_input_commitment: self.constructed.permuted_input_commitment,
permuted_table_commitment: self.constructed.permuted_table_commitment,
permuted_input_eval: self.permuted_input_eval,
permuted_input_inv_eval: self.permuted_input_inv_eval,
permuted_table_eval: self.permuted_table_eval,
}
}
}
/// Given a column of input values A and a column of table values S,


@@ -1,51 +1,110 @@
use std::iter;
use super::super::circuit::{Any, Column};
use super::{Argument, Proof};
use super::Argument;
use crate::{
arithmetic::CurveAffine,
plonk::{ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, VerifyingKey},
poly::{multiopen::VerifierQuery, Rotation},
transcript::{Hasher, Transcript},
transcript::TranscriptRead,
};
use ff::Field;
impl<C: CurveAffine> Proof<C> {
pub(in crate::plonk) fn absorb_permuted_commitments<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
pub struct PermutationCommitments<C: CurveAffine> {
permuted_input_commitment: C,
permuted_table_commitment: C,
}
pub struct Committed<C: CurveAffine> {
permuted: PermutationCommitments<C>,
product_commitment: C,
}
pub struct Evaluated<C: CurveAffine> {
committed: Committed<C>,
product_eval: C::Scalar,
product_inv_eval: C::Scalar,
permuted_input_eval: C::Scalar,
permuted_input_inv_eval: C::Scalar,
permuted_table_eval: C::Scalar,
}
impl Argument {
pub(in crate::plonk) fn read_permuted_commitments<C: CurveAffine, T: TranscriptRead<C>>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
transcript
.absorb_point(&self.permuted_input_commitment)
transcript: &mut T,
) -> Result<PermutationCommitments<C>, Error> {
let permuted_input_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
let permuted_table_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
transcript
.absorb_point(&self.permuted_table_commitment)
.map_err(|_| Error::TranscriptError)
}
pub(in crate::plonk) fn absorb_product_commitment<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
transcript
.absorb_point(&self.product_commitment)
.map_err(|_| Error::TranscriptError)
Ok(PermutationCommitments {
permuted_input_commitment,
permuted_table_commitment,
})
}
}
impl<C: CurveAffine> PermutationCommitments<C> {
pub(in crate::plonk) fn read_product_commitment<T: TranscriptRead<C>>(
self,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
let product_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
Ok(Committed {
permuted: self,
product_commitment,
})
}
}
impl<C: CurveAffine> Committed<C> {
pub(crate) fn evaluate<T: TranscriptRead<C>>(
self,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let product_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let product_inv_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permuted_input_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permuted_input_inv_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permuted_table_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
Ok(Evaluated {
committed: self,
product_eval,
product_inv_eval,
permuted_input_eval,
permuted_input_inv_eval,
permuted_table_eval,
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn expressions<'a>(
&'a self,
vk: &'a VerifyingKey<C>,
l_0: C::Scalar,
argument: &'a Argument,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
theta: ChallengeTheta<C>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
advice_evals: &[C::Scalar],
fixed_evals: &[C::Scalar],
aux_evals: &[C::Scalar],
@@ -98,19 +157,10 @@ impl<C: CurveAffine> Proof<C> {
))
}
pub(in crate::plonk) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
iter::empty()
.chain(Some(&self.product_eval))
.chain(Some(&self.product_inv_eval))
.chain(Some(&self.permuted_input_eval))
.chain(Some(&self.permuted_input_inv_eval))
.chain(Some(&self.permuted_table_eval))
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
vk: &'a VerifyingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
let x_inv = vk.domain.rotate_omega(*x, Rotation(-1));
@@ -118,31 +168,31 @@ impl<C: CurveAffine> Proof<C> {
// Open lookup product commitments at x
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.product_commitment,
commitment: &self.committed.product_commitment,
eval: self.product_eval,
}))
// Open lookup input commitments at x
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.permuted_input_commitment,
commitment: &self.committed.permuted.permuted_input_commitment,
eval: self.permuted_input_eval,
}))
// Open lookup table commitments at x
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.permuted_table_commitment,
commitment: &self.committed.permuted.permuted_table_commitment,
eval: self.permuted_table_eval,
}))
// Open lookup input commitments at \omega^{-1} x
.chain(Some(VerifierQuery {
point: x_inv,
commitment: &self.permuted_input_commitment,
commitment: &self.committed.permuted.permuted_input_commitment,
eval: self.permuted_input_inv_eval,
}))
// Open lookup product commitments at \omega^{-1} x
.chain(Some(VerifierQuery {
point: x_inv,
commitment: &self.product_commitment,
commitment: &self.committed.product_commitment,
eval: self.product_inv_eval,
}))
}


@@ -56,11 +56,3 @@ pub(crate) struct ProvingKey<C: CurveAffine> {
polys: Vec<Polynomial<C::Scalar, Coeff>>,
cosets: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
}
#[derive(Debug, Clone)]
pub(crate) struct Proof<C: CurveAffine> {
permutation_product_commitment: C,
permutation_product_eval: C::Scalar,
permutation_product_inv_eval: C::Scalar,
permutation_evals: Vec<C::Scalar>,
}
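The permutation argument gets the same treatment: its `Proof<C>` is replaced by verifier-side states populated straight from the transcript (see `permutation::verifier` below). Sketch of the read order:

```rust
// One permutation argument on the verifier side (types from
// permutation::verifier in this commit).
let committed = argument.read_product_commitment(&mut transcript)?;
// ... y and x are squeezed in between ...
let evaluated = committed.evaluate(vkey, &mut transcript)?;
// expressions() feeds the h(X) check; queries() feeds the multiopen check.
let queries = evaluated.queries(vk, vkey, x);
```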


@@ -1,7 +1,7 @@
use ff::Field;
use std::iter;
use super::{Argument, Proof, ProvingKey};
use super::{Argument, ProvingKey};
use crate::{
arithmetic::{eval_polynomial, parallelize, BatchInvert, Curve, CurveAffine, FieldExt},
plonk::{self, ChallengeBeta, ChallengeGamma, ChallengeX, Error},
@@ -10,7 +10,7 @@ use crate::{
multiopen::ProverQuery,
Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation,
},
transcript::{Hasher, Transcript},
transcript::TranscriptWrite,
};
pub(crate) struct Committed<C: CurveAffine> {
@@ -18,13 +18,11 @@ pub(crate) struct Committed<C: CurveAffine> {
permutation_product_coset: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
permutation_product_coset_inv: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
permutation_product_blind: Blind<C::Scalar>,
permutation_product_commitment: C,
}
pub(crate) struct Constructed<C: CurveAffine> {
permutation_product_poly: Polynomial<C::Scalar, Coeff>,
permutation_product_blind: Blind<C::Scalar>,
permutation_product_commitment: C,
}
pub(crate) struct Evaluated<C: CurveAffine> {
@@ -35,19 +33,15 @@ pub(crate) struct Evaluated<C: CurveAffine> {
}
impl Argument {
pub(in crate::plonk) fn commit<
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
pub(in crate::plonk) fn commit<C: CurveAffine, T: TranscriptWrite<C>>(
&self,
params: &Params<C>,
pk: &plonk::ProvingKey<C>,
pkey: &ProvingKey<C>,
advice: &[Polynomial<C::Scalar, LagrangeCoeff>],
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
let domain = &pk.vk.domain;
@@ -129,7 +123,7 @@ impl Argument {
// Hash the permutation product commitment
transcript
.absorb_point(&permutation_product_commitment)
.write_point(permutation_product_commitment)
.map_err(|_| Error::TranscriptError)?;
Ok(Committed {
@@ -137,7 +131,6 @@ impl Argument {
permutation_product_coset,
permutation_product_coset_inv,
permutation_product_blind,
permutation_product_commitment,
})
}
}
@@ -149,8 +142,8 @@ impl<C: CurveAffine> Committed<C> {
p: &'a Argument,
pkey: &'a ProvingKey<C>,
advice_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
) -> Result<
(
Constructed<C>,
@@ -211,7 +204,6 @@ impl<C: CurveAffine> Committed<C> {
Constructed {
permutation_product_poly: self.permutation_product_poly,
permutation_product_blind: self.permutation_product_blind,
permutation_product_commitment: self.permutation_product_commitment,
},
expressions,
))
@@ -219,7 +211,7 @@ impl<C: CurveAffine> Committed<C> {
}
impl<C: CurveAffine> super::ProvingKey<C> {
fn evaluate(&self, x: ChallengeX<C::Scalar>) -> Vec<C::Scalar> {
fn evaluate(&self, x: ChallengeX<C>) -> Vec<C::Scalar> {
self.polys
.iter()
.map(|poly| eval_polynomial(poly, *x))
@@ -229,7 +221,7 @@ impl<C: CurveAffine> super::ProvingKey<C> {
fn open<'a>(
&'a self,
evals: &'a [C::Scalar],
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
self.polys
.iter()
@@ -244,13 +236,13 @@ impl<C: CurveAffine> super::ProvingKey<C> {
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn evaluate<T: TranscriptWrite<C>>(
self,
pk: &plonk::ProvingKey<C>,
pkey: &ProvingKey<C>,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
x: ChallengeX<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let domain = &pk.vk.domain;
let permutation_product_eval = eval_polynomial(&self.permutation_product_poly, *x);
@@ -268,15 +260,17 @@ impl<C: CurveAffine> Constructed<C> {
.chain(Some(&permutation_product_inv_eval))
.chain(permutation_evals.iter())
{
transcript.absorb_scalar(*eval);
transcript
.write_scalar(*eval)
.map_err(|_| Error::TranscriptError)?;
}
Evaluated {
Ok(Evaluated {
constructed: self,
permutation_product_eval,
permutation_product_inv_eval,
permutation_evals,
}
})
}
}
@@ -285,7 +279,7 @@ impl<C: CurveAffine> Evaluated<C> {
&'a self,
pk: &'a plonk::ProvingKey<C>,
pkey: &'a ProvingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
let x_inv = pk.vk.domain.rotate_omega(*x, Rotation(-1));
@@ -306,13 +300,4 @@ impl<C: CurveAffine> Evaluated<C> {
// Open permutation polynomial commitments at x
.chain(pkey.open(&self.permutation_evals, x))
}
pub(crate) fn build(self) -> Proof<C> {
Proof {
permutation_product_commitment: self.constructed.permutation_product_commitment,
permutation_product_eval: self.permutation_product_eval,
permutation_product_inv_eval: self.permutation_product_inv_eval,
permutation_evals: self.permutation_evals,
}
}
}


@@ -1,41 +1,80 @@
use ff::Field;
use std::iter;
use super::{Argument, Proof, VerifyingKey};
use super::{Argument, VerifyingKey};
use crate::{
arithmetic::{CurveAffine, FieldExt},
plonk::{self, ChallengeBeta, ChallengeGamma, ChallengeX, Error},
poly::{multiopen::VerifierQuery, Rotation},
transcript::{Hasher, Transcript},
transcript::TranscriptRead,
};
impl<C: CurveAffine> Proof<C> {
pub(crate) fn check_lengths(&self, p: &Argument) -> Result<(), Error> {
if self.permutation_evals.len() != p.columns.len() {
return Err(Error::IncompatibleParams);
pub struct Committed<C: CurveAffine> {
permutation_product_commitment: C,
}
pub struct Evaluated<C: CurveAffine> {
permutation_product_commitment: C,
permutation_product_eval: C::Scalar,
permutation_product_inv_eval: C::Scalar,
permutation_evals: Vec<C::Scalar>,
}
impl Argument {
pub(crate) fn read_product_commitment<C: CurveAffine, T: TranscriptRead<C>>(
&self,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
let permutation_product_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
Ok(Committed {
permutation_product_commitment,
})
}
}
impl<C: CurveAffine> Committed<C> {
pub(crate) fn evaluate<T: TranscriptRead<C>>(
self,
vkey: &VerifyingKey<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let permutation_product_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permutation_product_inv_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let mut permutation_evals = Vec::with_capacity(vkey.commitments.len());
for _ in 0..vkey.commitments.len() {
permutation_evals.push(
transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?,
);
}
Ok(())
}
pub(crate) fn absorb_commitments<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
transcript
.absorb_point(&self.permutation_product_commitment)
.map_err(|_| Error::TranscriptError)
Ok(Evaluated {
permutation_product_commitment: self.permutation_product_commitment,
permutation_product_eval,
permutation_product_inv_eval,
permutation_evals,
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn expressions<'a>(
&'a self,
vk: &'a plonk::VerifyingKey<C>,
p: &'a Argument,
advice_evals: &'a [C::Scalar],
l_0: C::Scalar,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
x: ChallengeX<C::Scalar>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
x: ChallengeX<C>,
) -> impl Iterator<Item = C::Scalar> + 'a {
iter::empty()
// l_0(X) * (1 - z(X)) = 0
@@ -70,18 +109,11 @@ impl<C: CurveAffine> Proof<C> {
}))
}
pub(crate) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
iter::empty()
.chain(Some(&self.permutation_product_eval))
.chain(Some(&self.permutation_product_inv_eval))
.chain(self.permutation_evals.iter())
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
vk: &'a plonk::VerifyingKey<C>,
vkey: &'a VerifyingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
let x_inv = vk.domain.rotate_omega(*x, Rotation(-1));


@@ -3,7 +3,7 @@ use std::iter;
use super::{
circuit::{Advice, Assignment, Circuit, Column, ConstraintSystem, Fixed},
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, Proof,
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error,
ProvingKey,
};
use crate::arithmetic::{eval_polynomial, Curve, CurveAffine, FieldExt};
@@ -12,371 +12,345 @@ use crate::poly::{
multiopen::{self, ProverQuery},
LagrangeCoeff, Polynomial,
};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::TranscriptWrite;
impl<C: CurveAffine> Proof<C> {
/// This creates a proof for the provided `circuit` when given the public
/// parameters `params` and the proving key [`ProvingKey`] that was
/// generated previously for the same circuit.
pub fn create<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
ConcreteCircuit: Circuit<C::Scalar>,
>(
params: &Params<C>,
pk: &ProvingKey<C>,
circuit: &ConcreteCircuit,
aux: &[Polynomial<C::Scalar, LagrangeCoeff>],
) -> Result<Self, Error> {
if aux.len() != pk.vk.cs.num_aux_columns {
return Err(Error::IncompatibleParams);
/// This creates a proof for the provided `circuit` when given the public
/// parameters `params` and the proving key [`ProvingKey`] that was
/// generated previously for the same circuit.
pub fn create_proof<C: CurveAffine, T: TranscriptWrite<C>, ConcreteCircuit: Circuit<C::Scalar>>(
params: &Params<C>,
pk: &ProvingKey<C>,
circuit: &ConcreteCircuit,
aux: &[Polynomial<C::Scalar, LagrangeCoeff>],
transcript: &mut T,
) -> Result<(), Error> {
if aux.len() != pk.vk.cs.num_aux_columns {
return Err(Error::IncompatibleParams);
}
struct WitnessCollection<F: Field> {
advice: Vec<Polynomial<F, LagrangeCoeff>>,
_marker: std::marker::PhantomData<F>,
}
impl<F: Field> Assignment<F> for WitnessCollection<F> {
fn assign_advice(
&mut self,
column: Column<Advice>,
row: usize,
to: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
*self
.advice
.get_mut(column.index())
.and_then(|v| v.get_mut(row))
.ok_or(Error::BoundsFailure)? = to()?;
Ok(())
}
struct WitnessCollection<F: Field> {
advice: Vec<Polynomial<F, LagrangeCoeff>>,
_marker: std::marker::PhantomData<F>,
fn assign_fixed(
&mut self,
_: Column<Fixed>,
_: usize,
_: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
impl<F: Field> Assignment<F> for WitnessCollection<F> {
fn assign_advice(
&mut self,
column: Column<Advice>,
row: usize,
to: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
*self
.advice
.get_mut(column.index())
.and_then(|v| v.get_mut(row))
.ok_or(Error::BoundsFailure)? = to()?;
fn copy(&mut self, _: usize, _: usize, _: usize, _: usize, _: usize) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
fn assign_fixed(
&mut self,
_: Column<Fixed>,
_: usize,
_: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
fn copy(
&mut self,
_: usize,
_: usize,
_: usize,
_: usize,
_: usize,
) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
Ok(())
}
}
let domain = &pk.vk.domain;
let mut meta = ConstraintSystem::default();
let config = ConcreteCircuit::configure(&mut meta);
let domain = &pk.vk.domain;
let mut meta = ConstraintSystem::default();
let config = ConcreteCircuit::configure(&mut meta);
let mut witness = WitnessCollection {
advice: vec![domain.empty_lagrange(); meta.num_advice_columns],
_marker: std::marker::PhantomData,
};
let mut witness = WitnessCollection {
advice: vec![domain.empty_lagrange(); meta.num_advice_columns],
_marker: std::marker::PhantomData,
};
// Synthesize the circuit to obtain the witness and other information.
circuit.synthesize(&mut witness, config)?;
// Synthesize the circuit to obtain the witness and other information.
circuit.synthesize(&mut witness, config)?;
let witness = witness;
let witness = witness;
// Create a transcript for obtaining Fiat-Shamir challenges.
let mut transcript = Transcript::<C, HBase, HScalar>::new();
// Compute commitments to aux column polynomials
let aux_commitments_projective: Vec<_> = aux
.iter()
.map(|poly| params.commit_lagrange(poly, Blind::default()))
.collect();
let mut aux_commitments = vec![C::zero(); aux_commitments_projective.len()];
C::Projective::batch_to_affine(&aux_commitments_projective, &mut aux_commitments);
let aux_commitments = aux_commitments;
drop(aux_commitments_projective);
metrics::counter!("aux_commitments", aux_commitments.len() as u64);
// Compute commitments to aux column polynomials
let aux_commitments_projective: Vec<_> = aux
.iter()
.map(|poly| params.commit_lagrange(poly, Blind::default()))
.collect();
let mut aux_commitments = vec![C::zero(); aux_commitments_projective.len()];
C::Projective::batch_to_affine(&aux_commitments_projective, &mut aux_commitments);
let aux_commitments = aux_commitments;
drop(aux_commitments_projective);
metrics::counter!("aux_commitments", aux_commitments.len() as u64);
for commitment in &aux_commitments {
transcript
.common_point(*commitment)
.map_err(|_| Error::TranscriptError)?;
}
for commitment in &aux_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
let aux_polys: Vec<_> = aux
.iter()
.map(|poly| {
let lagrange_vec = domain.lagrange_from_vec(poly.to_vec());
domain.lagrange_to_coeff(lagrange_vec)
})
.collect();
let aux_polys: Vec<_> = aux
.iter()
.map(|poly| {
let lagrange_vec = domain.lagrange_from_vec(poly.to_vec());
domain.lagrange_to_coeff(lagrange_vec)
})
.collect();
let aux_cosets: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
let poly = aux_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
let aux_cosets: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
let poly = aux_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
// Compute commitments to advice column polynomials
let advice_blinds: Vec<_> = witness
.advice
.iter()
.map(|_| Blind(C::Scalar::rand()))
.collect();
let advice_commitments_projective: Vec<_> = witness
.advice
.iter()
.zip(advice_blinds.iter())
.map(|(poly, blind)| params.commit_lagrange(poly, *blind))
.collect();
let mut advice_commitments = vec![C::zero(); advice_commitments_projective.len()];
C::Projective::batch_to_affine(&advice_commitments_projective, &mut advice_commitments);
let advice_commitments = advice_commitments;
drop(advice_commitments_projective);
metrics::counter!("advice_commitments", advice_commitments.len() as u64);
// Compute commitments to advice column polynomials
let advice_blinds: Vec<_> = witness
.advice
.iter()
.map(|_| Blind(C::Scalar::rand()))
.collect();
let advice_commitments_projective: Vec<_> = witness
.advice
.iter()
.zip(advice_blinds.iter())
.map(|(poly, blind)| params.commit_lagrange(poly, *blind))
.collect();
let mut advice_commitments = vec![C::zero(); advice_commitments_projective.len()];
C::Projective::batch_to_affine(&advice_commitments_projective, &mut advice_commitments);
let advice_commitments = advice_commitments;
drop(advice_commitments_projective);
metrics::counter!("advice_commitments", advice_commitments.len() as u64);
for commitment in &advice_commitments {
transcript
.write_point(*commitment)
.map_err(|_| Error::TranscriptError)?;
}
for commitment in &advice_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
let advice_polys: Vec<_> = witness
.advice
.clone()
.into_iter()
.map(|poly| domain.lagrange_to_coeff(poly))
.collect();
let advice_polys: Vec<_> = witness
.advice
.clone()
let advice_cosets: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
let poly = advice_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(transcript);
// Construct and commit to permuted values for each lookup
let lookups = pk
.vk
.cs
.lookups
.iter()
.map(|lookup| {
lookup.commit_permuted(
&pk,
&params,
&domain,
theta,
&witness.advice,
&pk.fixed_values,
&aux,
&advice_cosets,
&pk.fixed_cosets,
&aux_cosets,
transcript,
)
})
.collect::<Result<Vec<_>, _>>()?;
// Sample beta challenge
let beta = ChallengeBeta::get(transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(transcript);
// Commit to permutations, if any.
let permutations = pk
.vk
.cs
.permutations
.iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.commit(params, pk, pkey, &witness.advice, beta, gamma, transcript))
.collect::<Result<Vec<_>, _>>()?;
// Construct and commit to products for each lookup
let lookups = lookups
.into_iter()
.map(|lookup| lookup.commit_product(&pk, &params, theta, beta, gamma, transcript))
.collect::<Result<Vec<_>, _>>()?;
// Obtain challenge for keeping all separate gates linearly independent
let y = ChallengeY::get(transcript);
// Evaluate the h(X) polynomial's constraint system expressions for the permutation constraints, if any.
let (permutations, permutation_expressions): (Vec<_>, Vec<_>) = {
let tmp = permutations
.into_iter()
.map(|poly| domain.lagrange_to_coeff(poly))
.collect();
let advice_cosets: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
let poly = advice_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(&mut transcript);
// Construct and commit to permuted values for each lookup
let lookups = pk
.vk
.cs
.lookups
.iter()
.map(|lookup| {
lookup.commit_permuted(
&pk,
&params,
&domain,
theta,
&witness.advice,
&pk.fixed_values,
&aux,
&advice_cosets,
&pk.fixed_cosets,
&aux_cosets,
&mut transcript,
)
})
.collect::<Result<Vec<_>, _>>()?;
// Sample beta challenge
let beta = ChallengeBeta::get(&mut transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(&mut transcript);
// Commit to permutations, if any.
let permutations = pk
.vk
.cs
.permutations
.iter()
.zip(pk.vk.cs.permutations.iter())
.zip(pk.permutations.iter())
.map(|(p, pkey)| {
p.commit(
params,
pk,
pkey,
&witness.advice,
beta,
gamma,
&mut transcript,
)
.map(|((p, argument), pkey)| {
p.construct(pk, argument, pkey, &advice_cosets, beta, gamma)
})
.collect::<Result<Vec<_>, _>>()?;
// Construct and commit to products for each lookup
let lookups = lookups
tmp.into_iter().unzip()
};
// Evaluate the h(X) polynomial's constraint system expressions for the lookup constraints, if any.
let (lookups, lookup_expressions): (Vec<_>, Vec<_>) = {
let tmp = lookups
.into_iter()
.map(|lookup| lookup.commit_product(&pk, &params, theta, beta, gamma, &mut transcript))
.map(|p| p.construct(pk, theta, beta, gamma))
.collect::<Result<Vec<_>, _>>()?;
// Obtain challenge for keeping all separate gates linearly independent
let y = ChallengeY::get(&mut transcript);
tmp.into_iter().unzip()
};
// Evaluate the h(X) polynomial's constraint system expressions for the permutation constraints, if any.
let (permutations, permutation_expressions): (Vec<_>, Vec<_>) = {
let tmp = permutations
.into_iter()
.zip(pk.vk.cs.permutations.iter())
.zip(pk.permutations.iter())
.map(|((p, argument), pkey)| {
p.construct(pk, argument, pkey, &advice_cosets, beta, gamma)
})
.collect::<Result<Vec<_>, _>>()?;
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let expressions = iter::empty()
// Custom constraints
.chain(meta.gates.iter().map(|poly| {
poly.evaluate(
&|index| pk.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|index| aux_cosets[index].clone(),
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
)
}))
// Permutation constraints, if any.
.chain(permutation_expressions.into_iter().flatten())
// Lookup constraints, if any.
.chain(lookup_expressions.into_iter().flatten());
tmp.into_iter().unzip()
};
// Construct the vanishing argument
let vanishing = vanishing::Argument::construct(params, domain, expressions, y, transcript)?;
// Evaluate the h(X) polynomial's constraint system expressions for the lookup constraints, if any.
let (lookups, lookup_expressions): (Vec<_>, Vec<_>) = {
let tmp = lookups
.into_iter()
.map(|p| p.construct(pk, theta, beta, gamma))
.collect::<Result<Vec<_>, _>>()?;
let x = ChallengeX::get(transcript);
tmp.into_iter().unzip()
};
// Evaluate polynomials at omega^i x
let advice_evals: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&advice_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let expressions = iter::empty()
// Custom constraints
.chain(meta.gates.iter().map(|poly| {
poly.evaluate(
&|index| pk.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|index| aux_cosets[index].clone(),
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
)
}))
// Permutation constraints, if any.
.chain(permutation_expressions.into_iter().flatten())
// Lookup constraints, if any.
.chain(lookup_expressions.into_iter().flatten());
let aux_evals: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&aux_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Construct the vanishing argument
let vanishing =
vanishing::Argument::construct(params, domain, expressions, y, &mut transcript)?;
let fixed_evals: Vec<_> = meta
.fixed_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&pk.fixed_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
let x = ChallengeX::get(&mut transcript);
// Hash each column evaluation
for eval in advice_evals
.iter()
.chain(aux_evals.iter())
.chain(fixed_evals.iter())
{
transcript
.write_scalar(*eval)
.map_err(|_| Error::TranscriptError)?;
}
// Evaluate polynomials at omega^i x
let advice_evals: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&advice_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
let vanishing = vanishing.evaluate(x, transcript)?;
let aux_evals: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&aux_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Evaluate the permutations, if any, at omega^i x.
let permutations = permutations
.into_iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.evaluate(pk, pkey, x, transcript))
.collect::<Result<Vec<_>, _>>()?;
let fixed_evals: Vec<_> = meta
.fixed_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&pk.fixed_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Evaluate the lookups, if any, at omega^i x.
let lookups = lookups
.into_iter()
.map(|p| p.evaluate(pk, x, transcript))
.collect::<Result<Vec<_>, _>>()?;
// Hash each column evaluation
for eval in advice_evals
.iter()
.chain(aux_evals.iter())
.chain(fixed_evals.iter())
{
transcript.absorb_scalar(*eval);
}
let vanishing = vanishing.evaluate(x, &mut transcript);
// Evaluate the permutations, if any, at omega^i x.
let permutations = permutations
.into_iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.evaluate(pk, pkey, x, &mut transcript))
.collect::<Vec<_>>();
// Evaluate the lookups, if any, at omega^i x.
let lookups = lookups
.into_iter()
.map(|p| p.evaluate(pk, x, &mut transcript))
.collect::<Vec<_>>();
let instances =
iter::empty()
.chain(pk.vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &advice_polys[column.index()],
blind: advice_blinds[column.index()],
eval: advice_evals[query_index],
},
))
.chain(pk.vk.cs.aux_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
let instances =
iter::empty()
.chain(pk.vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &advice_polys[column.index()],
blind: advice_blinds[column.index()],
eval: advice_evals[query_index],
},
))
.chain(
pk.vk
.cs
.aux_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &aux_polys[column.index()],
blind: Blind::default(),
eval: aux_evals[query_index],
},
))
.chain(pk.vk.cs.fixed_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
}),
)
.chain(
pk.vk
.cs
.fixed_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &pk.fixed_polys[column.index()],
blind: Blind::default(),
eval: fixed_evals[query_index],
},
))
// We query the h(X) polynomial at x
.chain(vanishing.open(x))
.chain(
permutations
.iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.open(pk, pkey, x))
.into_iter()
.flatten(),
)
.chain(lookups.iter().map(|p| p.open(pk, x)).into_iter().flatten());
}),
)
// We query the h(X) polynomial at x
.chain(vanishing.open(x))
.chain(
permutations
.iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.open(pk, pkey, x))
.into_iter()
.flatten(),
)
.chain(lookups.iter().map(|p| p.open(pk, x)).into_iter().flatten());
let multiopening = multiopen::Proof::create(params, &mut transcript, instances)
.map_err(|_| Error::OpeningError)?;
Ok(Proof {
advice_commitments,
permutations: permutations.into_iter().map(|p| p.build()).collect(),
lookups: lookups.into_iter().map(|p| p.build()).collect(),
advice_evals,
fixed_evals,
aux_evals,
vanishing: vanishing.build(),
multiopening,
})
}
multiopen::create_proof(params, transcript, instances).map_err(|_| Error::OpeningError)
}
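A side effect of threading one `transcript` value through `create_proof` is that the Fiat-Shamir schedule is now visible in a single function. In the order the challenges are squeezed above:

```rust
// Challenge schedule in create_proof; verify_proof must mirror it exactly.
let theta = ChallengeTheta::get(transcript); // after aux and advice commitments
// ... lookup permuted commitments are written ...
let beta = ChallengeBeta::get(transcript);
let gamma = ChallengeGamma::get(transcript);
// ... permutation and lookup product commitments are written ...
let y = ChallengeY::get(transcript); // keeps gates linearly independent
// ... h(X) piece commitments are written ...
let x = ChallengeX::get(transcript); // evaluation point; openings follow
```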


@@ -9,9 +9,3 @@ mod verifier;
pub(crate) struct Argument<C: CurveAffine> {
_marker: PhantomData<C>,
}
#[derive(Debug, Clone)]
pub(crate) struct Proof<C: CurveAffine> {
h_commitments: Vec<C>,
h_evals: Vec<C::Scalar>,
}


@@ -1,4 +1,4 @@
use super::{Argument, Proof};
use super::Argument;
use crate::{
arithmetic::{eval_polynomial, Curve, CurveAffine, FieldExt},
plonk::{ChallengeX, ChallengeY, Error},
@@ -7,13 +7,12 @@ use crate::{
multiopen::ProverQuery,
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial,
},
transcript::{Hasher, Transcript},
transcript::TranscriptWrite,
};
pub(in crate::plonk) struct Constructed<C: CurveAffine> {
h_pieces: Vec<Polynomial<C::Scalar, Coeff>>,
h_blinds: Vec<Blind<C::Scalar>>,
h_commitments: Vec<C>,
}
pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
@@ -22,12 +21,12 @@ pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
}
impl<C: CurveAffine> Argument<C> {
pub(in crate::plonk) fn construct<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn construct<T: TranscriptWrite<C>>(
params: &Params<C>,
domain: &EvaluationDomain<C::Scalar>,
expressions: impl Iterator<Item = Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
y: ChallengeY<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
y: ChallengeY<C>,
transcript: &mut T,
) -> Result<Constructed<C>, Error> {
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let h_poly = expressions.fold(domain.empty_extended(), |h_poly, v| h_poly * *y + &v);
@@ -59,24 +58,20 @@ impl<C: CurveAffine> Argument<C> {
// Hash each h(X) piece
for c in h_commitments.iter() {
transcript
.absorb_point(c)
.write_point(*c)
.map_err(|_| Error::TranscriptError)?;
}
Ok(Constructed {
h_pieces,
h_blinds,
h_commitments,
})
Ok(Constructed { h_pieces, h_blinds })
}
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn evaluate<T: TranscriptWrite<C>>(
self,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
x: ChallengeX<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let h_evals: Vec<_> = self
.h_pieces
.iter()
@@ -85,20 +80,22 @@ impl<C: CurveAffine> Constructed<C> {
// Hash each advice evaluation
for eval in &h_evals {
transcript.absorb_scalar(*eval);
transcript
.write_scalar(*eval)
.map_err(|_| Error::TranscriptError)?;
}
Evaluated {
Ok(Evaluated {
constructed: self,
h_evals,
}
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn open<'a>(
&'a self,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
self.constructed
.h_pieces
@@ -112,11 +109,4 @@ impl<C: CurveAffine> Evaluated<C> {
eval: *h_eval,
})
}
pub(in crate::plonk) fn build(self) -> Proof<C> {
Proof {
h_commitments: self.constructed.h_commitments,
h_evals: self.h_evals,
}
}
}


@@ -1,46 +1,56 @@
use ff::Field;
use super::Proof;
use crate::{
arithmetic::CurveAffine,
plonk::{ChallengeX, ChallengeY, Error, VerifyingKey},
poly::multiopen::VerifierQuery,
transcript::{Hasher, Transcript},
transcript::{read_n_points, read_n_scalars, TranscriptRead},
};
impl<C: CurveAffine> Proof<C> {
pub(in crate::plonk) fn check_lengths(&self, vk: &VerifyingKey<C>) -> Result<(), Error> {
if self.h_commitments.len() != self.h_evals.len() {
return Err(Error::IncompatibleParams);
}
use super::Argument;
if self.h_commitments.len() != vk.domain.get_quotient_poly_degree() {
return Err(Error::IncompatibleParams);
}
pub struct Committed<C: CurveAffine> {
h_commitments: Vec<C>,
}
Ok(())
}
pub struct Evaluated<C: CurveAffine> {
h_commitments: Vec<C>,
h_evals: Vec<C::Scalar>,
}
pub(in crate::plonk) fn absorb_commitments<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
impl<C: CurveAffine> Argument<C> {
pub(in crate::plonk) fn read_commitments<T: TranscriptRead<C>>(
vk: &VerifyingKey<C>,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
// Obtain a commitment to h(X) in the form of multiple pieces of degree n - 1
for c in &self.h_commitments {
transcript
.absorb_point(c)
.map_err(|_| Error::TranscriptError)?;
}
Ok(())
}
let h_commitments = read_n_points(transcript, vk.domain.get_quotient_poly_degree())
.map_err(|_| Error::TranscriptError)?;
Ok(Committed { h_commitments })
}
}
impl<C: CurveAffine> Committed<C> {
pub(in crate::plonk) fn evaluate<T: TranscriptRead<C>>(
self,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let h_evals = read_n_scalars(transcript, self.h_commitments.len())
.map_err(|_| Error::TranscriptError)?;
Ok(Evaluated {
h_commitments: self.h_commitments,
h_evals,
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn verify(
&self,
expressions: impl Iterator<Item = C::Scalar>,
y: ChallengeY<C::Scalar>,
y: ChallengeY<C>,
xn: C::Scalar,
) -> Result<(), Error> {
let expected_h_eval = expressions.fold(C::Scalar::zero(), |h_eval, v| h_eval * &*y + &v);
@@ -60,13 +70,9 @@ impl<C: CurveAffine> Proof<C> {
Ok(())
}
pub(in crate::plonk) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
self.h_evals.iter()
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
self.h_commitments
.iter()

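`read_n_points` and `read_n_scalars` are new helpers in `src/transcript.rs`; their definitions are not part of this excerpt. From the call sites above they plausibly reduce to:

```rust
use std::io;

// Read a fixed-length run of prover messages; any failure aborts the run,
// and the caller maps it onto Error::TranscriptError.
pub fn read_n_points<C: CurveAffine, T: TranscriptRead<C>>(
    transcript: &mut T,
    n: usize,
) -> io::Result<Vec<C>> {
    (0..n).map(|_| transcript.read_point()).collect()
}

pub fn read_n_scalars<C: CurveAffine, T: TranscriptRead<C>>(
    transcript: &mut T,
    n: usize,
) -> io::Result<Vec<C::Scalar>> {
    (0..n).map(|_| transcript.read_scalar()).collect()
}
```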

@@ -2,215 +2,103 @@ use ff::Field;
use std::iter;
use super::{
ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, Proof,
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error,
VerifyingKey,
};
use crate::arithmetic::{CurveAffine, FieldExt};
use crate::poly::{
commitment::{Guard, Params, MSM},
multiopen::VerifierQuery,
multiopen::{self, VerifierQuery},
};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::{read_n_points, read_n_scalars, TranscriptRead};
impl<'a, C: CurveAffine> Proof<C> {
/// Returns a boolean indicating whether or not the proof is valid
pub fn verify<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
&'a self,
params: &'a Params<C>,
vk: &'a VerifyingKey<C>,
msm: MSM<'a, C>,
aux_commitments: &'a [C],
) -> Result<Guard<'a, C>, Error> {
self.check_lengths(vk, aux_commitments)?;
// Check that aux_commitments matches the expected number of aux_columns
// and self.aux_evals
if aux_commitments.len() != vk.cs.num_aux_columns
|| self.aux_evals.len() != vk.cs.num_aux_columns
{
return Err(Error::IncompatibleParams);
}
// Create a transcript for obtaining Fiat-Shamir challenges.
let mut transcript = Transcript::<C, HBase, HScalar>::new();
// Hash the aux (external) commitments into the transcript
for commitment in aux_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
// Hash the prover's advice commitments into the transcript
for commitment in &self.advice_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(&mut transcript);
// Hash each lookup permuted commitment
for lookup in &self.lookups {
lookup.absorb_permuted_commitments(&mut transcript)?;
}
// Sample beta challenge
let beta = ChallengeBeta::get(&mut transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(&mut transcript);
// Hash each permutation product commitment
for permutation in &self.permutations {
permutation.absorb_commitments(&mut transcript)?;
}
// Hash each lookup product commitment
for lookup in &self.lookups {
lookup.absorb_product_commitment(&mut transcript)?;
}
// Sample y challenge, which keeps the gates linearly independent.
let y = ChallengeY::get(&mut transcript);
self.vanishing.absorb_commitments(&mut transcript)?;
// Sample x challenge, which is used to ensure the circuit is
// satisfied with high probability.
let x = ChallengeX::get(&mut transcript);
// This check ensures the circuit is satisfied so long as the polynomial
// commitments open to the correct values.
self.check_hx(params, vk, theta, beta, gamma, y, x)?;
for eval in self
.advice_evals
.iter()
.chain(self.aux_evals.iter())
.chain(self.fixed_evals.iter())
.chain(self.vanishing.evals())
.chain(
self.permutations
.iter()
.map(|p| p.evals())
.into_iter()
.flatten(),
)
.chain(self.lookups.iter().map(|p| p.evals()).into_iter().flatten())
{
transcript.absorb_scalar(*eval);
}
let queries =
iter::empty()
.chain(vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &self.advice_commitments[column.index()],
eval: self.advice_evals[query_index],
},
))
.chain(
vk.cs
.aux_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &aux_commitments[column.index()],
eval: self.aux_evals[query_index],
}),
)
.chain(vk.cs.fixed_queries.iter().enumerate().map(
|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &vk.fixed_commitments[column.index()],
eval: self.fixed_evals[query_index],
},
))
.chain(self.vanishing.queries(x));
// We are now convinced the circuit is satisfied so long as the
// polynomial commitments open to the correct values.
self.multiopening
.verify(
params,
&mut transcript,
queries
.chain(
self.permutations
.iter()
.zip(vk.permutations.iter())
.map(|(p, vkey)| p.queries(vk, vkey, x))
.into_iter()
.flatten(),
)
.chain(
self.lookups
.iter()
.map(|p| p.queries(vk, x))
.into_iter()
.flatten(),
),
msm,
)
.map_err(|_| Error::OpeningError)
/// Checks the validity of a proof, returning a `Guard` that must be used
/// to complete the verification.
pub fn verify_proof<'a, C: CurveAffine, T: TranscriptRead<C>>(
params: &'a Params<C>,
vk: &VerifyingKey<C>,
msm: MSM<'a, C>,
aux_commitments: &[C],
transcript: &mut T,
) -> Result<Guard<'a, C>, Error> {
// Check that aux_commitments matches the expected number of aux columns
if aux_commitments.len() != vk.cs.num_aux_columns {
return Err(Error::IncompatibleParams);
}
/// Checks that the lengths of vectors are consistent with the constraint
/// system
fn check_lengths(&self, vk: &VerifyingKey<C>, aux_commitments: &[C]) -> Result<(), Error> {
// Check that aux_commitments matches the expected number of aux_columns
// and self.aux_evals
if aux_commitments.len() != vk.cs.num_aux_columns
|| self.aux_evals.len() != vk.cs.num_aux_columns
{
return Err(Error::IncompatibleParams);
}
if self.fixed_evals.len() != vk.cs.fixed_queries.len() {
return Err(Error::IncompatibleParams);
}
if self.advice_evals.len() != vk.cs.advice_queries.len() {
return Err(Error::IncompatibleParams);
}
if self.permutations.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
for (permutation, p) in self.permutations.iter().zip(vk.cs.permutations.iter()) {
permutation.check_lengths(p)?;
}
self.vanishing.check_lengths(vk)?;
if self.lookups.len() != vk.cs.lookups.len() {
return Err(Error::IncompatibleParams);
}
if self.advice_commitments.len() != vk.cs.num_advice_columns {
return Err(Error::IncompatibleParams);
}
Ok(())
// Hash the aux (external) commitments into the transcript
for commitment in aux_commitments {
transcript
.common_point(*commitment)
.map_err(|_| Error::TranscriptError)?
}
/// Checks that this proof's h_evals are correct, and thus that all of the
/// rules are satisfied.
fn check_hx(
&self,
params: &'a Params<C>,
vk: &VerifyingKey<C>,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
y: ChallengeY<C::Scalar>,
x: ChallengeX<C::Scalar>,
) -> Result<(), Error> {
// Hash the prover's advice commitments into the transcript
let advice_commitments =
read_n_points(transcript, vk.cs.num_advice_columns).map_err(|_| Error::TranscriptError)?;
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(transcript);
// Hash each lookup permuted commitment
let lookups = vk
.cs
.lookups
.iter()
.map(|argument| argument.read_permuted_commitments(transcript))
.collect::<Result<Vec<_>, _>>()?;
// Sample beta challenge
let beta = ChallengeBeta::get(transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(transcript);
// Hash each permutation product commitment
let permutations = vk
.cs
.permutations
.iter()
.map(|argument| argument.read_product_commitment(transcript))
.collect::<Result<Vec<_>, _>>()?;
// Hash each lookup product commitment
let lookups = lookups
.into_iter()
.map(|lookup| lookup.read_product_commitment(transcript))
.collect::<Result<Vec<_>, _>>()?;
// Sample y challenge, which keeps the gates linearly independent.
let y = ChallengeY::get(transcript);
let vanishing = vanishing::Argument::read_commitments(vk, transcript)?;
// Sample x challenge, which is used to ensure the circuit is
// satisfied with high probability.
let x = ChallengeX::get(transcript);
let advice_evals = read_n_scalars(transcript, vk.cs.advice_queries.len())
.map_err(|_| Error::TranscriptError)?;
let aux_evals =
read_n_scalars(transcript, vk.cs.aux_queries.len()).map_err(|_| Error::TranscriptError)?;
let fixed_evals = read_n_scalars(transcript, vk.cs.fixed_queries.len())
.map_err(|_| Error::TranscriptError)?;
let vanishing = vanishing.evaluate(transcript)?;
let permutations = permutations
.into_iter()
.zip(vk.permutations.iter())
.map(|(permutation, vkey)| permutation.evaluate(vkey, transcript))
.collect::<Result<Vec<_>, _>>()?;
let lookups = lookups
.into_iter()
.map(|lookup| lookup.evaluate(transcript))
.collect::<Result<Vec<_>, _>>()?;
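// At this point every commitment and evaluation the verifier needs has been
// read from the transcript, in exactly the order the prover wrote them.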
// This check ensures the circuit is satisfied so long as the polynomial
// commitments open to the correct values.
{
// x^n
let xn = x.pow(&[params.n as u64, 0, 0, 0]);
@ -225,26 +113,26 @@ impl<'a, C: CurveAffine> Proof<C> {
// Evaluate the circuit using the custom gates provided
.chain(vk.cs.gates.iter().map(|poly| {
poly.evaluate(
&|index| self.fixed_evals[index],
&|index| self.advice_evals[index],
&|index| self.aux_evals[index],
&|index| fixed_evals[index],
&|index| advice_evals[index],
&|index| aux_evals[index],
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * &scalar,
)
}))
.chain(
self.permutations
permutations
.iter()
.zip(vk.cs.permutations.iter())
.map(|(p, argument)| {
p.expressions(vk, argument, &self.advice_evals, l_0, beta, gamma, x)
p.expressions(vk, argument, &advice_evals, l_0, beta, gamma, x)
})
.into_iter()
.flatten(),
)
.chain(
self.lookups
lookups
.iter()
.zip(vk.cs.lookups.iter())
.map(|(p, argument)| {
@ -255,15 +143,70 @@ impl<'a, C: CurveAffine> Proof<C> {
theta,
beta,
gamma,
&self.advice_evals,
&self.fixed_evals,
&self.aux_evals,
&advice_evals,
&fixed_evals,
&aux_evals,
)
})
.into_iter()
.flatten(),
);
self.vanishing.verify(expressions, y, xn)
vanishing.verify(expressions, y, xn)?;
}
let queries = iter::empty()
.chain(
vk.cs
.advice_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &advice_commitments[column.index()],
eval: advice_evals[query_index],
}),
)
.chain(
vk.cs
.aux_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &aux_commitments[column.index()],
eval: aux_evals[query_index],
}),
)
.chain(
vk.cs
.fixed_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &vk.fixed_commitments[column.index()],
eval: fixed_evals[query_index],
}),
)
.chain(vanishing.queries(x))
.chain(
permutations
.iter()
.zip(vk.permutations.iter())
.map(|(p, vkey)| p.queries(vk, vkey, x))
.into_iter()
.flatten(),
)
.chain(
lookups
.iter()
.map(|p| p.queries(vk, x))
.into_iter()
.flatten(),
);
// We are now convinced the circuit is satisfied so long as the
// polynomial commitments open to the correct values.
multiopen::verify_proof(params, transcript, queries, msm).map_err(|_| Error::OpeningError)
}

View File

@ -3,11 +3,13 @@
//!
//! [halo]: https://eprint.iacr.org/2019/1021
use blake2b_simd::{Params as Blake2bParams, State as Blake2bState};
use super::{Coeff, LagrangeCoeff, Polynomial};
use crate::arithmetic::{best_fft, best_multiexp, parallelize, Curve, CurveAffine, FieldExt};
use crate::transcript::Hasher;
use ff::{Field, PrimeField};
use std::convert::TryInto;
use std::ops::{Add, AddAssign, Mul, MulAssign};
mod msm;
@ -15,7 +17,8 @@ mod prover;
mod verifier;
pub use msm::MSM;
pub use verifier::{Accumulator, Guard};
pub use prover::create_proof;
pub use verifier::{verify_proof, Accumulator, Guard};
/// These are the public parameters for the polynomial commitment scheme.
#[derive(Debug)]
@ -25,21 +28,13 @@ pub struct Params<C: CurveAffine> {
pub(crate) g: Vec<C>,
pub(crate) g_lagrange: Vec<C>,
pub(crate) h: C,
}
/// This is a proof object for the polynomial commitment scheme opening.
#[derive(Debug, Clone)]
pub struct Proof<C: CurveAffine> {
rounds: Vec<(C, C)>,
delta: C,
z1: C::Scalar,
z2: C::Scalar,
pub(crate) u: C,
}
impl<C: CurveAffine> Params<C> {
/// Initializes parameters for the curve, given a random oracle to draw
/// points from.
pub fn new<H: Hasher<C::Base>>(k: u32) -> Self {
pub fn new(k: u32) -> Self {
// This is usually a limitation on the curve, but we also want 32-bit
// architectures to be supported.
assert!(k < 32);
@ -48,26 +43,36 @@ impl<C: CurveAffine> Params<C> {
let n: u64 = 1 << k;
let g = {
let hasher = &H::init(C::Base::zero());
let try_and_increment = |hasher: &Blake2bState| {
let mut trial = 0u64;
loop {
let mut hasher = hasher.clone();
hasher.update(&(trial.to_le_bytes())[..]);
let p = C::from_bytes(&hasher.finalize().as_bytes().try_into().unwrap());
if bool::from(p.is_some()) {
break p.unwrap();
}
trial += 1;
}
};
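// `try_and_increment` hashes the personalized state together with a
// little-endian trial counter until the digest decodes to a valid curve
// point; roughly half of all trials succeed, so the expected number of
// iterations is small.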
let g = {
let mut g = Vec::with_capacity(n as usize);
g.resize(n as usize, C::zero());
parallelize(&mut g, move |g, start| {
let mut cur_value = C::Base::from(start as u64);
for g in g.iter_mut() {
let mut hasher = Blake2bParams::new()
.hash_length(32)
.personal(C::BLAKE2B_PERSONALIZATION)
.to_state();
hasher.update(b"G vector");
for (i, g) in g.iter_mut().enumerate() {
let i = (i + start) as u64;
let mut hasher = hasher.clone();
hasher.absorb(cur_value);
cur_value += &C::Base::one();
loop {
let x = hasher.squeeze().to_bytes();
let p = C::from_bytes(&x);
if bool::from(p.is_some()) {
*g = p.unwrap();
break;
}
}
hasher.update(&(i.to_le_bytes())[..]);
*g = try_and_increment(&hasher);
}
});
@ -102,10 +107,23 @@ impl<C: CurveAffine> Params<C> {
};
let h = {
let mut hasher = H::init(C::Base::zero());
let x = hasher.squeeze().to_bytes();
let p = C::from_bytes(&x);
p.unwrap()
let mut hasher = Blake2bParams::new()
.hash_length(32)
.personal(C::BLAKE2B_PERSONALIZATION)
.to_state();
hasher.update(b"H");
try_and_increment(&hasher)
};
let u = {
let mut hasher = Blake2bParams::new()
.hash_length(32)
.personal(C::BLAKE2B_PERSONALIZATION)
.to_state();
hasher.update(b"U");
try_and_increment(&hasher)
};
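// G, H and U are derived from domain-separated hashes ("G vector", "H",
// "U"), so no discrete-log relation among them is known to anyone; this is
// what lets U serve as an independent base in the inner product argument.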
Params {
@ -114,6 +132,7 @@ impl<C: CurveAffine> Params<C> {
g,
g_lagrange,
h,
u,
}
}
@ -222,12 +241,11 @@ impl<F: FieldExt> MulAssign<F> for Blind<F> {
}
#[test]
fn test_commit_lagrange() {
fn test_commit_lagrange_epaffine() {
const K: u32 = 6;
use crate::pasta::{EpAffine, Fp, Fq};
use crate::transcript::DummyHash;
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
use crate::pasta::{EpAffine, Fq};
let params = Params::<EpAffine>::new(K);
let domain = super::EvaluationDomain::new(1, K);
let mut a = domain.empty_lagrange();
@ -243,6 +261,27 @@ fn test_commit_lagrange() {
assert_eq!(params.commit(&b, alpha), params.commit_lagrange(&a, alpha));
}
#[test]
fn test_commit_lagrange_eqaffine() {
const K: u32 = 6;
use crate::pasta::{EqAffine, Fp};
let params = Params::<EqAffine>::new(K);
let domain = super::EvaluationDomain::new(1, K);
let mut a = domain.empty_lagrange();
for (i, a) in a.iter_mut().enumerate() {
*a = Fp::from(i as u64);
}
let b = domain.lagrange_to_coeff(a.clone());
let alpha = Blind(Fp::rand());
assert_eq!(params.commit(&b, alpha), params.commit_lagrange(&a, alpha));
}
#[test]
fn test_opening_proof() {
const K: u32 = 6;
@ -254,10 +293,12 @@ fn test_opening_proof() {
EvaluationDomain,
};
use crate::arithmetic::{eval_polynomial, Curve, FieldExt};
use crate::pasta::{EpAffine, Fp, Fq};
use crate::transcript::{ChallengeScalar, DummyHash, Transcript};
use crate::pasta::{EpAffine, Fq};
use crate::transcript::{
ChallengeScalar, DummyHashRead, DummyHashWrite, Transcript, TranscriptRead, TranscriptWrite,
};
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
let params = Params::<EpAffine>::new(K);
let domain = EvaluationDomain::new(1, K);
let mut px = domain.empty_coeff();
@ -270,49 +311,43 @@ fn test_opening_proof() {
let p = params.commit(&px, blind).to_affine();
let mut transcript = Transcript::<_, DummyHash<_>, DummyHash<_>>::new();
transcript.absorb_point(&p).unwrap();
let mut transcript = DummyHashWrite::<Vec<u8>, EpAffine>::init(vec![], Field::zero());
transcript.write_point(p).unwrap();
let x = ChallengeScalar::<_, ()>::get(&mut transcript);
// Evaluate the polynomial
let v = eval_polynomial(&px, *x);
transcript.write_scalar(v).unwrap();
transcript.absorb_base(Fp::from_bytes(&v.to_bytes()).unwrap()); // unlikely to fail since p ~ q
let (proof, ch_prover) = {
create_proof(&params, &mut transcript, &px, blind, *x).unwrap();
let ch_prover = transcript.squeeze_challenge();
(transcript.finalize(), ch_prover)
};
loop {
let mut transcript_dup = transcript.clone();
// Verify the opening proof
let mut transcript = DummyHashRead::<&[u8], EpAffine>::init(&proof[..], Field::zero());
let p_prime = transcript.read_point().unwrap();
assert_eq!(p, p_prime);
let x_prime = ChallengeScalar::<_, ()>::get(&mut transcript);
assert_eq!(*x, *x_prime);
let v_prime = transcript.read_scalar().unwrap();
assert_eq!(v, v_prime);
let opening_proof = Proof::create(&params, &mut transcript, &px, blind, *x);
if let Ok(opening_proof) = opening_proof {
// Verify the opening proof
let mut commitment_msm = params.empty_msm();
commitment_msm.append_term(Field::one(), p);
let guard = opening_proof
.verify(
&params,
params.empty_msm(),
&mut transcript_dup,
*x,
commitment_msm,
v,
)
.unwrap();
let mut commitment_msm = params.empty_msm();
commitment_msm.append_term(Field::one(), p);
let guard = verify_proof(&params, commitment_msm, &mut transcript, *x, v).unwrap();
let ch_verifier = transcript.squeeze_challenge();
assert_eq!(ch_prover, ch_verifier);
// Test guard behavior prior to checking another proof
{
// Test use_challenges()
let msm_challenges = guard.clone().use_challenges();
assert!(msm_challenges.eval());
// Test guard behavior prior to checking another proof
{
// Test use_challenges()
let msm_challenges = guard.clone().use_challenges();
assert!(msm_challenges.eval());
// Test use_g()
let g = guard.compute_g();
let (msm_g, _accumulator) = guard.clone().use_g(g);
assert!(msm_g.eval());
break;
}
} else {
transcript = transcript_dup;
transcript.absorb_base(Field::one());
}
// Test use_g()
let g = guard.compute_g();
let (msm_g, _accumulator) = guard.clone().use_g(g);
assert!(msm_g.eval());
}
}

View File

@ -1,5 +1,6 @@
use super::Params;
use crate::arithmetic::{best_multiexp, parallelize, Curve, CurveAffine};
use ff::Field;
/// A multiscalar multiplication in the polynomial commitment scheme
#[derive(Debug, Clone)]
@ -7,6 +8,7 @@ pub struct MSM<'a, C: CurveAffine> {
pub(crate) params: &'a Params<C>,
g_scalars: Option<Vec<C::Scalar>>,
h_scalar: Option<C::Scalar>,
u_scalar: Option<C::Scalar>,
other_scalars: Vec<C::Scalar>,
other_bases: Vec<C>,
}
@ -16,6 +18,7 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
pub fn new(params: &'a Params<C>) -> Self {
let g_scalars = None;
let h_scalar = None;
let u_scalar = None;
let other_scalars = vec![];
let other_bases = vec![];
@ -23,6 +26,7 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
params,
g_scalars,
h_scalar,
u_scalar,
other_scalars,
other_bases,
}
@ -40,6 +44,10 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
if let Some(h_scalar) = &other.h_scalar {
self.add_to_h_scalar(*h_scalar);
}
if let Some(u_scalar) = &other.u_scalar {
self.add_to_u_scalar(*u_scalar);
}
}
/// Add arbitrary term (the scalar and the point)
@ -48,6 +56,17 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
self.other_bases.push(point);
}
/// Add a value to the first entry of `g_scalars`.
pub fn add_constant_term(&mut self, constant: C::Scalar) {
if let Some(g_scalars) = self.g_scalars.as_mut() {
g_scalars[0] += &constant;
} else {
let mut g_scalars = vec![C::Scalar::zero(); self.params.n as usize];
g_scalars[0] += &constant;
self.g_scalars = Some(g_scalars);
}
}
/// Add a vector of scalars to `g_scalars`. This function will panic if the
/// caller provides a slice of scalars that is not of length `params.n`.
pub fn add_to_g_scalars(&mut self, scalars: &[C::Scalar]) {
@ -68,6 +87,11 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
self.h_scalar = self.h_scalar.map_or(Some(scalar), |a| Some(a + &scalar));
}
/// Add to `u_scalar`
pub fn add_to_u_scalar(&mut self, scalar: C::Scalar) {
self.u_scalar = self.u_scalar.map_or(Some(scalar), |a| Some(a + &scalar));
}
/// Scale all scalars in the MSM by some scaling factor
pub fn scale(&mut self, factor: C::Scalar) {
if let Some(g_scalars) = &mut self.g_scalars {
@ -87,12 +111,14 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
}
self.h_scalar = self.h_scalar.map(|a| a * &factor);
self.u_scalar = self.u_scalar.map(|a| a * &factor);
}
/// Perform multiexp and check that it results in zero
pub fn eval(self) -> bool {
let len = self.g_scalars.as_ref().map(|v| v.len()).unwrap_or(0)
+ self.h_scalar.map(|_| 1).unwrap_or(0)
+ self.u_scalar.map(|_| 1).unwrap_or(0)
+ self.other_scalars.len();
let mut scalars: Vec<C::Scalar> = Vec::with_capacity(len);
let mut bases: Vec<C> = Vec::with_capacity(len);
@ -105,6 +131,11 @@ impl<'a, C: CurveAffine> MSM<'a, C> {
bases.push(self.params.h);
}
if let Some(u_scalar) = self.u_scalar {
scalars.push(u_scalar);
bases.push(self.params.u);
}
if let Some(g_scalars) = &self.g_scalars {
scalars.extend(g_scalars);
bases.extend(self.params.g.iter());

View File

@ -1,218 +1,160 @@
use ff::Field;
use super::super::{Coeff, Error, Polynomial};
use super::{Blind, Params, Proof};
use super::super::{Coeff, Polynomial};
use super::{Blind, Params};
use crate::arithmetic::{
best_multiexp, compute_inner_product, parallelize, small_multiexp, Curve, CurveAffine, FieldExt,
best_multiexp, compute_inner_product, eval_polynomial, parallelize, Curve, CurveAffine,
FieldExt,
};
use crate::transcript::{Challenge, ChallengeScalar, Hasher, Transcript};
use crate::transcript::{Challenge, ChallengeScalar, TranscriptWrite};
use std::io;
impl<C: CurveAffine> Proof<C> {
/// Create a polynomial commitment opening proof for the polynomial defined
/// by the coefficients `px`, the blinding factor `blind` used for the
/// polynomial commitment, and the point `x` that the polynomial is
/// evaluated at.
///
/// This function will panic if the provided polynomial is too large with
/// respect to the polynomial commitment parameters.
///
/// **Important:** This function assumes that the provided `transcript` has
/// already seen the common inputs: the polynomial commitment P, the claimed
/// opening v, and the point x. It's probably also nice for the transcript
/// to have seen the elliptic curve description and the SRS, if you want to
/// be rigorous.
pub fn create<HBase, HScalar>(
params: &Params<C>,
transcript: &mut Transcript<C, HBase, HScalar>,
px: &Polynomial<C::Scalar, Coeff>,
blind: Blind<C::Scalar>,
x: C::Scalar,
) -> Result<Self, Error>
where
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
{
let mut blind = blind.0;
/// Create a polynomial commitment opening proof for the polynomial defined
/// by the coefficients `px`, the blinding factor `blind` used for the
/// polynomial commitment, and the point `x` that the polynomial is
/// evaluated at.
///
/// This function will panic if the provided polynomial is too large with
/// respect to the polynomial commitment parameters.
///
/// **Important:** This function assumes that the provided `transcript` has
/// already seen the common inputs: the polynomial commitment P, the claimed
/// opening v, and the point x. It's probably also nice for the transcript
/// to have seen the elliptic curve description and the SRS, if you want to
/// be rigorous.
pub fn create_proof<C: CurveAffine, T: TranscriptWrite<C>>(
params: &Params<C>,
transcript: &mut T,
px: &Polynomial<C::Scalar, Coeff>,
blind: Blind<C::Scalar>,
x: C::Scalar,
) -> io::Result<()> {
// We're limited to polynomials of degree n - 1.
assert!(px.len() <= params.n as usize);
// We're limited to polynomials of degree n - 1.
assert!(px.len() <= params.n as usize);
// Compute U
let u = {
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
if let Some(u_y) = u_y2.deterministic_sqrt() {
C::from_xy(u_x, u_y).unwrap()
} else {
return Err(Error::SamplingError);
}
};
// Initialize the vector `a` as the coefficients of the polynomial,
// rounding up to the parameters.
let mut a = px.to_vec();
a.resize(params.n as usize, C::Scalar::zero());
// Initialize the vector `b` as the powers of `x`. The inner product of
// `a` and `b` is the evaluation of the polynomial at `x`.
let mut b = Vec::with_capacity(1 << params.k);
{
let mut cur = C::Scalar::one();
for _ in 0..(1 << params.k) {
b.push(cur);
cur *= &x;
}
}
// Initialize the vector `G` from the SRS. We'll be progressively
// collapsing this vector into smaller and smaller vectors until it is
// of length 1.
let mut g = params.g.clone();
// Perform the inner product argument, round by round.
let mut rounds = Vec::with_capacity(params.k as usize);
for k in (1..=params.k).rev() {
let half = 1 << (k - 1); // half the length of `a`, `b`, `G`
// Compute L, R
//
// TODO: If we modify multiexp to take "extra" bases, we could speed
// this piece up a bit by combining the multiexps.
metrics::counter!("multiexp", 2, "val" => "l/r", "size" => format!("{}", half));
let l = best_multiexp(&a[0..half], &g[half..]);
let r = best_multiexp(&a[half..], &g[0..half]);
let value_l = compute_inner_product(&a[0..half], &b[half..]);
let value_r = compute_inner_product(&a[half..], &b[0..half]);
let mut l_randomness = C::Scalar::rand();
let r_randomness = C::Scalar::rand();
metrics::counter!("multiexp", 2, "val" => "l/r", "size" => "2");
let l = l + &best_multiexp(&[value_l, l_randomness], &[u, params.h]);
let r = r + &best_multiexp(&[value_r, r_randomness], &[u, params.h]);
let mut l = l.to_affine();
let r = r.to_affine();
let challenge = loop {
// We'll fork the transcript and adjust our randomness
// until the challenge is a square.
let mut transcript = transcript.clone();
// Feed L and R into the cloned transcript.
// We expect these to not be points at infinity due to the randomness.
transcript
.absorb_point(&l)
.map_err(|_| Error::SamplingError)?;
transcript
.absorb_point(&r)
.map_err(|_| Error::SamplingError)?;
// ... and get the squared challenge.
let challenge_sq_packed = Challenge::get(&mut transcript);
let challenge_sq: C::Scalar = *ChallengeScalar::<_, ()>::from(challenge_sq_packed);
// There might be no square root, in which case we'll fork the
// transcript.
let challenge = challenge_sq.deterministic_sqrt();
if let Some(challenge) = challenge {
break challenge;
} else {
// Try again, with slightly different randomness
l = (l + params.h).to_affine();
l_randomness += &C::Scalar::one();
}
};
// Challenge is unlikely to be zero.
let challenge_inv = challenge.invert().unwrap();
let challenge_sq_inv = challenge_inv.square();
let challenge_sq = challenge.square();
// Feed L and R into the real transcript
transcript
.absorb_point(&l)
.map_err(|_| Error::SamplingError)?;
transcript
.absorb_point(&r)
.map_err(|_| Error::SamplingError)?;
// And obtain the challenge, even though we already have it, since
// squeezing affects the transcript.
{
let challenge_sq_expected = ChallengeScalar::<_, ()>::get(transcript);
assert_eq!(challenge_sq, *challenge_sq_expected);
}
// Done with this round.
rounds.push((l, r));
// Collapse `a` and `b`.
// TODO: parallelize
for i in 0..half {
a[i] = (a[i] * &challenge) + &(a[i + half] * &challenge_inv);
b[i] = (b[i] * &challenge_inv) + &(b[i + half] * &challenge);
}
a.truncate(half);
b.truncate(half);
// Collapse `G`
parallel_generator_collapse(&mut g, challenge, challenge_inv);
g.truncate(half);
// Update randomness (the synthetic blinding factor at the end)
blind += &(l_randomness * &challenge_sq);
blind += &(r_randomness * &challenge_sq_inv);
}
// We have fully collapsed `a`, `b`, `G`
assert_eq!(a.len(), 1);
let a = a[0];
assert_eq!(b.len(), 1);
let b = b[0];
assert_eq!(g.len(), 1);
let g = g[0];
// Random nonces for the zero-knowledge opening
let d = C::Scalar::rand();
let s = C::Scalar::rand();
metrics::increment_counter!("multiexp", "val" => "delta", "size" => "3");
let delta = best_multiexp(&[d, d * &b, s], &[g, u, params.h]).to_affine();
// Feed delta into the transcript
transcript
.absorb_point(&delta)
.map_err(|_| Error::SamplingError)?;
// Obtain the challenge c.
let c = ChallengeScalar::<C::Scalar, ()>::get(transcript);
// Compute z1 and z2 as described in the Halo paper.
let z1 = a * &*c + &d;
let z2 = *c * &blind + &s;
Ok(Proof {
rounds,
delta,
z1,
z2,
})
// Sample a random polynomial (of the same degree) that has a root at x,
// first by setting all coefficients to random values.
let mut s_poly = (*px).clone();
for coeff in s_poly.iter_mut() {
*coeff = C::Scalar::rand();
}
// Evaluate the random polynomial at x
let v_prime = eval_polynomial(&s_poly[..], x);
// Subtract constant coefficient to get a random polynomial with a root at x
s_poly[0] = s_poly[0] - &v_prime;
// And sample a random blind
let s_poly_blind = Blind(C::Scalar::rand());
// Write a commitment to the random polynomial to the transcript
let s_poly_commitment = params.commit(&s_poly, s_poly_blind).to_affine();
transcript.write_point(s_poly_commitment)?;
// Challenge that will ensure that the prover cannot change P but can only
// witness a random polynomial commitment that agrees with P at x, with high
// probability.
let iota = *ChallengeScalar::<C, ()>::get(transcript);
// Challenge that ensures that the prover did not interfere with the U term
// in their commitments.
let z = *ChallengeScalar::<C, ()>::get(transcript);
// We'll be opening `s_poly_commitment * iota + P - [v] G_0` to show that
// it evaluates to zero at x.
let mut final_poly = s_poly * iota + px;
let v = eval_polynomial(&final_poly, x);
final_poly[0] = final_poly[0] - &v;
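// `final_poly` now evaluates to zero at x, so opening it at x to the value
// zero is what the argument below establishes.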
drop(px);
let blind = s_poly_blind * Blind(iota) + blind;
let mut blind = blind.0;
drop(s_poly_blind);
// Initialize the vector `a` as the coefficients of the polynomial,
// rounding up to the parameters.
let mut a = final_poly.values;
a.resize(params.n as usize, C::Scalar::zero());
// Initialize the vector `b` as the powers of `x`. The inner product of
// `a` and `b` is the evaluation of the polynomial at `x`.
let mut b = Vec::with_capacity(1 << params.k);
{
let mut cur = C::Scalar::one();
for _ in 0..(1 << params.k) {
b.push(cur);
cur *= &x;
}
}
// Initialize the vector `G` from the SRS. We'll be progressively collapsing
// this vector into smaller and smaller vectors until it is of length 1.
let mut g = params.g.clone();
// Perform the inner product argument, round by round.
for k in (1..=params.k).rev() {
let half = 1 << (k - 1); // half the length of `a`, `b`, `G`
// Compute L, R
//
// TODO: If we modify multiexp to take "extra" bases, we could speed
// this piece up a bit by combining the multiexps.
metrics::counter!("multiexp", 2, "val" => "l/r", "size" => format!("{}", half));
let l = best_multiexp(&a[half..], &g[0..half]);
let r = best_multiexp(&a[0..half], &g[half..]);
let value_l = compute_inner_product(&a[half..], &b[0..half]);
let value_r = compute_inner_product(&a[0..half], &b[half..]);
let l_randomness = C::Scalar::rand();
let r_randomness = C::Scalar::rand();
metrics::counter!("multiexp", 2, "val" => "l/r", "size" => "2");
let l = l + &best_multiexp(&[value_l * &z, l_randomness], &[params.u, params.h]);
let r = r + &best_multiexp(&[value_r * &z, r_randomness], &[params.u, params.h]);
let l = l.to_affine();
let r = r.to_affine();
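// L and R commit to the cross terms of the split inner product: folding
// with challenge u below gives <a', b'> = <a, b> + u^{-1} * value_l
// + u * value_r, matching the [value * z] U components added to L and R above.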
// Feed L and R into the real transcript
transcript.write_point(l)?;
transcript.write_point(r)?;
let challenge_packed = Challenge::get(transcript);
let challenge = *ChallengeScalar::<C, ()>::from(challenge_packed);
let challenge_inv = challenge.invert().unwrap(); // TODO, bubble this up
// Collapse `a` and `b`.
// TODO: parallelize
for i in 0..half {
a[i] = a[i] + &(a[i + half] * &challenge_inv);
b[i] = b[i] + &(b[i + half] * &challenge);
}
a.truncate(half);
b.truncate(half);
// Collapse `G`
parallel_generator_collapse(&mut g, challenge);
g.truncate(half);
// Update randomness (the synthetic blinding factor at the end)
blind += &(l_randomness * &challenge_inv);
blind += &(r_randomness * &challenge);
}
// We have fully collapsed `a`, `b`, `G`
assert_eq!(a.len(), 1);
let a = a[0];
transcript.write_scalar(a)?;
transcript.write_scalar(blind)?; // \xi
Ok(())
}
fn parallel_generator_collapse<C: CurveAffine>(
g: &mut [C],
challenge: C::Scalar,
challenge_inv: C::Scalar,
) {
fn parallel_generator_collapse<C: CurveAffine>(g: &mut [C], challenge: C::Scalar) {
let len = g.len() / 2;
let (mut g_lo, g_hi) = g.split_at_mut(len);
metrics::counter!("multiexp", len as u64, "size" => "2", "fn" => "parallel_generator_collapse");
metrics::counter!("scalar_multiplication", len as u64, "fn" => "parallel_generator_collapse");
parallelize(&mut g_lo, |g_lo, start| {
let g_hi = &g_hi[start..];
let mut tmp = Vec::with_capacity(g_lo.len());
for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) {
tmp.push(small_multiexp(&[challenge_inv, challenge], &[*g_lo, *g_hi]));
tmp.push(g_lo.to_projective() + &(*g_hi * challenge));
}
C::Projective::batch_to_affine(&tmp, g_lo);
});

View File

@ -1,19 +1,18 @@
use ff::Field;
use super::super::Error;
use super::{Params, Proof, MSM};
use crate::transcript::{Challenge, ChallengeScalar, Hasher, Transcript};
use super::{Params, MSM};
use crate::transcript::{Challenge, ChallengeScalar, TranscriptRead};
use crate::arithmetic::{best_multiexp, Curve, CurveAffine, FieldExt};
use crate::arithmetic::{best_multiexp, BatchInvert, Curve, CurveAffine};
/// A guard returned by the verifier
#[derive(Debug, Clone)]
pub struct Guard<'a, C: CurveAffine> {
msm: MSM<'a, C>,
neg_z1: C::Scalar,
allinv: C::Scalar,
challenges_sq: Vec<C::Scalar>,
challenges_sq_packed: Vec<Challenge>,
neg_a: C::Scalar,
challenges: Vec<C::Scalar>,
challenges_packed: Vec<Challenge>,
}
/// An accumulator instance consisting of an evaluation claim and a proof.
@ -24,28 +23,28 @@ pub struct Accumulator<C: CurveAffine> {
/// A vector of 128-bit challenges sampled by the verifier, to be used in
/// computing g.
pub challenges_sq_packed: Vec<Challenge>,
pub challenges_packed: Vec<Challenge>,
}
impl<'a, C: CurveAffine> Guard<'a, C> {
/// Lets caller supply the challenges and obtain an MSM with updated
/// scalars and points.
pub fn use_challenges(mut self) -> MSM<'a, C> {
let s = compute_s(&self.challenges_sq, self.allinv * &self.neg_z1);
let s = compute_s(&self.challenges, self.neg_a);
self.msm.add_to_g_scalars(&s);
self.msm.add_to_h_scalar(self.neg_z1);
self.msm.add_to_h_scalar(self.neg_a);
self.msm
}
/// Lets caller supply the purported G point and simply appends
/// [-z1] G to return an updated MSM.
/// [-a] G to return an updated MSM.
pub fn use_g(mut self, g: C) -> (MSM<'a, C>, Accumulator<C>) {
self.msm.append_term(self.neg_z1, g);
self.msm.append_term(self.neg_a, g);
let accumulator = Accumulator {
g,
challenges_sq_packed: self.challenges_sq_packed,
challenges_packed: self.challenges_packed,
};
(self.msm, accumulator)
@ -53,7 +52,7 @@ impl<'a, C: CurveAffine> Guard<'a, C> {
/// Computes G + H, where G = ⟨s, params.g⟩ and H is used for blinding
pub fn compute_g(&self) -> C {
let s = compute_s(&self.challenges_sq, self.allinv);
let s = compute_s(&self.challenges, C::Scalar::one());
metrics::increment_counter!("multiexp", "size" => format!("{}", s.len()), "fn" => "compute_g");
let mut tmp = best_multiexp(&s, &self.msm.params.g);
@ -62,174 +61,122 @@ impl<'a, C: CurveAffine> Guard<'a, C> {
}
}
impl<C: CurveAffine> Proof<C> {
/// Checks to see if an [`Proof`] is valid given the current `transcript`,
/// and a point `x` that the polynomial commitment `p` opens purportedly to
/// the value `v`.
pub fn verify<'a, HBase, HScalar>(
&self,
params: &'a Params<C>,
mut msm: MSM<'a, C>,
transcript: &mut Transcript<C, HBase, HScalar>,
x: C::Scalar,
mut commitment_msm: MSM<'a, C>,
v: C::Scalar,
) -> Result<Guard<'a, C>, Error>
where
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
/// Checks to see if a proof is valid given the current `transcript`, and a
/// point `x` that the polynomial commitment `P` opens purportedly to the value
/// `v`. The provided `msm` should evaluate to the commitment `P` being opened.
pub fn verify_proof<'a, C: CurveAffine, T: TranscriptRead<C>>(
params: &'a Params<C>,
mut msm: MSM<'a, C>,
transcript: &mut T,
x: C::Scalar,
v: C::Scalar,
) -> Result<Guard<'a, C>, Error> {
let k = params.k as usize;
// P - [v] G_0 + S * iota
// + \sum(L_i * u_i^{-1}) + \sum(R_i * u_i)
msm.add_constant_term(-v);
let s_poly_commitment = transcript.read_point().map_err(|_| Error::OpeningError)?;
let iota = *ChallengeScalar::<C, ()>::get(transcript);
msm.append_term(iota, s_poly_commitment);
let z = *ChallengeScalar::<C, ()>::get(transcript);
let mut rounds = vec![];
for _ in 0..k {
// Read L and R from the proof and write them to the transcript
let l = transcript.read_point().map_err(|_| Error::OpeningError)?;
let r = transcript.read_point().map_err(|_| Error::OpeningError)?;
let challenge_packed = Challenge::get(transcript);
let challenge = *ChallengeScalar::<C, ()>::from(challenge_packed);
rounds.push((
l,
r,
challenge,
/* to be inverted */ challenge,
challenge_packed,
));
}
rounds
.iter_mut()
.map(|&mut (_, _, _, ref mut challenge, _)| challenge)
.batch_invert();
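// All k round challenges are inverted with a single batch inversion (one
// field inversion plus O(k) multiplications) instead of k separate
// inversions.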
let mut challenges = Vec::with_capacity(k);
let mut challenges_packed: Vec<Challenge> = Vec::with_capacity(k);
for (l, r, challenge, challenge_inv, challenge_packed) in rounds {
msm.append_term(challenge_inv, l);
msm.append_term(challenge, r);
challenges.push(challenge);
challenges_packed.push(challenge_packed);
}
// Our goal is to open
// msm - [v] G_0 + random_poly_commitment * iota
// + \sum(L_i * u_i^{-1}) + \sum(R_i * u_i)
// at x to 0, by asking the prover to supply (a, \xi) such that it equals
// = [a] (G + [b * z] U) + [\xi] H
// except that we wish for the prover to supply G as Commit(g(X); 1) so
// we must substitute to get
// = [a] ((G - H) + [b * z] U) + [\xi] H
// = [a] G + [-a] H + [abz] U + [\xi] H
// = [a] G + [abz] U + [\xi - a] H
// but subtracting to get the desired equality
// ... + [-a] G + [-abz] U + [a - \xi] H = 0
let a = transcript.read_scalar().map_err(|_| Error::SamplingError)?;
let neg_a = -a;
let xi = transcript.read_scalar().map_err(|_| Error::SamplingError)?;
let b = compute_b(x, &challenges);
msm.add_to_u_scalar(neg_a * &b * &z);
msm.add_to_h_scalar(a - &xi);
let guard = Guard {
msm,
neg_a,
challenges,
challenges_packed,
};
Ok(guard)
}
/// Computes $\prod\limits_{i=0}^{k-1} (1 + u_{k - 1 - i} x^{2^i})$.
fn compute_b<F: Field>(x: F, challenges: &[F]) -> F {
let mut tmp = F::one();
let mut cur = x;
for challenge in challenges.iter().rev() {
tmp *= F::one() + &(*challenge * &cur);
cur *= cur;
}
tmp
}
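// For example, with k = 2 and challenges [u_0, u_1] this returns
// (1 + u_1 * x) * (1 + u_0 * x^2): the final round's challenge pairs with
// x, and earlier rounds pair with repeated squarings.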
/// Computes the coefficients of $g(X) = \prod\limits_{i=0}^{k-1} (1 + u_{k - 1 - i} X^{2^i})$, scaled by `init`.
fn compute_s<F: Field>(challenges: &[F], init: F) -> Vec<F> {
assert!(challenges.len() > 0);
let mut v = vec![F::zero(); 1 << challenges.len()];
v[0] = init;
for (len, challenge) in challenges
.iter()
.rev()
.enumerate()
.map(|(i, challenge)| (1 << i, challenge))
{
// Check for well-formedness
if self.rounds.len() != params.k as usize {
return Err(Error::OpeningError);
let (left, right) = v.split_at_mut(len);
let right = &mut right[0..len];
right.copy_from_slice(&left);
for v in right {
*v *= challenge;
}
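// Doubling step: after handling a challenge u, the low half of `v` holds
// the coefficients accumulated so far and the high half holds those same
// coefficients scaled by u, mirroring multiplication by (1 + u X^len).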
// Compute U
let u = {
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt();
if u_y.is_none() {
return Err(Error::OpeningError);
}
let u_y = u_y.unwrap();
C::from_xy(u_x, u_y).unwrap()
};
let mut extra_scalars = Vec::with_capacity(self.rounds.len() * 2 + 4 + params.n as usize);
let mut extra_bases = Vec::with_capacity(self.rounds.len() * 2 + 4 + params.n as usize);
// Data about the challenges from each of the rounds.
let mut challenges = Vec::with_capacity(self.rounds.len());
let mut challenges_inv = Vec::with_capacity(self.rounds.len());
let mut challenges_sq = Vec::with_capacity(self.rounds.len());
let mut challenges_sq_packed: Vec<Challenge> = Vec::with_capacity(self.rounds.len());
let mut allinv = C::Scalar::one();
for round in &self.rounds {
// Feed L and R into the transcript.
let l = round.0;
let r = round.1;
transcript
.absorb_point(&l)
.map_err(|_| Error::OpeningError)?;
transcript
.absorb_point(&r)
.map_err(|_| Error::OpeningError)?;
let challenge_sq_packed = Challenge::get(transcript);
let challenge_sq: C::Scalar = *ChallengeScalar::<_, ()>::from(challenge_sq_packed);
let challenge = challenge_sq.deterministic_sqrt();
if challenge.is_none() {
// We didn't sample a square.
return Err(Error::OpeningError);
}
let challenge = challenge.unwrap();
let challenge_inv = challenge.invert();
if bool::from(challenge_inv.is_none()) {
// We sampled zero for some reason, unlikely to happen by
// chance.
return Err(Error::OpeningError);
}
let challenge_inv = challenge_inv.unwrap();
allinv *= &challenge_inv;
let challenge_sq_inv = challenge_inv.square();
extra_scalars.push(challenge_sq);
extra_bases.push(round.0);
extra_scalars.push(challenge_sq_inv);
extra_bases.push(round.1);
challenges.push(challenge);
challenges_inv.push(challenge_inv);
challenges_sq.push(challenge_sq);
challenges_sq_packed.push(challenge_sq_packed);
}
// Feed delta into the transcript
transcript
.absorb_point(&self.delta)
.map_err(|_| Error::OpeningError)?;
// Get the challenge `c`
let c = ChallengeScalar::<_, ()>::get(transcript);
// Construct
// [c] P + [c * v] U + [c] sum(L_i * u_i^2) + [c] sum(R_i * u_i^-2) + delta - [z1 * b] U + [z1 - z2] H
// = [z1] (G + H)
// The computation of [z1] (G + H) happens in either Guard::use_challenges()
// or Guard::use_g().
let b = compute_b(x, &challenges, &challenges_inv);
let neg_z1 = -self.z1;
// [c] P
commitment_msm.scale(*c);
msm.add_msm(&commitment_msm);
// [c] sum(L_i * u_i^2) + [c] sum(R_i * u_i^-2)
for scalar in &mut extra_scalars {
*scalar *= &(*c);
}
for (scalar, base) in extra_scalars.iter().zip(extra_bases.iter()) {
msm.append_term(*scalar, *base);
}
// [c * v] U - [z1 * b] U
msm.append_term((*c * &v) + &(neg_z1 * &b), u);
// delta
msm.append_term(Field::one(), self.delta);
// + [z1 - z2] H
msm.add_to_h_scalar(self.z1 - &self.z2);
let guard = Guard {
msm,
neg_z1,
allinv,
challenges_sq,
challenges_sq_packed,
};
Ok(guard)
}
}
fn compute_b<F: Field>(x: F, challenges: &[F], challenges_inv: &[F]) -> F {
assert!(!challenges.is_empty());
assert_eq!(challenges.len(), challenges_inv.len());
if challenges.len() == 1 {
*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x
} else {
(*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x)
* compute_b(
x.square(),
&challenges[0..(challenges.len() - 1)],
&challenges_inv[0..(challenges.len() - 1)],
)
}
}
// TODO: parallelize
fn compute_s<F: Field>(challenges_sq: &[F], allinv: F) -> Vec<F> {
let lg_n = challenges_sq.len();
let n = 1 << lg_n;
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
s
v
}

View File

@ -15,6 +15,9 @@ use crate::{
mod prover;
mod verifier;
pub use prover::create_proof;
pub use verifier::verify_proof;
#[derive(Clone, Copy, Debug)]
struct X1 {}
/// Challenge for compressing openings at the same point sets together.
@ -36,19 +39,6 @@ struct X4 {}
/// together.
type ChallengeX4<F> = ChallengeScalar<F, X4>;
/// This is a multi-point opening proof used in the polynomial commitment scheme opening.
#[derive(Debug, Clone)]
pub struct Proof<C: CurveAffine> {
// A vector of evaluations at each set of query points
q_evals: Vec<C::Scalar>,
// Commitment to final polynomial
f_commitment: C,
// Commitment proof
opening: commitment::Proof<C>,
}
/// A polynomial query at a point
#[derive(Debug, Clone)]
pub struct ProverQuery<'a, C: CurveAffine> {

View File

@ -1,18 +1,19 @@
use super::super::{
commitment::{self, Blind, Params},
Coeff, Error, Polynomial,
Coeff, Polynomial,
};
use super::{
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Proof,
ProverQuery, Query,
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, ProverQuery,
Query,
};
use crate::arithmetic::{
eval_polynomial, kate_division, lagrange_interpolate, Curve, CurveAffine, FieldExt,
};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::TranscriptWrite;
use ff::Field;
use std::io;
use std::marker::PhantomData;
#[derive(Debug, Clone)]
@ -23,138 +24,118 @@ struct CommitmentData<C: CurveAffine> {
evals: Vec<C::Scalar>,
}
impl<C: CurveAffine> Proof<C> {
/// Create a multi-opening proof
pub fn create<'a, I, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
params: &Params<C>,
transcript: &mut Transcript<C, HBase, HScalar>,
queries: I,
) -> Result<Self, Error>
where
I: IntoIterator<Item = ProverQuery<'a, C>> + Clone,
/// Create a multi-opening proof
pub fn create_proof<'a, I, C: CurveAffine, T: TranscriptWrite<C>>(
params: &Params<C>,
transcript: &mut T,
queries: I,
) -> io::Result<()>
where
I: IntoIterator<Item = ProverQuery<'a, C>> + Clone,
{
let x_1 = ChallengeX1::get(transcript);
let x_2 = ChallengeX2::get(transcript);
let (poly_map, point_sets) = construct_intermediate_sets(queries);
// Collapse openings at same point sets together into single openings using
// x_1 challenge.
let mut q_polys: Vec<Option<Polynomial<C::Scalar, Coeff>>> = vec![None; point_sets.len()];
let mut q_blinds = vec![Blind(C::Scalar::zero()); point_sets.len()];
// A vec of vecs of evals. The outer vec corresponds to the point set,
// while the inner vec corresponds to the points in a particular set.
let mut q_eval_sets = Vec::with_capacity(point_sets.len());
for point_set in point_sets.iter() {
q_eval_sets.push(vec![C::Scalar::zero(); point_set.len()]);
}
{
let x_1 = ChallengeX1::get(transcript);
let x_2 = ChallengeX2::get(transcript);
let (poly_map, point_sets) = construct_intermediate_sets(queries);
// Collapse openings at same point sets together into single openings using
// x_1 challenge.
let mut q_polys: Vec<Option<Polynomial<C::Scalar, Coeff>>> = vec![None; point_sets.len()];
let mut q_blinds = vec![Blind(C::Scalar::zero()); point_sets.len()];
// A vec of vecs of evals. The outer vec corresponds to the point set,
// while the inner vec corresponds to the points in a particular set.
let mut q_eval_sets = Vec::with_capacity(point_sets.len());
for point_set in point_sets.iter() {
q_eval_sets.push(vec![C::Scalar::zero(); point_set.len()]);
}
{
let mut accumulate = |set_idx: usize,
new_poly: &Polynomial<C::Scalar, Coeff>,
blind: Blind<C::Scalar>,
evals: Vec<C::Scalar>| {
if let Some(poly) = &q_polys[set_idx] {
q_polys[set_idx] = Some(poly.clone() * *x_1 + new_poly);
} else {
q_polys[set_idx] = Some(new_poly.clone());
}
q_blinds[set_idx] *= *x_1;
q_blinds[set_idx] += blind;
// Each polynomial is evaluated at a set of points. For each set,
// we collapse each polynomial's evals pointwise.
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &*x_1;
*set_eval += eval;
}
};
for commitment_data in poly_map.into_iter() {
accumulate(
commitment_data.set_index, // set_idx,
commitment_data.commitment.poly, // poly,
commitment_data.commitment.blind, // blind,
commitment_data.evals, // evals
);
}
}
let f_poly = point_sets
.iter()
.zip(q_eval_sets.iter())
.zip(q_polys.iter())
.fold(None, |f_poly, ((points, evals), poly)| {
let mut poly = poly.clone().unwrap().values;
// TODO: makes an implicit assumption that poly degree is smaller than interpolation poly degree
for (p, r) in poly.iter_mut().zip(lagrange_interpolate(points, evals)) {
*p -= &r;
}
let mut poly = points
.iter()
.fold(poly, |poly, point| kate_division(&poly, *point));
poly.resize(params.n as usize, C::Scalar::zero());
let poly = Polynomial {
values: poly,
_marker: PhantomData,
};
if f_poly.is_none() {
Some(poly)
} else {
f_poly.map(|f_poly| f_poly * *x_2 + &poly)
}
})
.unwrap();
let mut f_blind = Blind(C::Scalar::rand());
let mut f_commitment = params.commit(&f_poly, f_blind).to_affine();
let (opening, q_evals) = loop {
let mut transcript = transcript.clone();
transcript
.absorb_point(&f_commitment)
.map_err(|_| Error::SamplingError)?;
let x_3 = ChallengeX3::get(&mut transcript);
let q_evals: Vec<C::Scalar> = q_polys
.iter()
.map(|poly| eval_polynomial(poly.as_ref().unwrap(), *x_3))
.collect();
for eval in q_evals.iter() {
transcript.absorb_scalar(*eval);
}
let x_4 = ChallengeX4::get(&mut transcript);
let (f_poly, f_blind_try) = q_polys.iter().zip(q_blinds.iter()).fold(
(f_poly.clone(), f_blind),
|(f_poly, f_blind), (poly, blind)| {
(
f_poly * *x_4 + poly.as_ref().unwrap(),
Blind((f_blind.0 * &*x_4) + &blind.0),
)
},
);
if let Ok(opening) =
commitment::Proof::create(&params, &mut transcript, &f_poly, f_blind_try, *x_3)
{
break (opening, q_evals);
let mut accumulate = |set_idx: usize,
new_poly: &Polynomial<C::Scalar, Coeff>,
blind: Blind<C::Scalar>,
evals: Vec<C::Scalar>| {
if let Some(poly) = &q_polys[set_idx] {
q_polys[set_idx] = Some(poly.clone() * *x_1 + new_poly);
} else {
f_blind += C::Scalar::one();
f_commitment = (f_commitment + params.h).to_affine();
q_polys[set_idx] = Some(new_poly.clone());
}
q_blinds[set_idx] *= *x_1;
q_blinds[set_idx] += blind;
// Each polynomial is evaluated at a set of points. For each set,
// we collapse each polynomial's evals pointwise.
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &(*x_1);
*set_eval += eval;
}
};
Ok(Proof {
q_evals,
f_commitment,
opening,
})
for commitment_data in poly_map.into_iter() {
accumulate(
commitment_data.set_index, // set_idx,
commitment_data.commitment.poly, // poly,
commitment_data.commitment.blind, // blind,
commitment_data.evals, // evals
);
}
}
let f_poly = point_sets
.iter()
.zip(q_eval_sets.iter())
.zip(q_polys.iter())
.fold(None, |f_poly, ((points, evals), poly)| {
let mut poly = poly.clone().unwrap().values;
// TODO: makes an implicit assumption that poly degree is smaller than interpolation poly degree
for (p, r) in poly.iter_mut().zip(lagrange_interpolate(points, evals)) {
*p -= &r;
}
let mut poly = points
.iter()
.fold(poly, |poly, point| kate_division(&poly, *point));
poly.resize(params.n as usize, C::Scalar::zero());
let poly = Polynomial {
values: poly,
_marker: PhantomData,
};
if f_poly.is_none() {
Some(poly)
} else {
f_poly.map(|f_poly| f_poly * *x_2 + &poly)
}
})
.unwrap();
let f_blind = Blind(C::Scalar::rand());
let f_commitment = params.commit(&f_poly, f_blind).to_affine();
transcript.write_point(f_commitment)?;
let x_3 = ChallengeX3::get(transcript);
let q_evals: Vec<C::Scalar> = q_polys
.iter()
.map(|poly| eval_polynomial(poly.as_ref().unwrap(), *x_3))
.collect();
for eval in q_evals.iter() {
transcript.write_scalar(*eval)?;
}
let x_4 = ChallengeX4::get(transcript);
let (f_poly, f_blind_try) = q_polys.iter().zip(q_blinds.iter()).fold(
(f_poly.clone(), f_blind),
|(f_poly, f_blind), (poly, blind)| {
(
f_poly * *x_4 + poly.as_ref().unwrap(),
Blind((f_blind.0 * &(*x_4)) + &blind.0),
)
},
);
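// f(X) is collapsed once more with x_4 so that a single inner-product
// opening at x_3 simultaneously checks f(X) and every q_i(X).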
commitment::create_proof(&params, transcript, &f_poly, f_blind_try, *x_3)
}
#[doc(hidden)]

View File

@ -5,12 +5,11 @@ use super::super::{
Error,
};
use super::{
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Proof, Query,
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query,
VerifierQuery,
};
use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine, FieldExt};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::TranscriptRead;
#[derive(Debug, Clone)]
struct CommitmentData<C: CurveAffine> {
set_index: usize,
@ -18,114 +17,108 @@ struct CommitmentData<C: CurveAffine> {
evals: Vec<C::Scalar>,
}
impl<C: CurveAffine> Proof<C> {
/// Verify a multi-opening proof
pub fn verify<'a, I, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
&self,
params: &'a Params<C>,
transcript: &mut Transcript<C, HBase, HScalar>,
queries: I,
mut msm: MSM<'a, C>,
) -> Result<Guard<'a, C>, Error>
where
I: IntoIterator<Item = VerifierQuery<'a, C>> + Clone,
/// Verify a multi-opening proof
pub fn verify_proof<'b, 'a: 'b, I, C: CurveAffine, T: TranscriptRead<C>>(
params: &'a Params<C>,
transcript: &mut T,
queries: I,
mut msm: MSM<'a, C>,
) -> Result<Guard<'a, C>, Error>
where
I: IntoIterator<Item = VerifierQuery<'b, C>> + Clone,
{
// Scale the MSM by a random factor to ensure that if the existing MSM
// has is_zero() == false then this argument won't be able to interfere
// with it to make it true, with high probability.
msm.scale(C::Scalar::rand());
// Sample x_1 for compressing openings at the same point sets together
let x_1 = ChallengeX1::get(transcript);
// Sample a challenge x_2 for keeping the multi-point quotient
// polynomial terms linearly independent.
let x_2 = ChallengeX2::get(transcript);
let (commitment_map, point_sets) = construct_intermediate_sets(queries);
// Compress the commitments and expected evaluations at x together.
// using the challenge x_1
let mut q_commitments: Vec<_> = vec![params.empty_msm(); point_sets.len()];
// A vec of vecs of evals. The outer vec corresponds to the point set,
// while the inner vec corresponds to the points in a particular set.
let mut q_eval_sets = Vec::with_capacity(point_sets.len());
for point_set in point_sets.iter() {
q_eval_sets.push(vec![C::Scalar::zero(); point_set.len()]);
}
{
// Scale the MSM by a random factor so that, if the existing MSM is
// nonzero (is_zero() == false), this argument cannot cancel it out and
// make the combined MSM evaluate to zero, except with negligible
// probability.
msm.scale(C::Scalar::rand());
// Sample x_1 for compressing openings at the same point sets together
let x_1 = ChallengeX1::get(transcript);
// Sample a challenge x_2 for keeping the multi-point quotient
// polynomial terms linearly independent.
let x_2 = ChallengeX2::<C::Scalar>::get(transcript);
let (commitment_map, point_sets) = construct_intermediate_sets(queries);
// Compress the commitments and expected evaluations at x together.
// using the challenge x_1
let mut q_commitments: Vec<_> = vec![params.empty_msm(); point_sets.len()];
// A vec of vecs of evals. The outer vec corresponds to the point set,
// while the inner vec corresponds to the points in a particular set.
let mut q_eval_sets = Vec::with_capacity(point_sets.len());
for point_set in point_sets.iter() {
q_eval_sets.push(vec![C::Scalar::zero(); point_set.len()]);
}
{
let mut accumulate = |set_idx: usize, new_commitment, evals: Vec<C::Scalar>| {
q_commitments[set_idx].scale(*x_1);
q_commitments[set_idx].append_term(C::Scalar::one(), new_commitment);
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &*x_1;
*set_eval += eval;
}
};
// Each commitment corresponds to evaluations at a set of points.
// For each set, we collapse each commitment's evals pointwise.
for commitment_data in commitment_map.into_iter() {
accumulate(
commitment_data.set_index, // set_idx,
*commitment_data.commitment.0, // commitment,
commitment_data.evals, // evals
);
let mut accumulate = |set_idx: usize, new_commitment, evals: Vec<C::Scalar>| {
q_commitments[set_idx].scale(*x_1);
q_commitments[set_idx].append_term(C::Scalar::one(), new_commitment);
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &(*x_1);
*set_eval += eval;
}
}
};
// Obtain the commitment to the multi-point quotient polynomial f(X).
transcript
.absorb_point(&self.f_commitment)
.map_err(|_| Error::SamplingError)?;
// Sample a challenge x_3 for checking that f(X) was committed to
// correctly.
let x_3 = ChallengeX3::get(transcript);
for eval in self.q_evals.iter() {
transcript.absorb_scalar(*eval);
}
// We can compute the expected msm_eval at x_3 using the q_evals provided
// by the prover and from x_2
let msm_eval = point_sets
.iter()
.zip(q_eval_sets.iter())
.zip(self.q_evals.iter())
.fold(
C::Scalar::zero(),
|msm_eval, ((points, evals), proof_eval)| {
let r_poly = lagrange_interpolate(points, evals);
let r_eval = eval_polynomial(&r_poly, *x_3);
let eval = points.iter().fold(*proof_eval - &r_eval, |eval, point| {
eval * &(*x_3 - point).invert().unwrap()
});
msm_eval * &*x_2 + &eval
},
// Each commitment corresponds to evaluations at a set of points.
// For each set, we collapse each commitment's evals pointwise.
for commitment_data in commitment_map.into_iter() {
accumulate(
commitment_data.set_index, // set_idx,
*commitment_data.commitment.0, // commitment,
commitment_data.evals, // evals
);
}
}
// Sample a challenge x_4 that we will use to collapse the openings of
// the various remaining polynomials at x_3 together.
let x_4 = ChallengeX4::get(transcript);
// Obtain the commitment to the multi-point quotient polynomial f(X).
let f_commitment = transcript.read_point().map_err(|_| Error::SamplingError)?;
// Compute the final commitment that has to be opened
let mut commitment_msm = params.empty_msm();
commitment_msm.append_term(C::Scalar::one(), self.f_commitment);
let (commitment_msm, msm_eval) = q_commitments.into_iter().zip(self.q_evals.iter()).fold(
(commitment_msm, msm_eval),
|(mut commitment_msm, msm_eval), (q_commitment, q_eval)| {
commitment_msm.scale(*x_4);
commitment_msm.add_msm(&q_commitment);
(commitment_msm, msm_eval * &*x_4 + q_eval)
// Sample a challenge x_3 for checking that f(X) was committed to
// correctly.
let x_3 = ChallengeX3::get(transcript);
let mut q_evals = Vec::with_capacity(q_eval_sets.len());
for _ in 0..q_eval_sets.len() {
q_evals.push(transcript.read_scalar().map_err(|_| Error::SamplingError)?);
}
// We can compute the expected msm_eval at x_3 using the q_evals provided
// by the prover and the challenge x_2
let msm_eval = point_sets
.iter()
.zip(q_eval_sets.iter())
.zip(q_evals.iter())
.fold(
C::Scalar::zero(),
|msm_eval, ((points, evals), proof_eval)| {
let r_poly = lagrange_interpolate(points, evals);
let r_eval = eval_polynomial(&r_poly, *x_3);
let eval = points.iter().fold(*proof_eval - &r_eval, |eval, point| {
eval * &(*x_3 - point).invert().unwrap()
});
msm_eval * &(*x_2) + &eval
},
);
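// msm_eval is the x_2-weighted combination, over all point sets, of each
// q_i's claimed value at x_3 after dividing out the vanishing factors
// (x_3 - point); it is combined below via x_4 into the value the final
// collapsed commitment must open to.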
// Verify the opening proof
self.opening
.verify(params, msm, transcript, *x_3, commitment_msm, msm_eval)
}
// Sample a challenge x_4 that we will use to collapse the openings of
// the various remaining polynomials at x_3 together.
let x_4 = ChallengeX4::get(transcript);
// Compute the final commitment that has to be opened
msm.append_term(C::Scalar::one(), f_commitment);
let (msm, msm_eval) = q_commitments.into_iter().zip(q_evals.iter()).fold(
(msm, msm_eval),
|(mut msm, msm_eval), (q_commitment, q_eval)| {
msm.scale(*x_4);
msm.add_msm(&q_commitment);
(msm, msm_eval * &(*x_4) + q_eval)
},
);
// Verify the opening proof
super::commitment::verify_proof(params, msm, transcript, *x_3, msm_eval)
}
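// For reference, in our own notation (not from the codebase): writing S_i for
// the point sets, q_i for the collapsed polynomials committed by Q_i, and r_i
// for the Lagrange interpolation of the claimed evaluations in q_eval_sets,
// the first fold above computes
//
//     e_i = (q_i(x_3) - r_i(x_3)) / \prod_{p \in S_i} (x_3 - p)
//     msm_eval = \sum_i x_2^{k-1-i} \cdot e_i        (Horner-style fold)
//
// and the x_4 fold then accumulates, per set,
//
//     (M, v) <- (x_4 \cdot M + Q_i,  x_4 \cdot v + q_evals_i)
//
// starting from (f_commitment, msm_eval); the inner-product argument finally
// opens M at x_3 to the value v.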
#[doc(hidden)]

View File

@ -2,125 +2,203 @@
//! transcripts.
use ff::Field;
use std::marker::PhantomData;
use std::ops::Deref;
use crate::arithmetic::{CurveAffine, FieldExt};
/// This is a generic interface for a sponge function that can be used for
/// Fiat-Shamir transformations.
pub trait Hasher<F: FieldExt>: Clone + Send + Sync + 'static {
/// Initialize the sponge with some key.
fn init(key: F) -> Self;
/// Absorb a field element into the sponge.
fn absorb(&mut self, value: F);
/// Squeeze a field element out of the sponge.
fn squeeze(&mut self) -> F;
use std::io::{self, Read, Write};
use std::marker::PhantomData;
/// Generic transcript view (from either the prover or verifier's perspective)
pub trait Transcript<C: CurveAffine> {
/// Squeeze a challenge (in the base field) from the transcript.
fn squeeze_challenge(&mut self) -> C::Base;
/// Write the point to the transcript without writing it to the proof,
/// treating it as a common input.
fn common_point(&mut self, point: C) -> io::Result<()>;
}
/// This is just a simple (and completely broken) hash function, standing in for
/// some algebraic hash function that we'll switch to later.
/// Transcript view from the perspective of a verifier that has access to an
/// input stream of data from the prover to the verifier.
pub trait TranscriptRead<C: CurveAffine>: Transcript<C> {
/// Read a curve point from the prover.
fn read_point(&mut self) -> io::Result<C>;
/// Read a curve scalar from the prover.
fn read_scalar(&mut self) -> io::Result<C::Scalar>;
}
/// Transcript view from the perspective of a prover that has access to an
/// output stream of messages from the prover to the verifier.
pub trait TranscriptWrite<C: CurveAffine>: Transcript<C> {
/// Write a curve point to the proof and the transcript.
fn write_point(&mut self, point: C) -> io::Result<()>;
/// Write a scalar to the proof and the transcript.
fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()>;
}
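// A minimal sketch (ours, not part of this change) of how the split traits
// compose: prover and verifier drive the same `Transcript` operations, so
// they derive identical challenges. `round_trip` is a hypothetical helper;
// it assumes `commitment` is not the point at infinity (which `common_point`
// rejects) and that both sides initialize with the same key.
fn round_trip<C: CurveAffine>(commitment: C) -> io::Result<bool> {
    // Prover side: write the point into the proof and the transcript, then
    // derive a challenge.
    let mut prover: DummyHashWrite<Vec<u8>, C> =
        DummyHashWrite::init(Vec::new(), C::Base::one());
    prover.write_point(commitment)?;
    let prover_challenge = prover.squeeze_challenge();
    let proof = prover.finalize();

    // Verifier side: read the same point back from the proof bytes; the
    // shared `common_point` absorption keeps both states in sync.
    let mut verifier: DummyHashRead<&[u8], C> =
        DummyHashRead::init(&proof[..], C::Base::one());
    let point = verifier.read_point()?;
    let verifier_challenge = verifier.squeeze_challenge();

    Ok(point == commitment && prover_challenge == verifier_challenge)
}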
/// This is just a simple (and completely broken) transcript reader
/// implementation, standing in for some algebraic hash function that we'll
/// switch to later.
#[derive(Debug, Clone)]
pub struct DummyHash<F: FieldExt> {
power: F,
state: F,
}
impl<F: FieldExt> Hasher<F> for DummyHash<F> {
fn init(key: F) -> Self {
DummyHash {
power: F::ZETA + F::one() + key,
state: F::ZETA,
}
}
fn absorb(&mut self, value: F) {
for _ in 0..10 {
self.state += value;
self.state *= self.power;
self.power += self.power.square();
self.state += self.power;
}
}
fn squeeze(&mut self) -> F {
let tmp = self.state;
self.absorb(tmp);
tmp
}
}
#[derive(Debug, Clone)]
pub struct DummyHashRead<R: Read, C: CurveAffine> {
base_state: C::Base,
scalar_state: C::Scalar,
read_scalar: bool,
reader: R,
}
impl<R: Read, C: CurveAffine> DummyHashRead<R, C> {
/// Initialize a transcript given an input buffer and a key.
pub fn init(reader: R, key: C::Base) -> Self {
DummyHashRead {
base_state: key + &C::Base::from_u64(1013),
scalar_state: C::Scalar::from_u64(1013),
read_scalar: false,
reader,
}
}
}
impl<R: Read, C: CurveAffine> TranscriptRead<C> for DummyHashRead<R, C> {
fn read_point(&mut self) -> io::Result<C> {
let mut compressed = [0u8; 32];
self.reader.read_exact(&mut compressed[..])?;
let point: C = Option::from(C::from_bytes(&compressed)).ok_or(io::Error::new(
io::ErrorKind::Other,
"invalid point encoding in proof",
))?;
self.common_point(point)?;
Ok(point)
}
fn read_scalar(&mut self) -> io::Result<C::Scalar> {
let mut data = [0u8; 32];
self.reader.read_exact(&mut data)?;
let scalar = Option::from(C::Scalar::from_bytes(&data)).ok_or(io::Error::new(
io::ErrorKind::Other,
"invalid field element encoding in proof",
))?;
self.scalar_state += &(scalar * &C::Scalar::ZETA);
self.scalar_state = self.scalar_state.square();
self.read_scalar = true;
Ok(scalar)
}
}
impl<R: Read, C: CurveAffine> Transcript<C> for DummyHashRead<R, C> {
fn common_point(&mut self, point: C) -> io::Result<()> {
let (x, y) = Option::from(point.get_xy()).ok_or(io::Error::new(
io::ErrorKind::Other,
"cannot write points at infinity to the transcript",
))?;
self.base_state += &(x * &C::Base::ZETA);
self.base_state = self.base_state.square();
self.base_state += &(y * &C::Base::ZETA);
self.base_state = self.base_state.square();
Ok(())
}
fn squeeze_challenge(&mut self) -> C::Base {
if self.read_scalar {
let x = C::Base::from_bytes(&self.scalar_state.to_bytes()).unwrap();
self.base_state += &(x * &C::Base::ZETA);
self.base_state = self.base_state.square();
self.scalar_state = self.scalar_state.square();
self.read_scalar = false;
}
let tmp = self.base_state;
for _ in 0..5 {
self.base_state *= &(C::Base::ZETA + &C::Base::ZETA);
self.base_state += &C::Base::ZETA;
self.base_state = self.base_state.square();
}
tmp
}
}
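// A small sketch (ours) of the lazy scalar folding above: reading a scalar
// only touches `scalar_state`, and the pending state is folded into
// `base_state` at the next squeeze, so the following challenge changes.
// The 32 zero bytes below are assumed only to be a valid (zero) scalar
// encoding.
fn scalar_perturbs_challenge<C: CurveAffine>() -> io::Result<()> {
    let bytes = [0u8; 32];
    let mut with_scalar: DummyHashRead<&[u8], C> =
        DummyHashRead::init(&bytes[..], C::Base::one());
    let mut without: DummyHashRead<&[u8], C> =
        DummyHashRead::init(&bytes[..], C::Base::one());
    let _scalar = with_scalar.read_scalar()?; // updates scalar_state only
    // The squeeze first folds scalar_state into base_state, so the two
    // challenges should differ (with overwhelming probability).
    assert!(with_scalar.squeeze_challenge() != without.squeeze_challenge());
    Ok(())
}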
/// A transcript that can absorb points from both the base field and scalar
/// field of a curve
/// This is just a simple (and completely broken) transcript writer
/// implementation, standing in for some algebraic hash function that we'll
/// switch to later.
#[derive(Debug, Clone)]
pub struct Transcript<C: CurveAffine, HBase, HScalar>
where
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
{
// Hasher over the base field
base_hasher: HBase,
// Hasher over the scalar field
scalar_hasher: HScalar,
// Indicates whether scalars have been hashed but not squeezed
scalar_needs_squeezing: bool,
// PhantomData
_marker: PhantomData<C>,
pub struct DummyHashWrite<W: Write, C: CurveAffine> {
base_state: C::Base,
scalar_state: C::Scalar,
written_scalar: bool,
writer: W,
}
impl<C: CurveAffine, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>
Transcript<C, HBase, HScalar>
{
/// Initialize a new transcript with Field::one() as keys
/// in both the base_hasher and scalar_hasher
pub fn new() -> Self {
let base_hasher = HBase::init(C::Base::one());
let scalar_hasher = HScalar::init(C::Scalar::one());
Transcript {
base_hasher,
scalar_hasher,
scalar_needs_squeezing: false,
_marker: PhantomData,
impl<W: Write, C: CurveAffine> DummyHashWrite<W, C> {
/// Initialize a transcript given an output buffer and a key.
pub fn init(writer: W, key: C::Base) -> Self {
DummyHashWrite {
base_state: key + &C::Base::from_u64(1013),
scalar_state: C::Scalar::from_u64(1013),
written_scalar: false,
writer,
}
}
fn conditional_scalar_squeeze(&mut self) {
if self.scalar_needs_squeezing {
let transcript_scalar_point =
C::Base::from_bytes(&(self.scalar_hasher.squeeze()).to_bytes()).unwrap();
self.base_hasher.absorb(transcript_scalar_point);
self.scalar_needs_squeezing = false;
}
/// Conclude the interaction and return the output buffer (writer).
pub fn finalize(self) -> W {
// TODO: handle outstanding scalars? see issue #138
self.writer
}
}
impl<W: Write, C: CurveAffine> TranscriptWrite<C> for DummyHashWrite<W, C> {
fn write_point(&mut self, point: C) -> io::Result<()> {
self.common_point(point)?;
let compressed = point.to_bytes();
self.writer.write_all(&compressed[..])
}
fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
self.scalar_state += &(scalar * &C::Scalar::ZETA);
self.scalar_state = self.scalar_state.square();
self.written_scalar = true;
let data = scalar.to_bytes();
self.writer.write_all(&data[..])
}
}
/// Absorb a curve point into the transcript by absorbing
/// its x and y coordinates
pub fn absorb_point(&mut self, point: &C) -> Result<(), ()> {
self.conditional_scalar_squeeze();
let tmp = point.get_xy();
if bool::from(tmp.is_none()) {
return Err(());
};
let tmp = tmp.unwrap();
self.base_hasher.absorb(tmp.0);
self.base_hasher.absorb(tmp.1);
Ok(())
}
/// Absorb a base into the base_hasher
pub fn absorb_base(&mut self, base: C::Base) {
self.conditional_scalar_squeeze();
self.base_hasher.absorb(base);
}
/// Absorb a scalar into the scalar_hasher
pub fn absorb_scalar(&mut self, scalar: C::Scalar) {
self.scalar_hasher.absorb(scalar);
self.scalar_needs_squeezing = true;
}
/// Squeeze the transcript to obtain a C::Base value.
pub fn squeeze(&mut self) -> C::Base {
self.conditional_scalar_squeeze();
self.base_hasher.squeeze()
}
impl<W: Write, C: CurveAffine> Transcript<C> for DummyHashWrite<W, C> {
fn common_point(&mut self, point: C) -> io::Result<()> {
let (x, y) = Option::from(point.get_xy()).ok_or(io::Error::new(
io::ErrorKind::Other,
"cannot write points at infinity to the transcript",
))?;
self.base_state += &(x * &C::Base::ZETA);
self.base_state = self.base_state.square();
self.base_state += &(y * &C::Base::ZETA);
self.base_state = self.base_state.square();
Ok(())
}
fn squeeze_challenge(&mut self) -> C::Base {
if self.written_scalar {
let x = C::Base::from_bytes(&self.scalar_state.to_bytes()).unwrap();
self.base_state += &(x * &C::Base::ZETA);
self.base_state = self.base_state.square();
self.scalar_state = self.scalar_state.square();
self.written_scalar = false;
}
let tmp = self.base_state;
for _ in 0..5 {
self.base_state *= &(C::Base::ZETA + &C::Base::ZETA);
self.base_state += &C::Base::ZETA;
self.base_state = self.base_state.square();
}
tmp
}
}
@ -130,64 +208,75 @@ pub struct Challenge(pub(crate) u128);
impl Challenge {
/// Obtains a new challenge from the transcript.
pub fn get<C, HBase, HScalar>(transcript: &mut Transcript<C, HBase, HScalar>) -> Challenge
where
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
{
Challenge(transcript.squeeze().get_lower_128())
pub fn get<C: CurveAffine, T: Transcript<C>>(transcript: &mut T) -> Challenge {
Challenge(transcript.squeeze_challenge().get_lower_128())
}
}
/// The scalar representation of a verifier challenge.
///
/// The `T` type can be used to scope the challenge to a specific context, or set to `()`
/// if no context is required.
/// The `Type` type can be used to scope the challenge to a specific context, or
/// set to `()` if no context is required.
#[derive(Copy, Clone, Debug)]
pub struct ChallengeScalar<F: FieldExt, T> {
inner: F,
_marker: PhantomData<T>,
pub struct ChallengeScalar<C: CurveAffine, Type> {
inner: C::Scalar,
_marker: PhantomData<Type>,
}
impl<F: FieldExt, T> From<Challenge> for ChallengeScalar<F, T> {
impl<C: CurveAffine, Type> From<Challenge> for ChallengeScalar<C, Type> {
/// This algorithm applies the mapping of Algorithm 1 from the
/// [Halo](https://eprint.iacr.org/2019/1021) paper.
fn from(challenge: Challenge) -> Self {
let mut acc = (F::ZETA + F::one()).double();
let mut acc = (C::Scalar::ZETA + &C::Scalar::one()).double();
for i in (0..64).rev() {
let should_negate = ((challenge.0 >> ((i << 1) + 1)) & 1) == 1;
let should_endo = ((challenge.0 >> (i << 1)) & 1) == 1;
let q = if should_negate { -F::one() } else { F::one() };
let q = if should_endo { q * F::ZETA } else { q };
acc = acc + q + acc;
let q = if should_negate {
-C::Scalar::one()
} else {
C::Scalar::one()
};
let q = if should_endo { q * &C::Scalar::ZETA } else { q };
acc = acc + &q + &acc;
}
ChallengeScalar {
inner: acc,
_marker: PhantomData::default(),
_marker: PhantomData,
}
}
}
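// In symbols (our notation): the loop above realizes Algorithm 1 as a
// double-and-add over the endomorphism basis. Starting from
// acc = 2(\zeta + 1), each two-bit window (b_{2i+1}, b_{2i}) of the 128-bit
// challenge, for i = 63, ..., 0, selects
//
//     q_i = (-1)^{b_{2i+1}} \cdot \zeta^{b_{2i}} \in {1, -1, \zeta, -\zeta}
//
// and updates acc <- 2 \cdot acc + q_i.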
impl<F: FieldExt, T> ChallengeScalar<F, T> {
impl<C: CurveAffine, Type> ChallengeScalar<C, Type> {
/// Obtains a new challenge from the transcript.
pub fn get<C, HBase, HScalar>(transcript: &mut Transcript<C, HBase, HScalar>) -> Self
pub fn get<T: Transcript<C>>(transcript: &mut T) -> Self
where
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
{
Challenge::get(transcript).into()
}
}
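// Hypothetical usage sketch: the marker type scopes each challenge so that,
// e.g., two distinct challenges cannot be confused at compile time. `X1` and
// `X2` are illustrative markers, not names introduced by this change.
struct X1;
struct X2;
fn sample_two<C: CurveAffine, T: Transcript<C>>(
    transcript: &mut T,
) -> (ChallengeScalar<C, X1>, ChallengeScalar<C, X2>) {
    let x_1 = ChallengeScalar::<C, X1>::get(transcript);
    let x_2 = ChallengeScalar::<C, X2>::get(transcript);
    (x_1, x_2)
}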
impl<F: FieldExt, T> Deref for ChallengeScalar<F, T> {
type Target = F;
impl<C: CurveAffine, Type> Deref for ChallengeScalar<C, Type> {
type Target = C::Scalar;
fn deref(&self) -> &F {
fn deref(&self) -> &C::Scalar {
&self.inner
}
}
pub(crate) fn read_n_points<C: CurveAffine, T: TranscriptRead<C>>(
transcript: &mut T,
n: usize,
) -> io::Result<Vec<C>> {
(0..n).map(|_| transcript.read_point()).collect()
}
pub(crate) fn read_n_scalars<C: CurveAffine, T: TranscriptRead<C>>(
transcript: &mut T,
n: usize,
) -> io::Result<Vec<C::Scalar>> {
(0..n).map(|_| transcript.read_scalar()).collect()
}
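// A hypothetical helper showing the intended use of the two functions above:
// draining a fixed-shape opening (n commitments followed by n evaluations)
// from the proof stream. The name and layout are illustrative only.
fn read_opening<C: CurveAffine, T: TranscriptRead<C>>(
    transcript: &mut T,
    n: usize,
) -> io::Result<(Vec<C>, Vec<C::Scalar>)> {
    let commitments = read_n_points(transcript, n)?;
    let evals = read_n_scalars(transcript, n)?;
    Ok((commitments, evals))
}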