Update the PLONK implementation to adapt to the new transcript API.

Sean Bowe 2020-12-23 13:03:31 -07:00
parent 5be7d9525d
commit 06552eec44
17 changed files with 780 additions and 811 deletions
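
For orientation: this commit replaces the buffering `Proof` struct and the `Hasher`-based `Transcript` with streaming `create_proof`/`verify_proof` functions that write into a `TranscriptWrite` and read back from a `TranscriptRead`. Below is a minimal sketch of the new calling convention, assuming the pasta `EqAffine` parameters and the `DummyHashWriter`/`DummyHashReader` transcripts used in the benches and examples in this diff; the helper function itself is illustrative and not part of the commit.

```rust
// Illustrative sketch only (the helper is hypothetical); it mirrors the
// benches changed below, for a circuit with no aux (public input) columns.
use ff::Field;
use halo2::pasta::{EqAffine, Fp, Fq};
use halo2::plonk::{create_proof, verify_proof, Circuit, ProvingKey};
use halo2::poly::commitment::Params;
use halo2::transcript::{DummyHashReader, DummyHashWriter};

fn prove_then_verify<ConcreteCircuit: Circuit<Fp>>(
    params: &Params<EqAffine>,
    pk: &ProvingKey<EqAffine>,
    circuit: &ConcreteCircuit,
) {
    // Prover side: the proof is streamed directly into a writer transcript,
    // and the serialized proof bytes are obtained with finalize().
    let mut transcript = DummyHashWriter::init(vec![], Fq::one());
    create_proof(params, pk, circuit, &[], &mut transcript)
        .expect("proof generation should not fail");
    let proof: Vec<u8> = transcript.finalize();

    // Verifier side: the same bytes are replayed through a reader transcript.
    let msm = params.empty_msm();
    let mut transcript = DummyHashReader::init(&proof[..], Fq::one());
    let guard = verify_proof(params, pk.get_vk(), msm, &[], &mut transcript).unwrap();
    assert!(guard.use_challenges().eval());
}
```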

View File

@ -3,9 +3,9 @@ extern crate criterion;
extern crate halo2;
use crate::arithmetic::{small_multiexp, FieldExt};
use crate::pasta::{EqAffine, Fp, Fq};
use crate::pasta::{EqAffine, Fp};
use crate::poly::commitment::Params;
use crate::transcript::DummyHash;
use crate::transcript::DummyHashWriter;
use halo2::*;
use criterion::{black_box, Criterion};
@ -13,7 +13,7 @@ use criterion::{black_box, Criterion};
fn criterion_benchmark(c: &mut Criterion) {
// small multiexp
{
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(5);
let params: Params<EqAffine> = Params::new::<DummyHashWriter<_, _>>(5);
let g = &mut params.get_g();
let len = g.len() / 2;
let (g_lo, g_hi) = g.split_at_mut(len);

View File

@ -6,7 +6,7 @@ use halo2::arithmetic::FieldExt;
use halo2::pasta::{EqAffine, Fp, Fq};
use halo2::plonk::*;
use halo2::poly::commitment::Params;
use halo2::transcript::DummyHash;
use halo2::transcript::{DummyHashReader, DummyHashWriter, TranscriptRead, TranscriptWrite};
use std::marker::PhantomData;
@ -18,7 +18,7 @@ fn bench_with_k(name: &str, k: u32, c: &mut Criterion) {
pub struct Variable(Column<Advice>, usize);
// Initialize the polynomial commitment parameters
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(k);
let params: Params<EqAffine> = Params::new::<DummyHashWriter<_, _>>(k);
struct PLONKConfig {
a: Column<Advice>,
@ -239,7 +239,8 @@ fn bench_with_k(name: &str, k: u32, c: &mut Criterion) {
};
// Create a proof
Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(&params, &pk, &circuit, &[])
let mut transcript = DummyHashWriter::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[], &mut transcript)
.expect("proof generation should not fail")
});
});
@ -250,15 +251,16 @@ fn bench_with_k(name: &str, k: u32, c: &mut Criterion) {
};
// Create a proof
let proof = Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(&params, &pk, &circuit, &[])
let mut transcript = DummyHashWriter::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[], &mut transcript)
.expect("proof generation should not fail");
let proof = transcript.finalize();
c.bench_function(&verifier_name, |b| {
b.iter(|| {
let msm = params.empty_msm();
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, &[])
.unwrap();
let mut transcript = DummyHashReader::init(&proof[..], Fq::one());
let guard = verify_proof(&params, pk.get_vk(), msm, &[], &mut transcript).unwrap();
let msm = guard.clone().use_challenges();
assert!(msm.eval());
});

View File

@ -4,9 +4,10 @@ use halo2::{
pasta::{EqAffine, Fp, Fq},
plonk::*,
poly::commitment::{Blind, Params},
transcript::DummyHash,
transcript::{DummyHashReader, DummyHashWriter, TranscriptRead, TranscriptWrite},
};
use std::io;
use std::marker::PhantomData;
/// This represents an advice column at a certain row in the ConstraintSystem
@ -249,7 +250,7 @@ fn main() {
let k = 11;
// Initialize the polynomial commitment parameters
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(k);
let params: Params<EqAffine> = Params::new::<DummyHashWriter<io::Sink, _>>(k);
let empty_circuit: MyCircuit<Fp> = MyCircuit { a: None, k };
@ -273,18 +274,18 @@ fn main() {
};
// Create a proof
let proof =
Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(&params, &pk, &circuit, &[pubinputs])
.expect("proof generation should not fail");
let mut transcript = DummyHashWriter::init(vec![], Fq::one());
create_proof(&params, &pk, &circuit, &[pubinputs], &mut transcript)
.expect("proof generation should not fail");
let proof: Vec<u8> = transcript.finalize();
println!("[Prover] {}", recorder);
recorder.clear();
let pubinput_slice = &[pubinput];
let msm = params.empty_msm();
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, pubinput_slice)
.unwrap();
let mut transcript = DummyHashReader::init(&proof[..], Fq::one());
let guard = verify_proof(&params, pk.get_vk(), msm, pubinput_slice, &mut transcript).unwrap();
let msm = guard.clone().use_challenges();
assert!(msm.eval());
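
Note the asymmetry the example above relies on: under the new API, `create_proof` is handed each aux (public input) column as a Lagrange polynomial, while `verify_proof` is handed that column's commitment. A sketch of the pairing under the same assumptions as the previous sketch; the unblinded `commit_lagrange` call mirrors how `create_proof` commits to aux columns later in this diff, and the helper itself is illustrative only.

```rust
// Illustrative sketch only (the helper is hypothetical). The verifier receives
// the unblinded commitment to the same column the prover passes as a
// polynomial, matching the Blind::default() commitment used for aux columns
// inside create_proof.
use ff::Field;
use halo2::arithmetic::Curve;
use halo2::pasta::{EqAffine, Fp, Fq};
use halo2::plonk::{create_proof, verify_proof, Circuit, ProvingKey};
use halo2::poly::commitment::{Blind, Params};
use halo2::poly::{LagrangeCoeff, Polynomial};
use halo2::transcript::{DummyHashReader, DummyHashWriter};

fn roundtrip_with_aux<ConcreteCircuit: Circuit<Fp>>(
    params: &Params<EqAffine>,
    pk: &ProvingKey<EqAffine>,
    circuit: &ConcreteCircuit,
    pubinputs: Polynomial<Fp, LagrangeCoeff>,
) {
    // Prover side: the aux column is supplied as a polynomial.
    let mut transcript = DummyHashWriter::init(vec![], Fq::one());
    create_proof(params, pk, circuit, &[pubinputs.clone()], &mut transcript)
        .expect("proof generation should not fail");
    let proof: Vec<u8> = transcript.finalize();

    // Verifier side: the same column is supplied as its commitment.
    let pubinput = params
        .commit_lagrange(&pubinputs, Blind::default())
        .to_affine();
    let msm = params.empty_msm();
    let mut transcript = DummyHashReader::init(&proof[..], Fq::one());
    let guard =
        verify_proof(params, pk.get_vk(), msm, &[pubinput], &mut transcript).unwrap();
    assert!(guard.use_challenges().eval());
}
```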

View File

@ -6,9 +6,7 @@
//! [plonk]: https://eprint.iacr.org/2019/953
use crate::arithmetic::CurveAffine;
use crate::poly::{
multiopen, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial,
};
use crate::poly::{Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial};
use crate::transcript::ChallengeScalar;
mod circuit;
@ -48,20 +46,6 @@ pub struct ProvingKey<C: CurveAffine> {
permutations: Vec<permutation::ProvingKey<C>>,
}
/// This is an object which represents a (Turbo)PLONK proof.
// This structure must never allow points at infinity.
#[derive(Debug, Clone)]
pub struct Proof<C: CurveAffine> {
advice_commitments: Vec<C>,
permutations: Vec<permutation::Proof<C>>,
lookups: Vec<lookup::Proof<C>>,
advice_evals: Vec<C::Scalar>,
aux_evals: Vec<C::Scalar>,
fixed_evals: Vec<C::Scalar>,
vanishing: vanishing::Proof<C>,
multiopening: multiopen::Proof<C>,
}
/// This is an error that could occur during proving or circuit synthesis.
// TODO: these errors need to be cleaned up
#[derive(Debug)]
@ -121,8 +105,9 @@ fn test_proving() {
use crate::arithmetic::{Curve, FieldExt};
use crate::pasta::{EqAffine, Fp, Fq};
use crate::poly::commitment::{Blind, Params};
use crate::transcript::DummyHash;
use crate::transcript::{DummyHashReader, DummyHashWriter, TranscriptRead, TranscriptWrite};
use circuit::{Advice, Column, Fixed};
use std::io;
use std::marker::PhantomData;
const K: u32 = 5;
@ -131,7 +116,7 @@ fn test_proving() {
pub struct Variable(Column<Advice>, usize);
// Initialize the polynomial commitment parameters
let params: Params<EqAffine> = Params::new::<DummyHash<Fq>>(K);
let params: Params<EqAffine> = Params::new::<DummyHashWriter<io::Sink, _>>(K);
struct PLONKConfig {
a: Column<Advice>,
@ -463,20 +448,23 @@ fn test_proving() {
.to_affine();
for _ in 0..100 {
let mut transcript = DummyHashWriter::init(vec![], Fq::one());
// Create a proof
let proof = Proof::create::<DummyHash<Fq>, DummyHash<Fp>, _>(
create_proof(
&params,
&pk,
&circuit,
&[pubinputs.clone()],
&mut transcript,
)
.expect("proof generation should not fail");
let proof: Vec<u8> = transcript.finalize();
let pubinput_slice = &[pubinput];
let msm = params.empty_msm();
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, pubinput_slice)
.unwrap();
let mut transcript = DummyHashReader::init(&proof[..], Fq::one());
let guard =
verify_proof(&params, pk.get_vk(), msm, pubinput_slice, &mut transcript).unwrap();
{
let msm = guard.clone().use_challenges();
assert!(msm.eval());
@ -488,9 +476,9 @@ fn test_proving() {
}
let msm = guard.clone().use_challenges();
assert!(msm.clone().eval());
let guard = proof
.verify::<DummyHash<Fq>, DummyHash<Fp>>(&params, pk.get_vk(), msm, pubinput_slice)
.unwrap();
let mut transcript = DummyHashReader::init(&proof[..], Fq::one());
let guard =
verify_proof(&params, pk.get_vk(), msm, pubinput_slice, &mut transcript).unwrap();
{
let msm = guard.clone().use_challenges();
assert!(msm.eval());

View File

@ -1,5 +1,4 @@
use super::circuit::{Any, Column};
use crate::arithmetic::CurveAffine;
mod prover;
mod verifier;
@ -37,15 +36,3 @@ impl Argument {
3
}
}
#[derive(Clone, Debug)]
pub(crate) struct Proof<C: CurveAffine> {
product_commitment: C,
product_eval: C::Scalar,
product_inv_eval: C::Scalar,
permuted_input_commitment: C,
permuted_table_commitment: C,
permuted_input_eval: C::Scalar,
permuted_input_inv_eval: C::Scalar,
permuted_table_eval: C::Scalar,
}

View File

@ -2,7 +2,7 @@ use super::super::{
circuit::{Any, Column},
ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, ProvingKey,
};
use super::{Argument, Proof};
use super::Argument;
use crate::{
arithmetic::{eval_polynomial, parallelize, BatchInvert, Curve, CurveAffine, FieldExt},
poly::{
@ -10,9 +10,10 @@ use crate::{
multiopen::ProverQuery,
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation,
},
transcript::{Hasher, Transcript},
transcript::TranscriptWrite,
};
use ff::Field;
use std::io::Write;
use std::{collections::BTreeMap, iter};
#[derive(Debug)]
@ -47,13 +48,10 @@ pub(in crate::plonk) struct Committed<'a, C: CurveAffine> {
pub(in crate::plonk) struct Constructed<C: CurveAffine> {
permuted_input_poly: Polynomial<C::Scalar, Coeff>,
permuted_input_blind: Blind<C::Scalar>,
permuted_input_commitment: C,
permuted_table_poly: Polynomial<C::Scalar, Coeff>,
permuted_table_blind: Blind<C::Scalar>,
permuted_table_commitment: C,
product_poly: Polynomial<C::Scalar, Coeff>,
product_blind: Blind<C::Scalar>,
product_commitment: C,
}
pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
@ -78,21 +76,21 @@ impl Argument {
pub(in crate::plonk) fn commit_permuted<
'a,
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
W: Write,
T: TranscriptWrite<W, C>,
>(
&self,
pk: &ProvingKey<C>,
params: &Params<C>,
domain: &EvaluationDomain<C::Scalar>,
theta: ChallengeTheta<C::Scalar>,
theta: ChallengeTheta<C>,
advice_values: &'a [Polynomial<C::Scalar, LagrangeCoeff>],
fixed_values: &'a [Polynomial<C::Scalar, LagrangeCoeff>],
aux_values: &'a [Polynomial<C::Scalar, LagrangeCoeff>],
advice_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
fixed_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
aux_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
transcript: &mut Transcript<C, HBase, HScalar>,
transcript: &mut T,
) -> Result<Permuted<'a, C>, Error> {
// Closure to get values of columns and compress them
let compress_columns = |columns: &[Column<Any>]| {
@ -150,12 +148,12 @@ impl Argument {
// Hash permuted input commitment
transcript
.absorb_point(&permuted_input_commitment)
.write_point(permuted_input_commitment)
.map_err(|_| Error::TranscriptError)?;
// Hash permuted table commitment
transcript
.absorb_point(&permuted_table_commitment)
.write_point(permuted_table_commitment)
.map_err(|_| Error::TranscriptError)?;
let permuted_input_coset = pk
@ -197,14 +195,14 @@ impl<'a, C: CurveAffine> Permuted<'a, C> {
/// grand product polynomial over the lookup. The grand product polynomial
/// is used to populate the Product<C> struct. The Product<C> struct is
/// added to the Lookup and finally returned by the method.
pub(in crate::plonk) fn commit_product<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn commit_product<W: Write, T: TranscriptWrite<W, C>>(
self,
pk: &ProvingKey<C>,
params: &Params<C>,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
theta: ChallengeTheta<C>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
transcript: &mut T,
) -> Result<Committed<'a, C>, Error> {
// Goal is to compute the products of fractions
//
@ -331,7 +329,7 @@ impl<'a, C: CurveAffine> Permuted<'a, C> {
// Hash product commitment
transcript
.absorb_point(&product_commitment)
.write_point(product_commitment)
.map_err(|_| Error::TranscriptError)?;
Ok(Committed::<'a, C> {
@ -354,9 +352,9 @@ impl<'a, C: CurveAffine> Committed<'a, C> {
pub(in crate::plonk) fn construct(
self,
pk: &'a ProvingKey<C>,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
theta: ChallengeTheta<C>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
) -> Result<
(
Constructed<C>,
@ -434,13 +432,10 @@ impl<'a, C: CurveAffine> Committed<'a, C> {
Constructed {
permuted_input_poly: permuted.permuted_input_poly,
permuted_input_blind: permuted.permuted_input_blind,
permuted_input_commitment: permuted.permuted_input_commitment,
permuted_table_poly: permuted.permuted_table_poly,
permuted_table_blind: permuted.permuted_table_blind,
permuted_table_commitment: permuted.permuted_table_commitment,
product_poly: self.product_poly,
product_blind: self.product_blind,
product_commitment: self.product_commitment,
},
expressions,
))
@ -448,12 +443,12 @@ impl<'a, C: CurveAffine> Committed<'a, C> {
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn evaluate<W: Write, T: TranscriptWrite<W, C>>(
self,
pk: &ProvingKey<C>,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
x: ChallengeX<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let domain = &pk.vk.domain;
let x_inv = domain.rotate_omega(*x, Rotation(-1));
@ -471,17 +466,19 @@ impl<C: CurveAffine> Constructed<C> {
.chain(Some(permuted_input_inv_eval))
.chain(Some(permuted_table_eval))
{
transcript.absorb_scalar(eval);
transcript
.write_scalar(eval)
.map_err(|_| Error::TranscriptError)?;
}
Evaluated {
Ok(Evaluated {
constructed: self,
product_eval,
product_inv_eval,
permuted_input_eval,
permuted_input_inv_eval,
permuted_table_eval,
}
})
}
}
@ -489,7 +486,7 @@ impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn open<'a>(
&'a self,
pk: &'a ProvingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
let x_inv = pk.vk.domain.rotate_omega(*x, Rotation(-1));
@ -520,29 +517,16 @@ impl<C: CurveAffine> Evaluated<C> {
point: x_inv,
poly: &self.constructed.permuted_input_poly,
blind: self.constructed.permuted_input_blind,
eval: self.permuted_input_eval,
eval: self.permuted_input_inv_eval,
}))
// Open lookup product commitments at x_inv
.chain(Some(ProverQuery {
point: x_inv,
poly: &self.constructed.product_poly,
blind: self.constructed.product_blind,
eval: self.product_eval,
eval: self.product_inv_eval,
}))
}
pub(crate) fn build(self) -> Proof<C> {
Proof {
product_commitment: self.constructed.product_commitment,
product_eval: self.product_eval,
product_inv_eval: self.product_inv_eval,
permuted_input_commitment: self.constructed.permuted_input_commitment,
permuted_table_commitment: self.constructed.permuted_table_commitment,
permuted_input_eval: self.permuted_input_eval,
permuted_input_inv_eval: self.permuted_input_inv_eval,
permuted_table_eval: self.permuted_table_eval,
}
}
}
/// Given a column of input values A and a column of table values S,

View File

@ -1,51 +1,117 @@
use std::iter;
use super::super::circuit::{Any, Column};
use super::{Argument, Proof};
use super::Argument;
use crate::{
arithmetic::CurveAffine,
plonk::{ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, VerifyingKey},
poly::{multiopen::VerifierQuery, Rotation},
transcript::{Hasher, Transcript},
transcript::TranscriptRead,
};
use ff::Field;
use std::io::Read;
impl<C: CurveAffine> Proof<C> {
pub struct PermutationCommitments<C: CurveAffine> {
permuted_input_commitment: C,
permuted_table_commitment: C,
}
pub struct Committed<C: CurveAffine> {
permuted_input_commitment: C,
permuted_table_commitment: C,
product_commitment: C,
}
pub struct Evaluated<C: CurveAffine> {
committed: Committed<C>,
product_eval: C::Scalar,
product_inv_eval: C::Scalar,
permuted_input_eval: C::Scalar,
permuted_input_inv_eval: C::Scalar,
permuted_table_eval: C::Scalar,
}
impl Argument {
pub(in crate::plonk) fn absorb_permuted_commitments<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
C: CurveAffine,
R: Read,
T: TranscriptRead<R, C>,
>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
transcript
.absorb_point(&self.permuted_input_commitment)
transcript: &mut T,
) -> Result<PermutationCommitments<C>, Error> {
let permuted_input_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
let permuted_table_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
transcript
.absorb_point(&self.permuted_table_commitment)
.map_err(|_| Error::TranscriptError)
}
pub(in crate::plonk) fn absorb_product_commitment<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
transcript
.absorb_point(&self.product_commitment)
.map_err(|_| Error::TranscriptError)
Ok(PermutationCommitments {
permuted_input_commitment,
permuted_table_commitment,
})
}
}
impl<C: CurveAffine> PermutationCommitments<C> {
pub(in crate::plonk) fn absorb_product_commitment<R: Read, T: TranscriptRead<R, C>>(
self,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
let product_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
Ok(Committed {
permuted_input_commitment: self.permuted_input_commitment,
permuted_table_commitment: self.permuted_table_commitment,
product_commitment,
})
}
}
impl<C: CurveAffine> Committed<C> {
pub(crate) fn evaluate<R: Read, T: TranscriptRead<R, C>>(
self,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let product_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let product_inv_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permuted_input_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permuted_input_inv_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permuted_table_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
Ok(Evaluated {
committed: self,
product_eval,
product_inv_eval,
permuted_input_eval,
permuted_input_inv_eval,
permuted_table_eval,
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn expressions<'a>(
&'a self,
vk: &'a VerifyingKey<C>,
l_0: C::Scalar,
argument: &'a Argument,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
theta: ChallengeTheta<C>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
advice_evals: &[C::Scalar],
fixed_evals: &[C::Scalar],
aux_evals: &[C::Scalar],
@ -98,19 +164,10 @@ impl<C: CurveAffine> Proof<C> {
))
}
pub(in crate::plonk) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
iter::empty()
.chain(Some(&self.product_eval))
.chain(Some(&self.product_inv_eval))
.chain(Some(&self.permuted_input_eval))
.chain(Some(&self.permuted_input_inv_eval))
.chain(Some(&self.permuted_table_eval))
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
vk: &'a VerifyingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
let x_inv = vk.domain.rotate_omega(*x, Rotation(-1));
@ -118,31 +175,31 @@ impl<C: CurveAffine> Proof<C> {
// Open lookup product commitments at x
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.product_commitment,
commitment: &self.committed.product_commitment,
eval: self.product_eval,
}))
// Open lookup input commitments at x
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.permuted_input_commitment,
commitment: &self.committed.permuted_input_commitment,
eval: self.permuted_input_eval,
}))
// Open lookup table commitments at x
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.permuted_table_commitment,
commitment: &self.committed.permuted_table_commitment,
eval: self.permuted_table_eval,
}))
// Open lookup input commitments at \omega^{-1} x
.chain(Some(VerifierQuery {
point: x_inv,
commitment: &self.permuted_input_commitment,
commitment: &self.committed.permuted_input_commitment,
eval: self.permuted_input_inv_eval,
}))
// Open lookup product commitments at \omega^{-1} x
.chain(Some(VerifierQuery {
point: x_inv,
commitment: &self.product_commitment,
commitment: &self.committed.product_commitment,
eval: self.product_inv_eval,
}))
}
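
Taken together with the prover half above, these types turn each lookup into a staged read of the transcript: two permuted commitments, then one product commitment, then five evaluations, always in that order. A sketch of how the stages are chained, written as if inside the `plonk` module and mirroring the rewritten `verify_proof` later in this diff; the wrapper function itself is hypothetical.

```rust
// Illustrative sketch only (the wrapper is hypothetical); the calls mirror
// the PLONK verifier later in this diff. Challenges (theta, beta, gamma, ...)
// are squeezed between these stages in the real verifier.
use std::io::Read;

use crate::{
    arithmetic::CurveAffine,
    plonk::{Error, VerifyingKey},
    transcript::TranscriptRead,
};

fn read_lookups<C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
    vk: &VerifyingKey<C>,
    transcript: &mut T,
) -> Result<(), Error> {
    // Stage 1: permuted input/table commitments (written by commit_permuted).
    let lookups = vk
        .cs
        .lookups
        .iter()
        .map(|argument| argument.absorb_permuted_commitments(transcript))
        .collect::<Result<Vec<_>, _>>()?;

    // Stage 2: grand product commitments (written by commit_product).
    let lookups = lookups
        .into_iter()
        .map(|lookup| lookup.absorb_product_commitment(transcript))
        .collect::<Result<Vec<_>, _>>()?;

    // Stage 3: the five evaluations (written by Constructed::evaluate).
    let _evaluated = lookups
        .into_iter()
        .map(|lookup| lookup.evaluate(transcript))
        .collect::<Result<Vec<_>, _>>()?;

    Ok(())
}
```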

View File

@ -52,11 +52,3 @@ pub(crate) struct ProvingKey<C: CurveAffine> {
polys: Vec<Polynomial<C::Scalar, Coeff>>,
cosets: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
}
#[derive(Debug, Clone)]
pub(crate) struct Proof<C: CurveAffine> {
permutation_product_commitment: C,
permutation_product_eval: C::Scalar,
permutation_product_inv_eval: C::Scalar,
permutation_evals: Vec<C::Scalar>,
}

View File

@ -1,7 +1,8 @@
use ff::Field;
use std::io::Write;
use std::iter;
use super::{Argument, Proof, ProvingKey};
use super::{Argument, ProvingKey};
use crate::{
arithmetic::{eval_polynomial, parallelize, BatchInvert, Curve, CurveAffine, FieldExt},
plonk::{self, ChallengeBeta, ChallengeGamma, ChallengeX, Error},
@ -10,7 +11,7 @@ use crate::{
multiopen::ProverQuery,
Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation,
},
transcript::{Hasher, Transcript},
transcript::TranscriptWrite,
};
pub(crate) struct Committed<C: CurveAffine> {
@ -18,13 +19,11 @@ pub(crate) struct Committed<C: CurveAffine> {
permutation_product_coset: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
permutation_product_coset_inv: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
permutation_product_blind: Blind<C::Scalar>,
permutation_product_commitment: C,
}
pub(crate) struct Constructed<C: CurveAffine> {
permutation_product_poly: Polynomial<C::Scalar, Coeff>,
permutation_product_blind: Blind<C::Scalar>,
permutation_product_commitment: C,
}
pub(crate) struct Evaluated<C: CurveAffine> {
@ -35,19 +34,15 @@ pub(crate) struct Evaluated<C: CurveAffine> {
}
impl Argument {
pub(in crate::plonk) fn commit<
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
pub(in crate::plonk) fn commit<C: CurveAffine, W: Write, T: TranscriptWrite<W, C>>(
&self,
params: &Params<C>,
pk: &plonk::ProvingKey<C>,
pkey: &ProvingKey<C>,
advice: &[Polynomial<C::Scalar, LagrangeCoeff>],
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
let domain = &pk.vk.domain;
@ -129,7 +124,7 @@ impl Argument {
// Hash the permutation product commitment
transcript
.absorb_point(&permutation_product_commitment)
.write_point(permutation_product_commitment)
.map_err(|_| Error::TranscriptError)?;
Ok(Committed {
@ -137,7 +132,6 @@ impl Argument {
permutation_product_coset,
permutation_product_coset_inv,
permutation_product_blind,
permutation_product_commitment,
})
}
}
@ -149,8 +143,8 @@ impl<C: CurveAffine> Committed<C> {
p: &'a Argument,
pkey: &'a ProvingKey<C>,
advice_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
) -> Result<
(
Constructed<C>,
@ -211,7 +205,6 @@ impl<C: CurveAffine> Committed<C> {
Constructed {
permutation_product_poly: self.permutation_product_poly,
permutation_product_blind: self.permutation_product_blind,
permutation_product_commitment: self.permutation_product_commitment,
},
expressions,
))
@ -219,7 +212,7 @@ impl<C: CurveAffine> Committed<C> {
}
impl<C: CurveAffine> super::ProvingKey<C> {
fn evaluate(&self, x: ChallengeX<C::Scalar>) -> Vec<C::Scalar> {
fn evaluate(&self, x: ChallengeX<C>) -> Vec<C::Scalar> {
self.polys
.iter()
.map(|poly| eval_polynomial(poly, *x))
@ -229,7 +222,7 @@ impl<C: CurveAffine> super::ProvingKey<C> {
fn open<'a>(
&'a self,
evals: &'a [C::Scalar],
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
self.polys
.iter()
@ -244,13 +237,13 @@ impl<C: CurveAffine> super::ProvingKey<C> {
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn evaluate<W: Write, T: TranscriptWrite<W, C>>(
self,
pk: &plonk::ProvingKey<C>,
pkey: &ProvingKey<C>,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
x: ChallengeX<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let domain = &pk.vk.domain;
let permutation_product_eval = eval_polynomial(&self.permutation_product_poly, *x);
@ -268,15 +261,17 @@ impl<C: CurveAffine> Constructed<C> {
.chain(Some(&permutation_product_inv_eval))
.chain(permutation_evals.iter())
{
transcript.absorb_scalar(*eval);
transcript
.write_scalar(*eval)
.map_err(|_| Error::TranscriptError)?;
}
Evaluated {
Ok(Evaluated {
constructed: self,
permutation_product_eval,
permutation_product_inv_eval,
permutation_evals,
}
})
}
}
@ -285,7 +280,7 @@ impl<C: CurveAffine> Evaluated<C> {
&'a self,
pk: &'a plonk::ProvingKey<C>,
pkey: &'a ProvingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
let x_inv = pk.vk.domain.rotate_omega(*x, Rotation(-1));
@ -306,13 +301,4 @@ impl<C: CurveAffine> Evaluated<C> {
// Open permutation polynomial commitments at x
.chain(pkey.open(&self.permutation_evals, x))
}
pub(crate) fn build(self) -> Proof<C> {
Proof {
permutation_product_commitment: self.constructed.permutation_product_commitment,
permutation_product_eval: self.permutation_product_eval,
permutation_product_inv_eval: self.permutation_product_inv_eval,
permutation_evals: self.permutation_evals,
}
}
}

View File

@ -1,41 +1,81 @@
use ff::Field;
use std::io::Read;
use std::iter;
use super::{Argument, Proof, VerifyingKey};
use super::{Argument, VerifyingKey};
use crate::{
arithmetic::{CurveAffine, FieldExt},
plonk::{self, ChallengeBeta, ChallengeGamma, ChallengeX, Error},
poly::{multiopen::VerifierQuery, Rotation},
transcript::{Hasher, Transcript},
transcript::TranscriptRead,
};
impl<C: CurveAffine> Proof<C> {
pub(crate) fn check_lengths(&self, p: &Argument) -> Result<(), Error> {
if self.permutation_evals.len() != p.columns.len() {
return Err(Error::IncompatibleParams);
pub struct Committed<C: CurveAffine> {
permutation_product_commitment: C,
}
pub struct Evaluated<C: CurveAffine> {
permutation_product_commitment: C,
permutation_product_eval: C::Scalar,
permutation_product_inv_eval: C::Scalar,
permutation_evals: Vec<C::Scalar>,
}
impl Argument {
pub(crate) fn absorb_product_commitment<C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
&self,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
let permutation_product_commitment = transcript
.read_point()
.map_err(|_| Error::TranscriptError)?;
Ok(Committed {
permutation_product_commitment,
})
}
}
impl<C: CurveAffine> Committed<C> {
pub(crate) fn evaluate<R: Read, T: TranscriptRead<R, C>>(
self,
vkey: &VerifyingKey<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let permutation_product_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let permutation_product_inv_eval = transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?;
let mut permutation_evals = Vec::with_capacity(vkey.commitments.len());
for _ in 0..vkey.commitments.len() {
permutation_evals.push(
transcript
.read_scalar()
.map_err(|_| Error::TranscriptError)?,
);
}
Ok(())
}
pub(crate) fn absorb_commitments<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
transcript
.absorb_point(&self.permutation_product_commitment)
.map_err(|_| Error::TranscriptError)
Ok(Evaluated {
permutation_product_commitment: self.permutation_product_commitment,
permutation_product_eval,
permutation_product_inv_eval,
permutation_evals,
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn expressions<'a>(
&'a self,
vk: &'a plonk::VerifyingKey<C>,
p: &'a Argument,
advice_evals: &'a [C::Scalar],
l_0: C::Scalar,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
x: ChallengeX<C::Scalar>,
beta: ChallengeBeta<C>,
gamma: ChallengeGamma<C>,
x: ChallengeX<C>,
) -> impl Iterator<Item = C::Scalar> + 'a {
iter::empty()
// l_0(X) * (1 - z(X)) = 0
@ -70,18 +110,11 @@ impl<C: CurveAffine> Proof<C> {
}))
}
pub(crate) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
iter::empty()
.chain(Some(&self.permutation_product_eval))
.chain(Some(&self.permutation_product_inv_eval))
.chain(self.permutation_evals.iter())
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
vk: &'a plonk::VerifyingKey<C>,
vkey: &'a VerifyingKey<C>,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
let x_inv = vk.domain.rotate_omega(*x, Rotation(-1));

View File

@ -1,9 +1,10 @@
use ff::Field;
use std::io::Write;
use std::iter;
use super::{
circuit::{Advice, Assignment, Circuit, Column, ConstraintSystem, Fixed},
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, Proof,
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error,
ProvingKey,
};
use crate::arithmetic::{eval_polynomial, Curve, CurveAffine, FieldExt};
@ -12,371 +13,350 @@ use crate::poly::{
multiopen::{self, ProverQuery},
LagrangeCoeff, Polynomial,
};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::TranscriptWrite;
impl<C: CurveAffine> Proof<C> {
/// This creates a proof for the provided `circuit` when given the public
/// parameters `params` and the proving key [`ProvingKey`] that was
/// generated previously for the same circuit.
pub fn create<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
ConcreteCircuit: Circuit<C::Scalar>,
>(
params: &Params<C>,
pk: &ProvingKey<C>,
circuit: &ConcreteCircuit,
aux: &[Polynomial<C::Scalar, LagrangeCoeff>],
) -> Result<Self, Error> {
if aux.len() != pk.vk.cs.num_aux_columns {
return Err(Error::IncompatibleParams);
/// This creates a proof for the provided `circuit` when given the public
/// parameters `params` and the proving key [`ProvingKey`] that was
/// generated previously for the same circuit.
pub fn create_proof<
C: CurveAffine,
W: Write,
T: TranscriptWrite<W, C>,
ConcreteCircuit: Circuit<C::Scalar>,
>(
params: &Params<C>,
pk: &ProvingKey<C>,
circuit: &ConcreteCircuit,
aux: &[Polynomial<C::Scalar, LagrangeCoeff>],
transcript: &mut T,
) -> Result<(), Error> {
if aux.len() != pk.vk.cs.num_aux_columns {
return Err(Error::IncompatibleParams);
}
struct WitnessCollection<F: Field> {
advice: Vec<Polynomial<F, LagrangeCoeff>>,
_marker: std::marker::PhantomData<F>,
}
impl<F: Field> Assignment<F> for WitnessCollection<F> {
fn assign_advice(
&mut self,
column: Column<Advice>,
row: usize,
to: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
*self
.advice
.get_mut(column.index())
.and_then(|v| v.get_mut(row))
.ok_or(Error::BoundsFailure)? = to()?;
Ok(())
}
struct WitnessCollection<F: Field> {
advice: Vec<Polynomial<F, LagrangeCoeff>>,
_marker: std::marker::PhantomData<F>,
fn assign_fixed(
&mut self,
_: Column<Fixed>,
_: usize,
_: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
impl<F: Field> Assignment<F> for WitnessCollection<F> {
fn assign_advice(
&mut self,
column: Column<Advice>,
row: usize,
to: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
*self
.advice
.get_mut(column.index())
.and_then(|v| v.get_mut(row))
.ok_or(Error::BoundsFailure)? = to()?;
fn copy(&mut self, _: usize, _: usize, _: usize, _: usize, _: usize) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
fn assign_fixed(
&mut self,
_: Column<Fixed>,
_: usize,
_: impl FnOnce() -> Result<F, Error>,
) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
fn copy(
&mut self,
_: usize,
_: usize,
_: usize,
_: usize,
_: usize,
) -> Result<(), Error> {
// We only care about advice columns here
Ok(())
}
Ok(())
}
}
let domain = &pk.vk.domain;
let mut meta = ConstraintSystem::default();
let config = ConcreteCircuit::configure(&mut meta);
let domain = &pk.vk.domain;
let mut meta = ConstraintSystem::default();
let config = ConcreteCircuit::configure(&mut meta);
let mut witness = WitnessCollection {
advice: vec![domain.empty_lagrange(); meta.num_advice_columns],
_marker: std::marker::PhantomData,
};
let mut witness = WitnessCollection {
advice: vec![domain.empty_lagrange(); meta.num_advice_columns],
_marker: std::marker::PhantomData,
};
// Synthesize the circuit to obtain the witness and other information.
circuit.synthesize(&mut witness, config)?;
// Synthesize the circuit to obtain the witness and other information.
circuit.synthesize(&mut witness, config)?;
let witness = witness;
let witness = witness;
// Create a transcript for obtaining Fiat-Shamir challenges.
let mut transcript = Transcript::<C, HBase, HScalar>::new();
// Compute commitments to aux column polynomials
let aux_commitments_projective: Vec<_> = aux
.iter()
.map(|poly| params.commit_lagrange(poly, Blind::default()))
.collect();
let mut aux_commitments = vec![C::zero(); aux_commitments_projective.len()];
C::Projective::batch_to_affine(&aux_commitments_projective, &mut aux_commitments);
let aux_commitments = aux_commitments;
drop(aux_commitments_projective);
metrics::counter!("aux_commitments", aux_commitments.len() as u64);
// Compute commitments to aux column polynomials
let aux_commitments_projective: Vec<_> = aux
.iter()
.map(|poly| params.commit_lagrange(poly, Blind::default()))
.collect();
let mut aux_commitments = vec![C::zero(); aux_commitments_projective.len()];
C::Projective::batch_to_affine(&aux_commitments_projective, &mut aux_commitments);
let aux_commitments = aux_commitments;
drop(aux_commitments_projective);
metrics::counter!("aux_commitments", aux_commitments.len() as u64);
for commitment in &aux_commitments {
transcript
.common_point(*commitment)
.map_err(|_| Error::TranscriptError)?;
}
for commitment in &aux_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
let aux_polys: Vec<_> = aux
.iter()
.map(|poly| {
let lagrange_vec = domain.lagrange_from_vec(poly.to_vec());
domain.lagrange_to_coeff(lagrange_vec)
})
.collect();
let aux_polys: Vec<_> = aux
.iter()
.map(|poly| {
let lagrange_vec = domain.lagrange_from_vec(poly.to_vec());
domain.lagrange_to_coeff(lagrange_vec)
})
.collect();
let aux_cosets: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
let poly = aux_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
let aux_cosets: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
let poly = aux_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
// Compute commitments to advice column polynomials
let advice_blinds: Vec<_> = witness
.advice
.iter()
.map(|_| Blind(C::Scalar::rand()))
.collect();
let advice_commitments_projective: Vec<_> = witness
.advice
.iter()
.zip(advice_blinds.iter())
.map(|(poly, blind)| params.commit_lagrange(poly, *blind))
.collect();
let mut advice_commitments = vec![C::zero(); advice_commitments_projective.len()];
C::Projective::batch_to_affine(&advice_commitments_projective, &mut advice_commitments);
let advice_commitments = advice_commitments;
drop(advice_commitments_projective);
metrics::counter!("advice_commitments", advice_commitments.len() as u64);
// Compute commitments to advice column polynomials
let advice_blinds: Vec<_> = witness
.advice
.iter()
.map(|_| Blind(C::Scalar::rand()))
.collect();
let advice_commitments_projective: Vec<_> = witness
.advice
.iter()
.zip(advice_blinds.iter())
.map(|(poly, blind)| params.commit_lagrange(poly, *blind))
.collect();
let mut advice_commitments = vec![C::zero(); advice_commitments_projective.len()];
C::Projective::batch_to_affine(&advice_commitments_projective, &mut advice_commitments);
let advice_commitments = advice_commitments;
drop(advice_commitments_projective);
metrics::counter!("advice_commitments", advice_commitments.len() as u64);
for commitment in &advice_commitments {
transcript
.write_point(*commitment)
.map_err(|_| Error::TranscriptError)?;
}
for commitment in &advice_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
let advice_polys: Vec<_> = witness
.advice
.clone()
.into_iter()
.map(|poly| domain.lagrange_to_coeff(poly))
.collect();
let advice_polys: Vec<_> = witness
.advice
.clone()
let advice_cosets: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
let poly = advice_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(transcript);
// Construct and commit to permuted values for each lookup
let lookups = pk
.vk
.cs
.lookups
.iter()
.map(|lookup| {
lookup.commit_permuted(
&pk,
&params,
&domain,
theta,
&witness.advice,
&pk.fixed_values,
&aux,
&advice_cosets,
&pk.fixed_cosets,
&aux_cosets,
transcript,
)
})
.collect::<Result<Vec<_>, _>>()?;
// Sample beta challenge
let beta = ChallengeBeta::get(transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(transcript);
// Commit to permutations, if any.
let permutations = pk
.vk
.cs
.permutations
.iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.commit(params, pk, pkey, &witness.advice, beta, gamma, transcript))
.collect::<Result<Vec<_>, _>>()?;
// Construct and commit to products for each lookup
let lookups = lookups
.into_iter()
.map(|lookup| lookup.commit_product(&pk, &params, theta, beta, gamma, transcript))
.collect::<Result<Vec<_>, _>>()?;
// Obtain challenge for keeping all separate gates linearly independent
let y = ChallengeY::get(transcript);
// Evaluate the h(X) polynomial's constraint system expressions for the permutation constraints, if any.
let (permutations, permutation_expressions): (Vec<_>, Vec<_>) = {
let tmp = permutations
.into_iter()
.map(|poly| domain.lagrange_to_coeff(poly))
.collect();
let advice_cosets: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
let poly = advice_polys[column.index()].clone();
domain.coeff_to_extended(poly, at)
})
.collect();
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(&mut transcript);
// Construct and commit to permuted values for each lookup
let lookups = pk
.vk
.cs
.lookups
.iter()
.map(|lookup| {
lookup.commit_permuted(
&pk,
&params,
&domain,
theta,
&witness.advice,
&pk.fixed_values,
&aux,
&advice_cosets,
&pk.fixed_cosets,
&aux_cosets,
&mut transcript,
)
})
.collect::<Result<Vec<_>, _>>()?;
// Sample beta challenge
let beta = ChallengeBeta::get(&mut transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(&mut transcript);
// Commit to permutations, if any.
let permutations = pk
.vk
.cs
.permutations
.iter()
.zip(pk.vk.cs.permutations.iter())
.zip(pk.permutations.iter())
.map(|(p, pkey)| {
p.commit(
params,
pk,
pkey,
&witness.advice,
beta,
gamma,
&mut transcript,
)
.map(|((p, argument), pkey)| {
p.construct(pk, argument, pkey, &advice_cosets, beta, gamma)
})
.collect::<Result<Vec<_>, _>>()?;
// Construct and commit to products for each lookup
let lookups = lookups
tmp.into_iter().unzip()
};
// Evaluate the h(X) polynomial's constraint system expressions for the lookup constraints, if any.
let (lookups, lookup_expressions): (Vec<_>, Vec<_>) = {
let tmp = lookups
.into_iter()
.map(|lookup| lookup.commit_product(&pk, &params, theta, beta, gamma, &mut transcript))
.map(|p| p.construct(pk, theta, beta, gamma))
.collect::<Result<Vec<_>, _>>()?;
// Obtain challenge for keeping all separate gates linearly independent
let y = ChallengeY::get(&mut transcript);
tmp.into_iter().unzip()
};
// Evaluate the h(X) polynomial's constraint system expressions for the permutation constraints, if any.
let (permutations, permutation_expressions): (Vec<_>, Vec<_>) = {
let tmp = permutations
.into_iter()
.zip(pk.vk.cs.permutations.iter())
.zip(pk.permutations.iter())
.map(|((p, argument), pkey)| {
p.construct(pk, argument, pkey, &advice_cosets, beta, gamma)
})
.collect::<Result<Vec<_>, _>>()?;
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let expressions = iter::empty()
// Custom constraints
.chain(meta.gates.iter().map(|poly| {
poly.evaluate(
&|index| pk.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|index| aux_cosets[index].clone(),
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
)
}))
// Permutation constraints, if any.
.chain(permutation_expressions.into_iter().flatten())
// Lookup constraints, if any.
.chain(lookup_expressions.into_iter().flatten());
tmp.into_iter().unzip()
};
// Construct the vanishing argument
let vanishing = vanishing::Argument::construct(params, domain, expressions, y, transcript)?;
// Evaluate the h(X) polynomial's constraint system expressions for the lookup constraints, if any.
let (lookups, lookup_expressions): (Vec<_>, Vec<_>) = {
let tmp = lookups
.into_iter()
.map(|p| p.construct(pk, theta, beta, gamma))
.collect::<Result<Vec<_>, _>>()?;
let x = ChallengeX::get(transcript);
tmp.into_iter().unzip()
};
// Evaluate polynomials at omega^i x
let advice_evals: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&advice_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let expressions = iter::empty()
// Custom constraints
.chain(meta.gates.iter().map(|poly| {
poly.evaluate(
&|index| pk.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|index| aux_cosets[index].clone(),
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
)
}))
// Permutation constraints, if any.
.chain(permutation_expressions.into_iter().flatten())
// Lookup constraints, if any.
.chain(lookup_expressions.into_iter().flatten());
let aux_evals: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&aux_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Construct the vanishing argument
let vanishing =
vanishing::Argument::construct(params, domain, expressions, y, &mut transcript)?;
let fixed_evals: Vec<_> = meta
.fixed_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&pk.fixed_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
let x = ChallengeX::get(&mut transcript);
// Hash each column evaluation
for eval in advice_evals
.iter()
.chain(aux_evals.iter())
.chain(fixed_evals.iter())
{
transcript
.write_scalar(*eval)
.map_err(|_| Error::TranscriptError)?;
}
// Evaluate polynomials at omega^i x
let advice_evals: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&advice_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
let vanishing = vanishing.evaluate(x, transcript)?;
let aux_evals: Vec<_> = meta
.aux_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&aux_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Evaluate the permutations, if any, at omega^i x.
let permutations = permutations
.into_iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.evaluate(pk, pkey, x, transcript))
.collect::<Result<Vec<_>, _>>()?;
let fixed_evals: Vec<_> = meta
.fixed_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&pk.fixed_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
// Evaluate the lookups, if any, at omega^i x.
let lookups = lookups
.into_iter()
.map(|p| p.evaluate(pk, x, transcript))
.collect::<Result<Vec<_>, _>>()?;
// Hash each column evaluation
for eval in advice_evals
.iter()
.chain(aux_evals.iter())
.chain(fixed_evals.iter())
{
transcript.absorb_scalar(*eval);
}
let vanishing = vanishing.evaluate(x, &mut transcript);
// Evaluate the permutations, if any, at omega^i x.
let permutations = permutations
.into_iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.evaluate(pk, pkey, x, &mut transcript))
.collect::<Vec<_>>();
// Evaluate the lookups, if any, at omega^i x.
let lookups = lookups
.into_iter()
.map(|p| p.evaluate(pk, x, &mut transcript))
.collect::<Vec<_>>();
let instances =
iter::empty()
.chain(pk.vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &advice_polys[column.index()],
blind: advice_blinds[column.index()],
eval: advice_evals[query_index],
},
))
.chain(pk.vk.cs.aux_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
let instances =
iter::empty()
.chain(pk.vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &advice_polys[column.index()],
blind: advice_blinds[column.index()],
eval: advice_evals[query_index],
},
))
.chain(
pk.vk
.cs
.aux_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &aux_polys[column.index()],
blind: Blind::default(),
eval: aux_evals[query_index],
},
))
.chain(pk.vk.cs.fixed_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
}),
)
.chain(
pk.vk
.cs
.fixed_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(*x, at),
poly: &pk.fixed_polys[column.index()],
blind: Blind::default(),
eval: fixed_evals[query_index],
},
))
// We query the h(X) polynomial at x
.chain(vanishing.open(x))
.chain(
permutations
.iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.open(pk, pkey, x))
.into_iter()
.flatten(),
)
.chain(lookups.iter().map(|p| p.open(pk, x)).into_iter().flatten());
}),
)
// We query the h(X) polynomial at x
.chain(vanishing.open(x))
.chain(
permutations
.iter()
.zip(pk.permutations.iter())
.map(|(p, pkey)| p.open(pk, pkey, x))
.into_iter()
.flatten(),
)
.chain(lookups.iter().map(|p| p.open(pk, x)).into_iter().flatten());
let multiopening = multiopen::Proof::create(params, &mut transcript, instances)
.map_err(|_| Error::OpeningError)?;
Ok(Proof {
advice_commitments,
permutations: permutations.into_iter().map(|p| p.build()).collect(),
lookups: lookups.into_iter().map(|p| p.build()).collect(),
advice_evals,
fixed_evals,
aux_evals,
vanishing: vanishing.build(),
multiopening,
})
}
multiopen::create_proof(params, transcript, instances).map_err(|_| Error::OpeningError)
}

View File

@ -9,9 +9,3 @@ mod verifier;
pub(crate) struct Argument<C: CurveAffine> {
_marker: PhantomData<C>,
}
#[derive(Debug, Clone)]
pub(crate) struct Proof<C: CurveAffine> {
h_commitments: Vec<C>,
h_evals: Vec<C::Scalar>,
}

View File

@ -1,4 +1,6 @@
use super::{Argument, Proof};
use std::io::Write;
use super::Argument;
use crate::{
arithmetic::{eval_polynomial, Curve, CurveAffine, FieldExt},
plonk::{ChallengeX, ChallengeY, Error},
@ -7,13 +9,12 @@ use crate::{
multiopen::ProverQuery,
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial,
},
transcript::{Hasher, Transcript},
transcript::TranscriptWrite,
};
pub(in crate::plonk) struct Constructed<C: CurveAffine> {
h_pieces: Vec<Polynomial<C::Scalar, Coeff>>,
h_blinds: Vec<Blind<C::Scalar>>,
h_commitments: Vec<C>,
}
pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
@ -22,12 +23,12 @@ pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
}
impl<C: CurveAffine> Argument<C> {
pub(in crate::plonk) fn construct<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn construct<W: Write, T: TranscriptWrite<W, C>>(
params: &Params<C>,
domain: &EvaluationDomain<C::Scalar>,
expressions: impl Iterator<Item = Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
y: ChallengeY<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
y: ChallengeY<C>,
transcript: &mut T,
) -> Result<Constructed<C>, Error> {
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let h_poly = expressions.fold(domain.empty_extended(), |h_poly, v| h_poly * *y + &v);
@ -59,24 +60,20 @@ impl<C: CurveAffine> Argument<C> {
// Hash each h(X) piece
for c in h_commitments.iter() {
transcript
.absorb_point(c)
.write_point(*c)
.map_err(|_| Error::TranscriptError)?;
}
Ok(Constructed {
h_pieces,
h_blinds,
h_commitments,
})
Ok(Constructed { h_pieces, h_blinds })
}
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
pub(in crate::plonk) fn evaluate<W: Write, T: TranscriptWrite<W, C>>(
self,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
x: ChallengeX<C>,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let h_evals: Vec<_> = self
.h_pieces
.iter()
@ -85,20 +82,22 @@ impl<C: CurveAffine> Constructed<C> {
// Hash each advice evaluation
for eval in &h_evals {
transcript.absorb_scalar(*eval);
transcript
.write_scalar(*eval)
.map_err(|_| Error::TranscriptError)?;
}
Evaluated {
Ok(Evaluated {
constructed: self,
h_evals,
}
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn open<'a>(
&'a self,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
self.constructed
.h_pieces
@ -112,11 +111,4 @@ impl<C: CurveAffine> Evaluated<C> {
eval: *h_eval,
})
}
pub(in crate::plonk) fn build(self) -> Proof<C> {
Proof {
h_commitments: self.constructed.h_commitments,
h_evals: self.h_evals,
}
}
}

View File

@ -1,46 +1,57 @@
use ff::Field;
use std::io::Read;
use super::Proof;
use crate::{
arithmetic::CurveAffine,
plonk::{ChallengeX, ChallengeY, Error, VerifyingKey},
poly::multiopen::VerifierQuery,
transcript::{Hasher, Transcript},
transcript::{read_n_points, read_n_scalars, TranscriptRead},
};
impl<C: CurveAffine> Proof<C> {
pub(in crate::plonk) fn check_lengths(&self, vk: &VerifyingKey<C>) -> Result<(), Error> {
if self.h_commitments.len() != self.h_evals.len() {
return Err(Error::IncompatibleParams);
}
use super::Argument;
if self.h_commitments.len() != vk.domain.get_quotient_poly_degree() {
return Err(Error::IncompatibleParams);
}
pub struct Committed<C: CurveAffine> {
h_commitments: Vec<C>,
}
Ok(())
}
pub struct Evaluated<C: CurveAffine> {
h_commitments: Vec<C>,
h_evals: Vec<C::Scalar>,
}
pub(in crate::plonk) fn absorb_commitments<
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
impl<C: CurveAffine> Argument<C> {
pub(in crate::plonk) fn absorb_commitments<R: Read, T: TranscriptRead<R, C>>(
vk: &VerifyingKey<C>,
transcript: &mut T,
) -> Result<Committed<C>, Error> {
// Obtain a commitment to h(X) in the form of multiple pieces of degree n - 1
for c in &self.h_commitments {
transcript
.absorb_point(c)
.map_err(|_| Error::TranscriptError)?;
}
Ok(())
}
let h_commitments = read_n_points(transcript, vk.domain.get_quotient_poly_degree())
.map_err(|_| Error::TranscriptError)?;
Ok(Committed { h_commitments })
}
}
impl<C: CurveAffine> Committed<C> {
pub(in crate::plonk) fn evaluate<R: Read, T: TranscriptRead<R, C>>(
self,
transcript: &mut T,
) -> Result<Evaluated<C>, Error> {
let h_evals = read_n_scalars(transcript, self.h_commitments.len())
.map_err(|_| Error::TranscriptError)?;
Ok(Evaluated {
h_commitments: self.h_commitments,
h_evals,
})
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn verify(
&self,
expressions: impl Iterator<Item = C::Scalar>,
y: ChallengeY<C::Scalar>,
y: ChallengeY<C>,
xn: C::Scalar,
) -> Result<(), Error> {
let expected_h_eval = expressions.fold(C::Scalar::zero(), |h_eval, v| h_eval * &*y + &v);
@ -60,13 +71,9 @@ impl<C: CurveAffine> Proof<C> {
Ok(())
}
pub(in crate::plonk) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
self.h_evals.iter()
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
x: ChallengeX<C::Scalar>,
x: ChallengeX<C>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
self.h_commitments
.iter()

View File

@ -1,216 +1,105 @@
use ff::Field;
use std::io::Read;
use std::iter;
use super::{
ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, Proof,
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error,
VerifyingKey,
};
use crate::arithmetic::{CurveAffine, FieldExt};
use crate::poly::{
commitment::{Guard, Params, MSM},
multiopen::VerifierQuery,
multiopen::{self, VerifierQuery},
};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::{read_n_points, read_n_scalars, TranscriptRead};
impl<'a, C: CurveAffine> Proof<C> {
/// Returns a boolean indicating whether or not the proof is valid
pub fn verify<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
&'a self,
params: &'a Params<C>,
vk: &'a VerifyingKey<C>,
msm: MSM<'a, C>,
aux_commitments: &'a [C],
) -> Result<Guard<'a, C>, Error> {
self.check_lengths(vk, aux_commitments)?;
// Check that aux_commitments matches the expected number of aux_columns
// and self.aux_evals
if aux_commitments.len() != vk.cs.num_aux_columns
|| self.aux_evals.len() != vk.cs.num_aux_columns
{
return Err(Error::IncompatibleParams);
}
// Create a transcript for obtaining Fiat-Shamir challenges.
let mut transcript = Transcript::<C, HBase, HScalar>::new();
// Hash the aux (external) commitments into the transcript
for commitment in aux_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
// Hash the prover's advice commitments into the transcript
for commitment in &self.advice_commitments {
transcript
.absorb_point(commitment)
.map_err(|_| Error::TranscriptError)?;
}
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(&mut transcript);
// Hash each lookup permuted commitment
for lookup in &self.lookups {
lookup.absorb_permuted_commitments(&mut transcript)?;
}
// Sample beta challenge
let beta = ChallengeBeta::get(&mut transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(&mut transcript);
// Hash each permutation product commitment
for permutation in &self.permutations {
permutation.absorb_commitments(&mut transcript)?;
}
// Hash each lookup product commitment
for lookup in &self.lookups {
lookup.absorb_product_commitment(&mut transcript)?;
}
// Sample y challenge, which keeps the gates linearly independent.
let y = ChallengeY::get(&mut transcript);
self.vanishing.absorb_commitments(&mut transcript)?;
// Sample x challenge, which is used to ensure the circuit is
// satisfied with high probability.
let x = ChallengeX::get(&mut transcript);
// This check ensures the circuit is satisfied so long as the polynomial
// commitments open to the correct values.
self.check_hx(params, vk, theta, beta, gamma, y, x)?;
for eval in self
.advice_evals
.iter()
.chain(self.aux_evals.iter())
.chain(self.fixed_evals.iter())
.chain(self.vanishing.evals())
.chain(
self.permutations
.iter()
.map(|p| p.evals())
.into_iter()
.flatten(),
)
.chain(self.lookups.iter().map(|p| p.evals()).into_iter().flatten())
{
transcript.absorb_scalar(*eval);
}
let queries =
iter::empty()
.chain(vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &self.advice_commitments[column.index()],
eval: self.advice_evals[query_index],
},
))
.chain(
vk.cs
.aux_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &aux_commitments[column.index()],
eval: self.aux_evals[query_index],
}),
)
.chain(vk.cs.fixed_queries.iter().enumerate().map(
|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &vk.fixed_commitments[column.index()],
eval: self.fixed_evals[query_index],
},
))
.chain(self.vanishing.queries(x));
// We are now convinced the circuit is satisfied so long as the
// polynomial commitments open to the correct values.
self.multiopening
.verify(
params,
&mut transcript,
queries
.chain(
self.permutations
.iter()
.zip(vk.permutations.iter())
.map(|(p, vkey)| p.queries(vk, vkey, x))
.into_iter()
.flatten(),
)
.chain(
self.lookups
.iter()
.map(|p| p.queries(vk, x))
.into_iter()
.flatten(),
),
msm,
)
.map_err(|_| Error::OpeningError)
/// Verifies a PLONK proof read from `transcript`, returning a [`Guard`] that still
/// needs to be checked against the polynomial commitment scheme.
pub fn verify_proof<'a, C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
params: &'a Params<C>,
vk: &'a VerifyingKey<C>,
msm: MSM<'a, C>,
aux_commitments: &'a [C],
transcript: &mut T,
) -> Result<Guard<'a, C>, Error> {
// Check that aux_commitments matches the expected number of aux columns
if aux_commitments.len() != vk.cs.num_aux_columns {
return Err(Error::IncompatibleParams);
}
/// Checks that the lengths of vectors are consistent with the constraint
/// system
fn check_lengths(&self, vk: &VerifyingKey<C>, aux_commitments: &[C]) -> Result<(), Error> {
// Check that aux_commitments matches the expected number of aux_columns
// and self.aux_evals
if aux_commitments.len() != vk.cs.num_aux_columns
|| self.aux_evals.len() != vk.cs.num_aux_columns
{
return Err(Error::IncompatibleParams);
}
if self.fixed_evals.len() != vk.cs.fixed_queries.len() {
return Err(Error::IncompatibleParams);
}
if self.advice_evals.len() != vk.cs.advice_queries.len() {
return Err(Error::IncompatibleParams);
}
if self.permutations.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
for (permutation, p) in self.permutations.iter().zip(vk.cs.permutations.iter()) {
permutation.check_lengths(p)?;
}
self.vanishing.check_lengths(vk)?;
if self.lookups.len() != vk.cs.lookups.len() {
return Err(Error::IncompatibleParams);
}
if self.advice_commitments.len() != vk.cs.num_advice_columns {
return Err(Error::IncompatibleParams);
}
Ok(())
// Hash the aux (external) commitments into the transcript
for commitment in aux_commitments {
transcript
.common_point(*commitment)
.map_err(|_| Error::TranscriptError)?
}
/// Checks that this proof's h_evals are correct, and thus that all of the
/// rules are satisfied.
fn check_hx(
&self,
params: &'a Params<C>,
vk: &VerifyingKey<C>,
theta: ChallengeTheta<C::Scalar>,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
y: ChallengeY<C::Scalar>,
x: ChallengeX<C::Scalar>,
) -> Result<(), Error> {
// Hash the prover's advice commitments into the transcript
let advice_commitments =
read_n_points(transcript, vk.cs.num_advice_columns).map_err(|_| Error::TranscriptError)?;
// Sample theta challenge for keeping lookup columns linearly independent
let theta = ChallengeTheta::get(transcript);
// Hash each lookup permuted commitment
let lookups = vk
.cs
.lookups
.iter()
.map(|argument| argument.absorb_permuted_commitments(transcript))
.collect::<Result<Vec<_>, _>>()?;
// Sample beta challenge
let beta = ChallengeBeta::get(transcript);
// Sample gamma challenge
let gamma = ChallengeGamma::get(transcript);
// Hash each permutation product commitment
let permutations = vk
.cs
.permutations
.iter()
.map(|argument| argument.absorb_product_commitment(transcript))
.collect::<Result<Vec<_>, _>>()?;
// Hash each lookup product commitment
let lookups = lookups
.into_iter()
.map(|lookup| lookup.absorb_product_commitment(transcript))
.collect::<Result<Vec<_>, _>>()?;
// Sample y challenge, which keeps the gates linearly independent.
let y = ChallengeY::get(transcript);
let vanishing = vanishing::Argument::absorb_commitments(vk, transcript)?;
// Sample x challenge, which is used to ensure the circuit is
// satisfied with high probability.
let x = ChallengeX::get(transcript);
let advice_evals = read_n_scalars(transcript, vk.cs.advice_queries.len())
.map_err(|_| Error::TranscriptError)?;
let aux_evals =
read_n_scalars(transcript, vk.cs.aux_queries.len()).map_err(|_| Error::TranscriptError)?;
let fixed_evals = read_n_scalars(transcript, vk.cs.fixed_queries.len())
.map_err(|_| Error::TranscriptError)?;
let vanishing = vanishing.evaluate(transcript)?;
let permutations = permutations
.into_iter()
.zip(vk.permutations.iter())
.map(|(permutation, vkey)| permutation.evaluate(vkey, transcript))
.collect::<Result<Vec<_>, _>>()?;
let lookups = lookups
.into_iter()
.map(|lookup| lookup.evaluate(transcript))
.collect::<Result<Vec<_>, _>>()?;
// This check ensures the circuit is satisfied so long as the polynomial
// commitments open to the correct values.
{
// x^n
let xn = x.pow(&[params.n as u64, 0, 0, 0]);
@ -225,26 +114,26 @@ impl<'a, C: CurveAffine> Proof<C> {
// Evaluate the circuit using the custom gates provided
.chain(vk.cs.gates.iter().map(|poly| {
poly.evaluate(
&|index| self.fixed_evals[index],
&|index| self.advice_evals[index],
&|index| self.aux_evals[index],
&|index| fixed_evals[index],
&|index| advice_evals[index],
&|index| aux_evals[index],
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * &scalar,
)
}))
.chain(
self.permutations
permutations
.iter()
.zip(vk.cs.permutations.iter())
.map(|(p, argument)| {
p.expressions(vk, argument, &self.advice_evals, l_0, beta, gamma, x)
p.expressions(vk, argument, &advice_evals, l_0, beta, gamma, x)
})
.into_iter()
.flatten(),
)
.chain(
self.lookups
lookups
.iter()
.zip(vk.cs.lookups.iter())
.map(|(p, argument)| {
@ -255,15 +144,70 @@ impl<'a, C: CurveAffine> Proof<C> {
theta,
beta,
gamma,
&self.advice_evals,
&self.fixed_evals,
&self.aux_evals,
&advice_evals,
&fixed_evals,
&aux_evals,
)
})
.into_iter()
.flatten(),
);
self.vanishing.verify(expressions, y, xn)
vanishing.verify(expressions, y, xn)?;
}
let queries = iter::empty()
.chain(
vk.cs
.advice_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &advice_commitments[column.index()],
eval: advice_evals[query_index],
}),
)
.chain(
vk.cs
.aux_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &aux_commitments[column.index()],
eval: aux_evals[query_index],
}),
)
.chain(
vk.cs
.fixed_queries
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(*x, at),
commitment: &vk.fixed_commitments[column.index()],
eval: fixed_evals[query_index],
}),
)
.chain(vanishing.queries(x))
.chain(
permutations
.iter()
.zip(vk.permutations.iter())
.map(|(p, vkey)| p.queries(vk, vkey, x))
.into_iter()
.flatten(),
)
.chain(
lookups
.iter()
.map(|p| p.queries(vk, x))
.into_iter()
.flatten(),
);
// We are now convinced the circuit is satisfied so long as the
// polynomial commitments open to the correct values.
multiopen::verify_proof(params, transcript, queries, msm).map_err(|_| Error::OpeningError)
}

View File

@ -21,14 +21,14 @@ struct CommitmentData<C: CurveAffine> {
}
/// Verify a multi-opening proof
pub fn verify_proof<'a, I, C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
pub fn verify_proof<'b, 'a: 'b, I, C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
params: &'a Params<C>,
transcript: &mut T,
queries: I,
mut msm: MSM<'a, C>,
) -> Result<Guard<'a, C>, Error>
where
I: IntoIterator<Item = VerifierQuery<'a, C>> + Clone,
I: IntoIterator<Item = VerifierQuery<'b, C>> + Clone,
{
// Scale the MSM by a random factor to ensure that if the existing MSM
// has is_zero() == false then this argument won't be able to interfere

View File

@ -284,3 +284,25 @@ impl<C: CurveAffine, Type> Deref for ChallengeScalar<C, Type> {
&self.inner
}
}
pub(crate) fn read_n_points<C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
transcript: &mut T,
n: usize,
) -> io::Result<Vec<C>> {
let mut v = Vec::with_capacity(n);
for _ in 0..n {
v.push(transcript.read_point()?);
}
Ok(v)
}
pub(crate) fn read_n_scalars<C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
transcript: &mut T,
n: usize,
) -> io::Result<Vec<C::Scalar>> {
let mut v = Vec::with_capacity(n);
for _ in 0..n {
v.push(transcript.read_scalar()?);
}
Ok(v)
}
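
These helpers let the verifier pull a fixed-length batch of commitments or scalars off the transcript in one call; the rewritten PLONK verifier uses them for the advice commitments and for each block of evaluations. A sketch of the call pattern, written as if inside the `plonk` module and mirroring the verifier earlier in this diff; the wrapper function itself is hypothetical.

```rust
// Illustrative sketch only (the wrapper is hypothetical); read_n_scalars is
// used the same way for evaluation batches, e.g. with vk.cs.advice_queries.len().
use std::io::Read;

use crate::{
    arithmetic::CurveAffine,
    plonk::{Error, VerifyingKey},
    transcript::{read_n_points, TranscriptRead},
};

fn read_advice_commitments<C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
    vk: &VerifyingKey<C>,
    transcript: &mut T,
) -> Result<Vec<C>, Error> {
    // One commitment per advice column, in column order.
    read_n_points(transcript, vk.cs.num_advice_columns).map_err(|_| Error::TranscriptError)
}
```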