Update multiopen APIs to reflect changes made to Transcript APIs

Sean Bowe 2020-12-21 15:59:41 -07:00
parent d30c6b62e4
commit 5be7d9525d
No known key found for this signature in database
GPG Key ID: 95684257D8F8B031
3 changed files with 212 additions and 244 deletions
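At the call sites, this commit swaps the struct-returning methods for free functions that stream the proof through the transcript. A minimal before/after sketch, assuming a caller that already has `params`, `queries`, `msm`, and a suitable transcript (none of which are shown in this commit; the `multiopen::` path prefix is likewise an assumption):

// Before: the multi-point opening proof was an explicit value.
//     let proof = multiopen::Proof::create(&params, &mut transcript, queries)?;
//     let guard = proof.verify(&params, &mut transcript, queries, msm)?;
//
// After: the prover writes the proof into a TranscriptWrite, the verifier reads it
// back out of a TranscriptRead, and only the verifier returns a value (the Guard).
//     multiopen::create_proof(&params, &mut transcript, queries)?;
//     let guard = multiopen::verify_proof(&params, &mut transcript, queries, msm)?;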

View File

@@ -15,6 +15,9 @@ use crate::{
mod prover;
mod verifier;
+pub use prover::create_proof;
+pub use verifier::verify_proof;

#[derive(Clone, Copy, Debug)]
struct X1 {}

/// Challenge for compressing openings at the same point sets together.
@@ -36,19 +39,6 @@ struct X4 {}
/// together.
type ChallengeX4<F> = ChallengeScalar<F, X4>;

-/// This is a multi-point opening proof used in the polynomial commitment scheme opening.
-#[derive(Debug, Clone)]
-pub struct Proof<C: CurveAffine> {
-    // A vector of evaluations at each set of query points
-    q_evals: Vec<C::Scalar>,
-    // Commitment to final polynomial
-    f_commitment: C,
-    // Commitment proof
-    opening: commitment::Proof<C>,
-}
-
/// A polynomial query at a point
#[derive(Debug, Clone)]
pub struct ProverQuery<'a, C: CurveAffine> {
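Taken together, the four challenges defined above drive the multi-point reduction implemented by the prover and verifier below. As a summary (the indexing and notation here are reconstructed from the folds and comments in those files, not taken from the crate's documentation): writing S_i for the i-th point set and p_{i,j} for the polynomials queried at exactly S_i,

q_i(X) = \sum_j x_1^{j} \, p_{i,j}(X)
f(X) = \sum_i x_2^{i} \cdot \frac{q_i(X) - r_i(X)}{\prod_{z \in S_i} (X - z)}

where r_i is the low-degree interpolation of q_i's claimed evaluations over S_i. The prover commits to f(X), the challenge x_3 is squeezed from the transcript, the evaluations q_i(x_3) are written to it, and both sides then collapse f and the q_i with powers of x_4 into a single commitment/evaluation pair that the underlying commitment argument opens at x_3.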

View File

@@ -1,18 +1,19 @@
use super::super::{
    commitment::{self, Blind, Params},
-   Coeff, Error, Polynomial,
+   Coeff, Polynomial,
};
use super::{
-   construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Proof,
-   ProverQuery, Query,
+   construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, ProverQuery,
+   Query,
};
use crate::arithmetic::{
    eval_polynomial, kate_division, lagrange_interpolate, Curve, CurveAffine, FieldExt,
};
-use crate::transcript::{Hasher, Transcript};
+use crate::transcript::TranscriptWrite;
use ff::Field;
+use std::io::{self, Write};
use std::marker::PhantomData;
#[derive(Debug, Clone)]
@@ -23,138 +24,118 @@ struct CommitmentData<C: CurveAffine> {
    evals: Vec<C::Scalar>,
}

-impl<C: CurveAffine> Proof<C> {
-    /// Create a multi-opening proof
-    pub fn create<'a, I, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
-        params: &Params<C>,
-        transcript: &mut Transcript<C, HBase, HScalar>,
-        queries: I,
-    ) -> Result<Self, Error>
+/// Create a multi-opening proof
+pub fn create_proof<'a, I, C: CurveAffine, W: Write, T: TranscriptWrite<W, C>>(
+    params: &Params<C>,
+    transcript: &mut T,
+    queries: I,
+) -> io::Result<()>
where
    I: IntoIterator<Item = ProverQuery<'a, C>> + Clone,
{
    let x_1 = ChallengeX1::get(transcript);
    let x_2 = ChallengeX2::get(transcript);
    let (poly_map, point_sets) = construct_intermediate_sets(queries);

    // Collapse openings at same point sets together into single openings using
    // x_1 challenge.
    let mut q_polys: Vec<Option<Polynomial<C::Scalar, Coeff>>> = vec![None; point_sets.len()];
    let mut q_blinds = vec![Blind(C::Scalar::zero()); point_sets.len()];

    // A vec of vecs of evals. The outer vec corresponds to the point set,
    // while the inner vec corresponds to the points in a particular set.
    let mut q_eval_sets = Vec::with_capacity(point_sets.len());
    for point_set in point_sets.iter() {
        q_eval_sets.push(vec![C::Scalar::zero(); point_set.len()]);
    }
    {
        let mut accumulate = |set_idx: usize,
                              new_poly: &Polynomial<C::Scalar, Coeff>,
                              blind: Blind<C::Scalar>,
                              evals: Vec<C::Scalar>| {
            if let Some(poly) = &q_polys[set_idx] {
                q_polys[set_idx] = Some(poly.clone() * *x_1 + new_poly);
            } else {
                q_polys[set_idx] = Some(new_poly.clone());
            }
            q_blinds[set_idx] *= *x_1;
            q_blinds[set_idx] += blind;
            // Each polynomial is evaluated at a set of points. For each set,
            // we collapse each polynomial's evals pointwise.
            for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
-               *set_eval *= &*x_1;
+               *set_eval *= &x_1;
                *set_eval += eval;
            }
        };

        for commitment_data in poly_map.into_iter() {
            accumulate(
                commitment_data.set_index,        // set_idx,
                commitment_data.commitment.poly,  // poly,
                commitment_data.commitment.blind, // blind,
                commitment_data.evals,            // evals
            );
        }
    }

    let f_poly = point_sets
        .iter()
        .zip(q_eval_sets.iter())
        .zip(q_polys.iter())
        .fold(None, |f_poly, ((points, evals), poly)| {
            let mut poly = poly.clone().unwrap().values;
            // TODO: makes implicit assumption that poly degree is smaller than interpolation poly degree
            for (p, r) in poly.iter_mut().zip(lagrange_interpolate(points, evals)) {
                *p -= &r;
            }
            let mut poly = points
                .iter()
                .fold(poly, |poly, point| kate_division(&poly, *point));
            poly.resize(params.n as usize, C::Scalar::zero());
            let poly = Polynomial {
                values: poly,
                _marker: PhantomData,
            };
            if f_poly.is_none() {
                Some(poly)
            } else {
                f_poly.map(|f_poly| f_poly * *x_2 + &poly)
            }
        })
        .unwrap();

-   let mut f_blind = Blind(C::Scalar::rand());
-   let mut f_commitment = params.commit(&f_poly, f_blind).to_affine();
-   let (opening, q_evals) = loop {
-       let mut transcript = transcript.clone();
-       transcript
-           .absorb_point(&f_commitment)
-           .map_err(|_| Error::SamplingError)?;
-       let x_3 = ChallengeX3::get(&mut transcript);
-       let q_evals: Vec<C::Scalar> = q_polys
-           .iter()
-           .map(|poly| eval_polynomial(poly.as_ref().unwrap(), *x_3))
-           .collect();
-       for eval in q_evals.iter() {
-           transcript.absorb_scalar(*eval);
-       }
-       let x_4 = ChallengeX4::get(&mut transcript);
-       let (f_poly, f_blind_try) = q_polys.iter().zip(q_blinds.iter()).fold(
-           (f_poly.clone(), f_blind),
-           |(f_poly, f_blind), (poly, blind)| {
-               (
-                   f_poly * *x_4 + poly.as_ref().unwrap(),
-                   Blind((f_blind.0 * &*x_4) + &blind.0),
-               )
-           },
-       );
-       if let Ok(opening) =
-           commitment::Proof::create(&params, &mut transcript, &f_poly, f_blind_try, *x_3)
-       {
-           break (opening, q_evals);
-       } else {
-           f_blind += C::Scalar::one();
-           f_commitment = (f_commitment + params.h).to_affine();
-       }
-   };
-   Ok(Proof {
-       q_evals,
-       f_commitment,
-       opening,
-   })
-}
+   let f_blind = Blind(C::Scalar::rand());
+   let f_commitment = params.commit(&f_poly, f_blind).to_affine();
+   transcript.write_point(f_commitment)?;
+   let x_3 = ChallengeX3::get(transcript);
+   let q_evals: Vec<C::Scalar> = q_polys
+       .iter()
+       .map(|poly| eval_polynomial(poly.as_ref().unwrap(), *x_3))
+       .collect();
+   for eval in q_evals.iter() {
+       transcript.write_scalar(*eval)?;
+   }
+   let x_4 = ChallengeX4::get(transcript);
+   let (f_poly, f_blind_try) = q_polys.iter().zip(q_blinds.iter()).fold(
+       (f_poly.clone(), f_blind),
+       |(f_poly, f_blind), (poly, blind)| {
+           (
+               f_poly * *x_4 + poly.as_ref().unwrap(),
+               Blind((f_blind.0 * &x_4) + &blind.0),
+           )
+       },
+   );
+   commitment::create_proof(&params, transcript, &f_poly, f_blind_try, *x_3)
}
#[doc(hidden)]
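Both the `accumulate` closure (driven by `x_1`) and the final `x_4` fold in `create_proof` above are Horner-style accumulations of the form `acc = acc * challenge + item`. A tiny self-contained illustration, with plain integers standing in for field elements and the function name being mine rather than the crate's:

fn horner(items: &[u64], x: u64) -> u64 {
    // After folding items p0, p1, p2 the accumulator holds p0*x^2 + p1*x + p2,
    // i.e. earlier items end up weighted by higher powers of the challenge.
    items.iter().fold(0, |acc, item| acc * x + item)
}

fn main() {
    assert_eq!(horner(&[3, 5, 7], 10), 357); // 3*10^2 + 5*10 + 7
}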

View File

@@ -5,11 +5,13 @@ use super::super::{
    Error,
};
use super::{
-   construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Proof, Query,
+   construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query,
    VerifierQuery,
};
use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine, FieldExt};
-use crate::transcript::{Hasher, Transcript};
+use crate::transcript::TranscriptRead;
+use std::io::Read;
#[derive(Debug, Clone)]
struct CommitmentData<C: CurveAffine> {
@@ -18,114 +20,109 @@ struct CommitmentData<C: CurveAffine> {
    evals: Vec<C::Scalar>,
}

-impl<C: CurveAffine> Proof<C> {
-    /// Verify a multi-opening proof
-    pub fn verify<'a, I, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
-        &self,
-        params: &'a Params<C>,
-        transcript: &mut Transcript<C, HBase, HScalar>,
-        queries: I,
-        mut msm: MSM<'a, C>,
-    ) -> Result<Guard<'a, C>, Error>
+/// Verify a multi-opening proof
+pub fn verify_proof<'a, I, C: CurveAffine, R: Read, T: TranscriptRead<R, C>>(
+    params: &'a Params<C>,
+    transcript: &mut T,
+    queries: I,
+    mut msm: MSM<'a, C>,
+) -> Result<Guard<'a, C>, Error>
where
    I: IntoIterator<Item = VerifierQuery<'a, C>> + Clone,
{
    // Scale the MSM by a random factor to ensure that if the existing MSM
    // has is_zero() == false then this argument won't be able to interfere
    // with it to make it true, with high probability.
    msm.scale(C::Scalar::rand());

    // Sample x_1 for compressing openings at the same point sets together
    let x_1 = ChallengeX1::get(transcript);
    // Sample a challenge x_2 for keeping the multi-point quotient
    // polynomial terms linearly independent.
-   let x_2 = ChallengeX2::get(transcript);
+   let x_2 = ChallengeX2::<C::Scalar>::get(transcript);

    let (commitment_map, point_sets) = construct_intermediate_sets(queries);

    // Compress the commitments and expected evaluations at x together.
    // using the challenge x_1
    let mut q_commitments: Vec<_> = vec![params.empty_msm(); point_sets.len()];

    // A vec of vecs of evals. The outer vec corresponds to the point set,
    // while the inner vec corresponds to the points in a particular set.
    let mut q_eval_sets = Vec::with_capacity(point_sets.len());
    for point_set in point_sets.iter() {
        q_eval_sets.push(vec![C::Scalar::zero(); point_set.len()]);
    }
    {
        let mut accumulate = |set_idx: usize, new_commitment, evals: Vec<C::Scalar>| {
            q_commitments[set_idx].scale(*x_1);
            q_commitments[set_idx].append_term(C::Scalar::one(), new_commitment);
            for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
-               *set_eval *= &*x_1;
+               *set_eval *= &x_1;
                *set_eval += eval;
            }
        };

        // Each commitment corresponds to evaluations at a set of points.
        // For each set, we collapse each commitment's evals pointwise.
        for commitment_data in commitment_map.into_iter() {
            accumulate(
                commitment_data.set_index,     // set_idx,
                *commitment_data.commitment.0, // commitment,
                commitment_data.evals,         // evals
            );
        }
    }

    // Obtain the commitment to the multi-point quotient polynomial f(X).
-   transcript
-       .absorb_point(&self.f_commitment)
-       .map_err(|_| Error::SamplingError)?;
+   let f_commitment = transcript.read_point().map_err(|_| Error::SamplingError)?;

    // Sample a challenge x_3 for checking that f(X) was committed to
    // correctly.
    let x_3 = ChallengeX3::get(transcript);

-   for eval in self.q_evals.iter() {
-       transcript.absorb_scalar(*eval);
-   }
+   let mut q_evals = Vec::with_capacity(q_eval_sets.len());
+   for _ in 0..q_eval_sets.len() {
+       q_evals.push(transcript.read_scalar().map_err(|_| Error::SamplingError)?);
+   }

    // We can compute the expected msm_eval at x_3 using the q_evals provided
    // by the prover and from x_2
    let msm_eval = point_sets
        .iter()
        .zip(q_eval_sets.iter())
-       .zip(self.q_evals.iter())
+       .zip(q_evals.iter())
        .fold(
            C::Scalar::zero(),
            |msm_eval, ((points, evals), proof_eval)| {
                let r_poly = lagrange_interpolate(points, evals);
                let r_eval = eval_polynomial(&r_poly, *x_3);
                let eval = points.iter().fold(*proof_eval - &r_eval, |eval, point| {
                    eval * &(*x_3 - point).invert().unwrap()
                });
-               msm_eval * &*x_2 + &eval
+               msm_eval * &x_2 + &eval
            },
        );

    // Sample a challenge x_4 that we will use to collapse the openings of
    // the various remaining polynomials at x_3 together.
    let x_4 = ChallengeX4::get(transcript);

    // Compute the final commitment that has to be opened
    let mut commitment_msm = params.empty_msm();
-   commitment_msm.append_term(C::Scalar::one(), self.f_commitment);
-   let (commitment_msm, msm_eval) = q_commitments.into_iter().zip(self.q_evals.iter()).fold(
+   commitment_msm.append_term(C::Scalar::one(), f_commitment);
+   let (commitment_msm, msm_eval) = q_commitments.into_iter().zip(q_evals.iter()).fold(
        (commitment_msm, msm_eval),
        |(mut commitment_msm, msm_eval), (q_commitment, q_eval)| {
            commitment_msm.scale(*x_4);
            commitment_msm.add_msm(&q_commitment);
-           (commitment_msm, msm_eval * &*x_4 + q_eval)
+           (commitment_msm, msm_eval * &x_4 + q_eval)
        },
    );

    // Verify the opening proof
-   self.opening
-       .verify(params, msm, transcript, *x_3, commitment_msm, msm_eval)
-}
+   super::commitment::verify_proof(params, msm, transcript, *x_3, commitment_msm, msm_eval)
}
#[doc(hidden)]
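On the verifier side, the new calling convention implied by the signature above looks roughly like the wrapper below. It only restates the bounds from this diff; the helper name is mine, the crate types (`Params`, `MSM`, `Guard`, `Error`, `TranscriptRead`, `VerifierQuery`) are assumed to be in scope, and `Guard` is treated as an opaque value because its API is not part of this commit:

fn check_multiopen<'a, C, R, T, I>(
    params: &'a Params<C>,
    transcript: &mut T,
    queries: I,
    msm: MSM<'a, C>,
) -> Result<Guard<'a, C>, Error>
where
    C: CurveAffine,
    R: std::io::Read,
    T: TranscriptRead<R, C>,
    I: IntoIterator<Item = VerifierQuery<'a, C>> + Clone,
{
    // verify_proof reads the f(X) commitment and the q_i evaluations out of the
    // transcript, folds everything into the provided MSM, and defers the final
    // check to the returned Guard.
    verify_proof(params, transcript, queries, msm)
}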