Merge pull request #70 from zcash/internal-api

Extract permutation argument and introduce typed challenges
ebfull 2020-12-02 08:54:36 -07:00 committed by GitHub
commit d5927d66c0
17 changed files with 1108 additions and 710 deletions

View File

@ -68,27 +68,6 @@ where
}
}
/// This is a 128-bit verifier challenge.
#[derive(Copy, Clone, Debug)]
pub struct Challenge(pub(crate) u128);
/// This algorithm applies the mapping of Algorithm 1 from the
/// [Halo](https://eprint.iacr.org/2019/1021) paper.
pub fn get_challenge_scalar<F: FieldExt>(challenge: Challenge) -> F {
let mut acc = (F::ZETA + F::one()).double();
for i in (0..64).rev() {
let should_negate = ((challenge.0 >> ((i << 1) + 1)) & 1) == 1;
let should_endo = ((challenge.0 >> (i << 1)) & 1) == 1;
let q = if should_negate { -F::one() } else { F::one() };
let q = if should_endo { q * F::ZETA } else { q };
acc = acc + q + acc;
}
acc
}
fn multiexp_serial<C: CurveAffine>(coeffs: &[C::Scalar], bases: &[C], acc: &mut C::Projective) {
let coeffs: Vec<[u8; 32]> = coeffs.iter().map(|a| a.to_bytes()).collect();
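Note on the logic removed from arithmetic.rs above: get_challenge_scalar reads the 128-bit verifier challenge two bits at a time and folds each pair into the accumulator as one of {1, -1, ZETA, -ZETA}, where ZETA is the endomorphism scalar from the Halo paper; the equivalent logic now sits behind the transcript's typed Challenge/ChallengeScalar types used later in this diff. The sketch below is purely illustrative (not part of this commit): it tracks the accumulator as integer coefficients of 1 and ZETA so the folding is visible without any field arithmetic.

```rust
// Illustrative only (not the crate's API): track the accumulator as integer
// coefficients (a, b) standing for a + b*ZETA, to watch Algorithm 1 fold the
// 128-bit challenge two bits at a time, exactly as get_challenge_scalar does.
fn decompose_challenge(challenge: u128) -> (i128, i128) {
    // acc starts as 2*(ZETA + 1), i.e. (2, 2) in (constant, ZETA) coordinates.
    let (mut a, mut b) = (2i128, 2i128);
    for i in (0..64).rev() {
        let should_negate = ((challenge >> ((i << 1) + 1)) & 1) == 1;
        let should_endo = ((challenge >> (i << 1)) & 1) == 1;
        // q is one of {1, -1, ZETA, -ZETA}.
        let sign: i128 = if should_negate { -1 } else { 1 };
        let (qa, qb) = if should_endo { (0, sign) } else { (sign, 0) };
        // acc = acc + q + acc, i.e. 2*acc + q.
        a = 2 * a + qa;
        b = 2 * b + qb;
    }
    (a, b)
}

fn main() {
    // The lowest bit pair 0b01 means "apply the endomorphism, do not negate".
    let (a, b) = decompose_challenge(0b01);
    println!("challenge maps to {} + {}*ZETA (before field reduction)", a, b);
}
```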

View File

@ -6,12 +6,12 @@
//! [plonk]: https://eprint.iacr.org/2019/953
use crate::arithmetic::CurveAffine;
use crate::poly::{
multiopen, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial,
};
use crate::poly::{multiopen, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial};
use crate::transcript::ChallengeScalar;
mod circuit;
mod keygen;
mod permutation;
mod prover;
mod verifier;
@ -26,7 +26,7 @@ pub use verifier::*;
pub struct VerifyingKey<C: CurveAffine> {
domain: EvaluationDomain<C::Scalar>,
fixed_commitments: Vec<C>,
permutation_commitments: Vec<Vec<C>>,
permutations: Vec<permutation::VerifyingKey<C>>,
cs: ConstraintSystem<C::Scalar>,
}
@ -39,9 +39,7 @@ pub struct ProvingKey<C: CurveAffine> {
l0: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
fixed_polys: Vec<Polynomial<C::Scalar, Coeff>>,
fixed_cosets: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
permutations: Vec<Vec<Polynomial<C::Scalar, LagrangeCoeff>>>,
permutation_polys: Vec<Vec<Polynomial<C::Scalar, Coeff>>>,
permutation_cosets: Vec<Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>>,
permutations: Vec<permutation::ProvingKey<C>>,
}
/// This is an object which represents a (Turbo)PLONK proof.
@ -50,10 +48,7 @@ pub struct ProvingKey<C: CurveAffine> {
pub struct Proof<C: CurveAffine> {
advice_commitments: Vec<C>,
h_commitments: Vec<C>,
permutation_product_commitments: Vec<C>,
permutation_product_evals: Vec<C::Scalar>,
permutation_product_inv_evals: Vec<C::Scalar>,
permutation_evals: Vec<Vec<C::Scalar>>,
permutations: Option<permutation::Proof<C>>,
advice_evals: Vec<C::Scalar>,
aux_evals: Vec<C::Scalar>,
fixed_evals: Vec<C::Scalar>,
@ -95,6 +90,22 @@ impl<C: CurveAffine> VerifyingKey<C> {
}
}
#[derive(Clone, Copy, Debug)]
struct Beta;
type ChallengeBeta<F> = ChallengeScalar<F, Beta>;
#[derive(Clone, Copy, Debug)]
struct Gamma;
type ChallengeGamma<F> = ChallengeScalar<F, Gamma>;
#[derive(Clone, Copy, Debug)]
struct Y;
type ChallengeY<F> = ChallengeScalar<F, Y>;
#[derive(Clone, Copy, Debug)]
struct X;
type ChallengeX<F> = ChallengeScalar<F, X>;
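These four marker types are the "typed challenges" half of this PR: every transcript challenge is wrapped in a ChallengeScalar parameterized by a zero-sized tag, so a beta can no longer be passed where a gamma, y, or x is expected. The sketch below shows the shape of such a wrapper; the crate's actual ChallengeScalar lives in src/transcript.rs (imported above) and also carries the squeeze-and-map logic behind its get constructor, so this is a simplified stand-in, not the real implementation.

```rust
use std::marker::PhantomData;
use std::ops::Deref;

/// A scalar challenge tagged with a zero-sized marker type `T`.
/// The tag exists only at compile time, so `ChallengeScalar<F, Beta>` and
/// `ChallengeScalar<F, Gamma>` are distinct, unmixable types.
#[derive(Copy, Clone, Debug)]
pub struct ChallengeScalar<F, T> {
    inner: F,
    _marker: PhantomData<T>,
}

impl<F, T> ChallengeScalar<F, T> {
    /// In the real crate this would squeeze the transcript and map the
    /// 128-bit output to a scalar; here we just wrap a provided value.
    pub fn new(inner: F) -> Self {
        ChallengeScalar {
            inner,
            _marker: PhantomData,
        }
    }
}

/// `*challenge` recovers the raw scalar, matching the `*beta` and `*x`
/// dereferences seen throughout this diff.
impl<F, T> Deref for ChallengeScalar<F, T> {
    type Target = F;
    fn deref(&self) -> &F {
        &self.inner
    }
}
```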
#[test]
fn test_proving() {
use crate::arithmetic::{Curve, FieldExt};

View File

@ -4,7 +4,7 @@ use ff::Field;
use std::collections::BTreeMap;
use std::convert::TryFrom;
use super::Error;
use super::{permutation, Error};
use crate::poly::Rotation;
/// A column type
@ -312,7 +312,7 @@ pub struct ConstraintSystem<F> {
// Vector of permutation arguments, where each corresponds to a sequence of columns
// that are involved in a permutation argument.
pub(crate) permutations: Vec<Vec<Column<Advice>>>,
pub(crate) permutations: Vec<permutation::Argument>,
}
impl<F: Field> Default for ConstraintSystem<F> {
@ -347,7 +347,8 @@ impl<F: Field> ConstraintSystem<F> {
for column in columns {
self.query_advice_index(*column, 0);
}
self.permutations.push(columns.to_vec());
self.permutations
.push(permutation::Argument::new(columns.to_vec()));
index
}

View File

@ -2,9 +2,9 @@ use ff::Field;
use super::{
circuit::{Advice, Assignment, Circuit, Column, ConstraintSystem, Fixed},
Error, ProvingKey, VerifyingKey,
permutation, Error, ProvingKey, VerifyingKey,
};
use crate::arithmetic::{Curve, CurveAffine, FieldExt};
use crate::arithmetic::{Curve, CurveAffine};
use crate::poly::{
commitment::{Blind, Params},
EvaluationDomain, LagrangeCoeff, Polynomial, Rotation,
@ -21,9 +21,7 @@ where
{
struct Assembly<F: Field> {
fixed: Vec<Polynomial<F, LagrangeCoeff>>,
mapping: Vec<Vec<Vec<(usize, usize)>>>,
aux: Vec<Vec<Vec<(usize, usize)>>>,
sizes: Vec<Vec<Vec<usize>>>,
permutations: permutation::keygen::Assembly,
_marker: std::marker::PhantomData<F>,
}
@ -61,62 +59,22 @@ where
right_column: usize,
right_row: usize,
) -> Result<(), Error> {
// Check bounds first
if permutation >= self.mapping.len()
|| left_column >= self.mapping[permutation].len()
|| left_row >= self.mapping[permutation][left_column].len()
|| right_column >= self.mapping[permutation].len()
|| right_row >= self.mapping[permutation][right_column].len()
{
return Err(Error::BoundsFailure);
}
let mut left_cycle = self.aux[permutation][left_column][left_row];
let mut right_cycle = self.aux[permutation][right_column][right_row];
if left_cycle == right_cycle {
return Ok(());
}
if self.sizes[permutation][left_cycle.0][left_cycle.1]
< self.sizes[permutation][right_cycle.0][right_cycle.1]
{
std::mem::swap(&mut left_cycle, &mut right_cycle);
}
self.sizes[permutation][left_cycle.0][left_cycle.1] +=
self.sizes[permutation][right_cycle.0][right_cycle.1];
let mut i = right_cycle;
loop {
self.aux[permutation][i.0][i.1] = left_cycle;
i = self.mapping[permutation][i.0][i.1];
if i == right_cycle {
break;
}
}
let tmp = self.mapping[permutation][left_column][left_row];
self.mapping[permutation][left_column][left_row] =
self.mapping[permutation][right_column][right_row];
self.mapping[permutation][right_column][right_row] = tmp;
Ok(())
self.permutations
.copy(permutation, left_column, left_row, right_column, right_row)
}
}
let mut cs = ConstraintSystem::default();
let config = ConcreteCircuit::configure(&mut cs);
// Get the largest permutation argument length in terms of the number of
// advice columns involved.
let mut largest_permutation_length = 0;
for permutation in &cs.permutations {
largest_permutation_length = std::cmp::max(permutation.len(), largest_permutation_length);
}
// The permutation argument will serve alongside the gates, so must be
// accounted for.
let mut degree = largest_permutation_length + 1;
let mut degree = cs
.permutations
.iter()
.map(|p| p.required_degree())
.max()
.unwrap_or(1);
// Account for each gate to ensure our quotient polynomial is the
// correct degree and that our extended domain is the right size.
@ -126,95 +84,16 @@ where
let domain = EvaluationDomain::new(degree as u32, params.k);
// Compute [omega^0, omega^1, ..., omega^{params.n - 1}]
let mut omega_powers = Vec::with_capacity(params.n as usize);
{
let mut cur = C::Scalar::one();
for _ in 0..params.n {
omega_powers.push(cur);
cur *= &domain.get_omega();
}
}
// Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m]
let mut deltaomega = Vec::with_capacity(largest_permutation_length);
{
let mut cur = C::Scalar::one();
for _ in 0..largest_permutation_length {
let mut omega_powers = omega_powers.clone();
for o in &mut omega_powers {
*o *= &cur;
}
deltaomega.push(omega_powers);
cur *= &C::Scalar::DELTA;
}
}
let mut assembly: Assembly<C::Scalar> = Assembly {
fixed: vec![domain.empty_lagrange(); cs.num_fixed_columns],
mapping: vec![],
aux: vec![],
sizes: vec![],
permutations: permutation::keygen::Assembly::new(params, &cs),
_marker: std::marker::PhantomData,
};
// Initialize the copy vector to keep track of copy constraints in all
// the permutation arguments.
for permutation in &cs.permutations {
let mut columns = vec![];
for i in 0..permutation.len() {
// Computes [(i, 0), (i, 1), ..., (i, n - 1)]
columns.push((0..params.n).map(|j| (i, j as usize)).collect());
}
assembly.mapping.push(columns.clone());
assembly.aux.push(columns);
assembly
.sizes
.push(vec![vec![1usize; params.n as usize]; permutation.len()]);
}
// Synthesize the circuit to obtain SRS
circuit.synthesize(&mut assembly, config)?;
// Compute permutation polynomials, convert to coset form and
// pre-compute commitments for the SRS.
let mut permutation_commitments = vec![];
let mut permutations = vec![];
let mut permutation_polys = vec![];
let mut permutation_cosets = vec![];
for (permutation_index, permutation) in cs.permutations.iter().enumerate() {
let mut commitments = vec![];
let mut inner_permutations = vec![];
let mut polys = vec![];
let mut cosets = vec![];
for i in 0..permutation.len() {
// Computes the permutation polynomial based on the permutation
// description in the assembly.
let mut permutation_poly = domain.empty_lagrange();
for (j, p) in permutation_poly.iter_mut().enumerate() {
let (permuted_i, permuted_j) = assembly.mapping[permutation_index][i][j];
*p = deltaomega[permuted_i][permuted_j];
}
// Compute commitment to permutation polynomial
commitments.push(
params
.commit_lagrange(&permutation_poly, Blind::default())
.to_affine(),
);
// Store permutation polynomial and precompute its coset evaluation
inner_permutations.push(permutation_poly.clone());
let poly = domain.lagrange_to_coeff(permutation_poly);
polys.push(poly.clone());
cosets.push(domain.coeff_to_extended(poly, Rotation::default()));
}
permutation_commitments.push(commitments);
permutations.push(inner_permutations);
permutation_polys.push(polys);
permutation_cosets.push(cosets);
}
let (permutation_pks, permutation_vks) = assembly.permutations.build_keys(params, &cs, &domain);
let fixed_commitments = assembly
.fixed
@ -248,14 +127,12 @@ where
vk: VerifyingKey {
domain,
fixed_commitments,
permutation_commitments,
permutations: permutation_vks,
cs,
},
l0,
fixed_polys,
fixed_cosets,
permutations,
permutation_polys,
permutation_cosets,
permutations: permutation_pks,
})
}
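For reference, the omega and delta tables that this diff relocates into permutation::keygen encode the identity permutation over an m-column, n-row grid as distinct field elements. Assuming, as in the Halo/PLONK write-ups, that omega has order n and the powers of delta used here lie in distinct cosets of the subgroup generated by omega, the encoding is:

```latex
% Identity-permutation encoding used by the keygen tables (notation assumed):
%   column c, row r  |-->  delta^c * omega^r
\sigma_{\mathrm{id}}(c, r) \;=\; \delta^{c}\,\omega^{r},
\qquad
s_{c}(\omega^{r}) \;=\; \delta^{c'}\,\omega^{r'} \ \text{ when cell } (c, r) \mapsto (c', r').
```

This is exactly what the deltaomega[permuted_i][permuted_j] lookup in the new src/plonk/permutation/keygen.rs (below) implements when it fills in each permutation polynomial.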

src/plonk/permutation.rs (new file, 52 lines)
View File

@ -0,0 +1,52 @@
//! Implementation of a PLONK permutation argument.
use super::circuit::{Advice, Column};
use crate::{
arithmetic::CurveAffine,
poly::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial},
};
pub(crate) mod keygen;
mod prover;
mod verifier;
/// A permutation argument.
#[derive(Debug, Clone)]
pub(crate) struct Argument {
/// A sequence of columns involved in the argument.
columns: Vec<Column<Advice>>,
}
impl Argument {
pub(crate) fn new(columns: Vec<Column<Advice>>) -> Self {
Argument { columns }
}
pub(crate) fn required_degree(&self) -> usize {
// The permutation argument will serve alongside the gates, so must be
// accounted for.
self.columns.len() + 1
}
}
/// The verifying key for a single permutation argument.
#[derive(Debug)]
pub(crate) struct VerifyingKey<C: CurveAffine> {
commitments: Vec<C>,
}
/// The proving key for a single permutation argument.
#[derive(Debug)]
pub(crate) struct ProvingKey<C: CurveAffine> {
permutations: Vec<Polynomial<C::Scalar, LagrangeCoeff>>,
polys: Vec<Polynomial<C::Scalar, Coeff>>,
cosets: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
}
#[derive(Debug, Clone)]
pub(crate) struct Proof<C: CurveAffine> {
permutation_product_commitments: Vec<C>,
permutation_product_evals: Vec<C::Scalar>,
permutation_product_inv_evals: Vec<C::Scalar>,
permutation_evals: Vec<Vec<C::Scalar>>,
}
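A short justification for required_degree above: with m = columns.len() advice columns, the argument's product rule multiplies the running-product column z(X) by one factor per column, so with every polynomial of degree at most n - 1 the largest constraint has degree

```latex
\deg\Big( z(X)\prod_{j=1}^{m}\big(p_j(X) + \beta\,s_j(X) + \gamma\big) \Big) \;=\; (m+1)(n-1),
```

which is why the degree bound reported to the rest of the constraint system is m + 1.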

View File

@ -0,0 +1,179 @@
use ff::Field;
use super::{ProvingKey, VerifyingKey};
use crate::{
arithmetic::{Curve, CurveAffine, FieldExt},
plonk::{circuit::ConstraintSystem, Error},
poly::{
commitment::{Blind, Params},
EvaluationDomain, Rotation,
},
};
pub(crate) struct Assembly {
mapping: Vec<Vec<Vec<(usize, usize)>>>,
aux: Vec<Vec<Vec<(usize, usize)>>>,
sizes: Vec<Vec<Vec<usize>>>,
}
impl Assembly {
pub(crate) fn new<C: CurveAffine>(
params: &Params<C>,
cs: &ConstraintSystem<C::Scalar>,
) -> Self {
let mut assembly = Assembly {
mapping: vec![],
aux: vec![],
sizes: vec![],
};
// Initialize the copy vector to keep track of copy constraints in all
// the permutation arguments.
for p in &cs.permutations {
let mut columns = vec![];
for i in 0..p.columns.len() {
// Computes [(i, 0), (i, 1), ..., (i, n - 1)]
columns.push((0..params.n).map(|j| (i, j as usize)).collect());
}
assembly.mapping.push(columns.clone());
assembly.aux.push(columns);
assembly
.sizes
.push(vec![vec![1usize; params.n as usize]; p.columns.len()]);
}
assembly
}
pub(crate) fn copy(
&mut self,
permutation: usize,
left_column: usize,
left_row: usize,
right_column: usize,
right_row: usize,
) -> Result<(), Error> {
// Check bounds first
if permutation >= self.mapping.len()
|| left_column >= self.mapping[permutation].len()
|| left_row >= self.mapping[permutation][left_column].len()
|| right_column >= self.mapping[permutation].len()
|| right_row >= self.mapping[permutation][right_column].len()
{
return Err(Error::BoundsFailure);
}
let mut left_cycle = self.aux[permutation][left_column][left_row];
let mut right_cycle = self.aux[permutation][right_column][right_row];
if left_cycle == right_cycle {
return Ok(());
}
if self.sizes[permutation][left_cycle.0][left_cycle.1]
< self.sizes[permutation][right_cycle.0][right_cycle.1]
{
std::mem::swap(&mut left_cycle, &mut right_cycle);
}
self.sizes[permutation][left_cycle.0][left_cycle.1] +=
self.sizes[permutation][right_cycle.0][right_cycle.1];
let mut i = right_cycle;
loop {
self.aux[permutation][i.0][i.1] = left_cycle;
i = self.mapping[permutation][i.0][i.1];
if i == right_cycle {
break;
}
}
let tmp = self.mapping[permutation][left_column][left_row];
self.mapping[permutation][left_column][left_row] =
self.mapping[permutation][right_column][right_row];
self.mapping[permutation][right_column][right_row] = tmp;
Ok(())
}
pub(crate) fn build_keys<C: CurveAffine>(
self,
params: &Params<C>,
cs: &ConstraintSystem<C::Scalar>,
domain: &EvaluationDomain<C::Scalar>,
) -> (Vec<ProvingKey<C>>, Vec<VerifyingKey<C>>) {
// Get the largest permutation argument length in terms of the number of
// advice columns involved.
let largest_permutation_length = cs
.permutations
.iter()
.map(|p| p.columns.len())
.max()
.unwrap_or_default();
// Compute [omega^0, omega^1, ..., omega^{params.n - 1}]
let mut omega_powers = Vec::with_capacity(params.n as usize);
{
let mut cur = C::Scalar::one();
for _ in 0..params.n {
omega_powers.push(cur);
cur *= &domain.get_omega();
}
}
// Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m]
let mut deltaomega = Vec::with_capacity(largest_permutation_length);
{
let mut cur = C::Scalar::one();
for _ in 0..largest_permutation_length {
let mut omega_powers = omega_powers.clone();
for o in &mut omega_powers {
*o *= &cur;
}
deltaomega.push(omega_powers);
cur *= &C::Scalar::DELTA;
}
}
// Compute permutation polynomials, convert to coset form and
// pre-compute commitments for the SRS.
let mut pks = vec![];
let mut vks = vec![];
for (p, mapping) in cs.permutations.iter().zip(self.mapping.iter()) {
let mut commitments = vec![];
let mut permutations = vec![];
let mut polys = vec![];
let mut cosets = vec![];
for i in 0..p.columns.len() {
// Computes the permutation polynomial based on the permutation
// description in the assembly.
let mut permutation_poly = domain.empty_lagrange();
for (j, p) in permutation_poly.iter_mut().enumerate() {
let (permuted_i, permuted_j) = mapping[i][j];
*p = deltaomega[permuted_i][permuted_j];
}
// Compute commitment to permutation polynomial
commitments.push(
params
.commit_lagrange(&permutation_poly, Blind::default())
.to_affine(),
);
// Store permutation polynomial and precompute its coset evaluation
permutations.push(permutation_poly.clone());
let poly = domain.lagrange_to_coeff(permutation_poly);
polys.push(poly.clone());
cosets.push(domain.coeff_to_extended(poly, Rotation::default()));
}
vks.push(VerifyingKey { commitments });
pks.push(ProvingKey {
permutations,
polys,
cosets,
});
}
(pks, vks)
}
}
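The copy method above (moved here verbatim from keygen.rs) maintains disjoint copy cycles with a union-by-size strategy: mapping stores each cell's successor on its cycle, aux caches a representative cell for each cycle, and sizes records cycle lengths so the smaller cycle is always relabeled. The standalone miniature below is hypothetical (not the crate's code) and flattens the structure to a single column of cells, but performs the same representative repointing and successor swap:

```rust
// A toy, single-column version of Assembly's cycle merging.
struct MiniAssembly {
    mapping: Vec<usize>, // next cell on this cell's cycle
    aux: Vec<usize>,     // a representative cell for this cell's cycle
    sizes: Vec<usize>,   // cycle length, tracked at the representative
}

impl MiniAssembly {
    fn new(n: usize) -> Self {
        MiniAssembly {
            mapping: (0..n).collect(), // every cell starts in its own 1-cycle
            aux: (0..n).collect(),
            sizes: vec![1; n],
        }
    }

    fn copy(&mut self, left: usize, right: usize) {
        let mut l = self.aux[left];
        let mut r = self.aux[right];
        if l == r {
            return; // already constrained equal
        }
        // Union by size: merge the smaller cycle into the larger one.
        if self.sizes[l] < self.sizes[r] {
            std::mem::swap(&mut l, &mut r);
        }
        let merged = self.sizes[r];
        self.sizes[l] += merged;
        // Repoint every cell on the right cycle at the left representative.
        let mut i = r;
        loop {
            self.aux[i] = l;
            i = self.mapping[i];
            if i == r {
                break;
            }
        }
        // Splice the two cycles together by swapping successors.
        self.mapping.swap(left, right);
    }
}

fn main() {
    let mut a = MiniAssembly::new(4);
    a.copy(0, 1);
    a.copy(2, 3);
    a.copy(1, 2);
    // All four cells now sit on one cycle of length 4.
    assert_eq!(a.sizes[a.aux[0]], 4);
    println!("cells 0..4 now form one copy cycle");
}
```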

View File

@ -0,0 +1,392 @@
use ff::Field;
use std::iter;
use super::{Argument, Proof};
use crate::{
arithmetic::{eval_polynomial, parallelize, BatchInvert, Curve, CurveAffine, FieldExt},
plonk::{ChallengeBeta, ChallengeGamma, ChallengeX, Error, ProvingKey},
poly::{
commitment::{Blind, Params},
multiopen::ProverQuery,
Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation,
},
transcript::{Hasher, Transcript},
};
#[derive(Clone)]
pub(crate) struct Committed<C: CurveAffine> {
permutation_product_polys: Vec<Polynomial<C::Scalar, Coeff>>,
permutation_product_cosets: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
permutation_product_cosets_inv: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
permutation_product_blinds: Vec<Blind<C::Scalar>>,
permutation_product_commitments: Vec<C>,
}
pub(crate) struct Constructed<C: CurveAffine> {
permutation_product_polys: Vec<Polynomial<C::Scalar, Coeff>>,
permutation_product_blinds: Vec<Blind<C::Scalar>>,
permutation_product_commitments: Vec<C>,
}
pub(crate) struct Evaluated<C: CurveAffine> {
constructed: Constructed<C>,
permutation_product_evals: Vec<C::Scalar>,
permutation_product_inv_evals: Vec<C::Scalar>,
permutation_evals: Vec<Vec<C::Scalar>>,
}
impl Argument {
pub(in crate::plonk) fn commit<
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
>(
params: &Params<C>,
pk: &ProvingKey<C>,
advice: &[Polynomial<C::Scalar, LagrangeCoeff>],
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<Committed<C>, Error> {
let domain = &pk.vk.domain;
// Compute permutation product polynomial commitment
let mut permutation_product_polys = vec![];
let mut permutation_product_cosets = vec![];
let mut permutation_product_cosets_inv = vec![];
let mut permutation_product_commitments_projective = vec![];
let mut permutation_product_blinds = vec![];
// Iterate over each permutation
let mut permutation_modified_advice = pk
.vk
.cs
.permutations
.iter()
.zip(pk.permutations.iter())
// Goal is to compute the products of fractions
//
// (p_j(\omega^i) + \delta^j \omega^i \beta + \gamma) /
// (p_j(\omega^i) + \beta s_j(\omega^i) + \gamma)
//
// where p_j(X) is the jth advice column in this permutation,
// and i is the ith row of the column.
.map(|(p, pkey)| {
let mut modified_advice = vec![C::Scalar::one(); params.n as usize];
// Iterate over each column of the permutation
for (&column, permuted_column_values) in
p.columns.iter().zip(pkey.permutations.iter())
{
parallelize(&mut modified_advice, |modified_advice, start| {
for ((modified_advice, advice_value), permuted_advice_value) in
modified_advice
.iter_mut()
.zip(advice[column.index()][start..].iter())
.zip(permuted_column_values[start..].iter())
{
*modified_advice *=
&(*beta * permuted_advice_value + &gamma + advice_value);
}
});
}
modified_advice
})
.collect::<Vec<_>>();
// Batch invert to obtain the denominators for the permutation product
// polynomials
permutation_modified_advice
.iter_mut()
.flat_map(|v| v.iter_mut())
.batch_invert();
for (p, mut modified_advice) in pk
.vk
.cs
.permutations
.iter()
.zip(permutation_modified_advice.into_iter())
{
// Iterate over each column again, this time finishing the computation
// of the entire fraction by computing the numerators
let mut deltaomega = C::Scalar::one();
for &column in p.columns.iter() {
let omega = domain.get_omega();
parallelize(&mut modified_advice, |modified_advice, start| {
let mut deltaomega = deltaomega * &omega.pow_vartime(&[start as u64, 0, 0, 0]);
for (modified_advice, advice_value) in modified_advice
.iter_mut()
.zip(advice[column.index()][start..].iter())
{
// Multiply by p_j(\omega^i) + \delta^j \omega^i \beta
*modified_advice *= &(deltaomega * &beta + &gamma + advice_value);
deltaomega *= &omega;
}
});
deltaomega *= &C::Scalar::DELTA;
}
// The modified_advice vector is a vector of products of fractions
// of the form
//
// (p_j(\omega^i) + \delta^j \omega^i \beta + \gamma) /
// (p_j(\omega^i) + \beta s_j(\omega^i) + \gamma)
//
// where i is the index into modified_advice, for the jth column in
// the permutation
// Compute the evaluations of the permutation product polynomial
// over our domain, starting with z[0] = 1
let mut z = vec![C::Scalar::one()];
for row in 1..(params.n as usize) {
let mut tmp = z[row - 1];
tmp *= &modified_advice[row];
z.push(tmp);
}
let z = domain.lagrange_from_vec(z);
let blind = Blind(C::Scalar::rand());
permutation_product_commitments_projective.push(params.commit_lagrange(&z, blind));
permutation_product_blinds.push(blind);
let z = domain.lagrange_to_coeff(z);
permutation_product_polys.push(z.clone());
permutation_product_cosets
.push(domain.coeff_to_extended(z.clone(), Rotation::default()));
permutation_product_cosets_inv.push(domain.coeff_to_extended(z, Rotation(-1)));
}
let mut permutation_product_commitments =
vec![C::zero(); permutation_product_commitments_projective.len()];
C::Projective::batch_to_affine(
&permutation_product_commitments_projective,
&mut permutation_product_commitments,
);
let permutation_product_commitments = permutation_product_commitments;
drop(permutation_product_commitments_projective);
// Hash each permutation product commitment
for c in &permutation_product_commitments {
transcript
.absorb_point(c)
.map_err(|_| Error::TranscriptError)?;
}
Ok(Committed {
permutation_product_polys,
permutation_product_cosets,
permutation_product_cosets_inv,
permutation_product_blinds,
permutation_product_commitments,
})
}
}
impl<C: CurveAffine> Committed<C> {
pub(in crate::plonk) fn construct<'a>(
self,
pk: &'a ProvingKey<C>,
advice_cosets: &'a [Polynomial<C::Scalar, ExtendedLagrangeCoeff>],
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
) -> Result<
(
Constructed<C>,
impl Iterator<Item = Polynomial<C::Scalar, ExtendedLagrangeCoeff>> + 'a,
),
Error,
> {
let domain = &pk.vk.domain;
let permutation_product_cosets_owned = self.permutation_product_cosets.clone();
let permutation_product_cosets = self.permutation_product_cosets.clone();
let permutation_product_cosets_inv = self.permutation_product_cosets_inv.clone();
let expressions = iter::empty()
// l_0(X) * (1 - z(X)) = 0
.chain(
permutation_product_cosets_owned
.into_iter()
.map(move |coset| Polynomial::one_minus(coset) * &pk.l0),
)
// z(X) \prod (p(X) + \beta s_i(X) + \gamma) - z(omega^{-1} X) \prod (p(X) + \delta^i \beta X + \gamma)
.chain(
pk.vk
.cs
.permutations
.iter()
.zip(pk.permutations.iter())
.zip(permutation_product_cosets.into_iter())
.zip(permutation_product_cosets_inv.into_iter())
.map(move |(((p, pkey), cosets), cosets_inv)| {
let mut left = cosets;
for (advice, permutation) in p
.columns
.iter()
.map(|&column| {
&advice_cosets[pk.vk.cs.get_advice_query_index(column, 0)]
})
.zip(pkey.cosets.iter())
{
parallelize(&mut left, |left, start| {
for ((left, advice), permutation) in left
.iter_mut()
.zip(advice[start..].iter())
.zip(permutation[start..].iter())
{
*left *= &(*advice + &(*beta * permutation) + &gamma);
}
});
}
let mut right = cosets_inv;
let mut current_delta = *beta * &C::Scalar::ZETA;
let step = domain.get_extended_omega();
for advice in p.columns.iter().map(|&column| {
&advice_cosets[pk.vk.cs.get_advice_query_index(column, 0)]
}) {
parallelize(&mut right, move |right, start| {
let mut beta_term =
current_delta * &step.pow_vartime(&[start as u64, 0, 0, 0]);
for (right, advice) in right.iter_mut().zip(advice[start..].iter())
{
*right *= &(*advice + &beta_term + &gamma);
beta_term *= &step;
}
});
current_delta *= &C::Scalar::DELTA;
}
left - &right
}),
);
Ok((
Constructed {
permutation_product_polys: self.permutation_product_polys,
permutation_product_blinds: self.permutation_product_blinds,
permutation_product_commitments: self.permutation_product_commitments,
},
expressions,
))
}
}
impl<C: CurveAffine> super::ProvingKey<C> {
fn evaluate(&self, x: ChallengeX<C::Scalar>) -> Vec<C::Scalar> {
self.polys
.iter()
.map(|poly| eval_polynomial(poly, *x))
.collect()
}
fn open<'a>(
&'a self,
evals: &'a [C::Scalar],
x: ChallengeX<C::Scalar>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
self.polys
.iter()
.zip(evals.iter())
.map(move |(poly, eval)| ProverQuery {
point: *x,
poly,
blind: Blind::default(),
eval: *eval,
})
}
}
impl<C: CurveAffine> Constructed<C> {
pub(in crate::plonk) fn evaluate<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
self,
pk: &ProvingKey<C>,
x: ChallengeX<C::Scalar>,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Evaluated<C> {
let domain = &pk.vk.domain;
let permutation_product_evals: Vec<_> = self
.permutation_product_polys
.iter()
.map(|poly| eval_polynomial(poly, *x))
.collect();
let permutation_product_inv_evals: Vec<_> = self
.permutation_product_polys
.iter()
.map(|poly| eval_polynomial(poly, domain.rotate_omega(*x, Rotation(-1))))
.collect();
let permutation_evals: Vec<_> = pk.permutations.iter().map(|p| p.evaluate(x)).collect();
// Hash each permutation evaluation into the transcript
for eval in permutation_product_evals
.iter()
.chain(permutation_product_inv_evals.iter())
.chain(permutation_evals.iter().flat_map(|evals| evals.iter()))
{
transcript.absorb_scalar(*eval);
}
Evaluated {
constructed: self,
permutation_product_evals,
permutation_product_inv_evals,
permutation_evals,
}
}
}
impl<C: CurveAffine> Evaluated<C> {
pub(in crate::plonk) fn open<'a>(
&'a self,
pk: &'a ProvingKey<C>,
x: ChallengeX<C::Scalar>,
) -> impl Iterator<Item = ProverQuery<'a, C>> + Clone {
let x_inv = pk.vk.domain.rotate_omega(*x, Rotation(-1));
iter::empty()
// Open permutation product commitments at x and \omega^{-1} x
.chain(
self.constructed
.permutation_product_polys
.iter()
.zip(self.constructed.permutation_product_blinds.iter())
.zip(self.permutation_product_evals.iter())
.zip(self.permutation_product_inv_evals.iter())
.flat_map(move |(((poly, blind), eval), inv_eval)| {
iter::empty()
.chain(Some(ProverQuery {
point: *x,
poly,
blind: *blind,
eval: *eval,
}))
.chain(Some(ProverQuery {
point: x_inv,
poly,
blind: *blind,
eval: *inv_eval,
}))
}),
)
// Open permutation polynomial commitments at x
.chain(
pk.permutations
.iter()
.zip(self.permutation_evals.iter())
.flat_map(move |(permutation, evals)| permutation.open(evals, x)),
)
}
pub(crate) fn build(self) -> Proof<C> {
Proof {
permutation_product_commitments: self.constructed.permutation_product_commitments,
permutation_product_evals: self.permutation_product_evals,
permutation_product_inv_evals: self.permutation_product_inv_evals,
permutation_evals: self.permutation_evals,
}
}
}
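To see why Argument::commit (earlier in this file) can build the product column in a single running pass, note that each row contributes one fraction and the fractions telescope over the whole domain whenever the copied cells really do hold equal values, so the column closes back on 1. The toy below reproduces that running product over a small prime field (p = 101, chosen only for the example; not the crate's field types or API):

```rust
// Toy running-product column, mirroring z[0] = 1 and
// z[row] = z[row - 1] * numerator[row] * denominator[row]^{-1}.
const P: u64 = 101;

fn pow_mod(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

fn inv(x: u64) -> u64 {
    // Fermat's little theorem; fine for a toy prime field.
    pow_mod(x, P - 2)
}

fn main() {
    // Per-row numerators and denominators of the permutation fractions;
    // the denominators are a cyclic shift of the numerators, as they would
    // be when the permutation just shuffles equal advice values.
    let numerators = [3u64, 7, 9, 12];
    let denominators = [7u64, 9, 12, 3];

    // z[0] = 1; z[row] = z[row - 1] * num[row] * den[row]^{-1}.
    let mut z = vec![1u64];
    for row in 1..numerators.len() {
        let frac = numerators[row] * inv(denominators[row]) % P;
        z.push(z[row - 1] * frac % P);
    }

    // The product of every fraction is 1, so the column wraps around to 1.
    let last = numerators.len() - 1;
    let wrap = z[last] * numerators[0] % P * inv(denominators[0]) % P;
    assert_eq!(wrap, 1);
    println!("z = {:?}", z);
}
```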

View File

@ -0,0 +1,155 @@
use ff::Field;
use std::iter;
use super::Proof;
use crate::{
arithmetic::{CurveAffine, FieldExt},
plonk::{ChallengeBeta, ChallengeGamma, ChallengeX, Error, VerifyingKey},
poly::{multiopen::VerifierQuery, Rotation},
transcript::{Hasher, Transcript},
};
impl<C: CurveAffine> Proof<C> {
pub(crate) fn check_lengths(&self, vk: &VerifyingKey<C>) -> Result<(), Error> {
if self.permutation_evals.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
for (permutation_evals, p) in self.permutation_evals.iter().zip(vk.cs.permutations.iter()) {
if permutation_evals.len() != p.columns.len() {
return Err(Error::IncompatibleParams);
}
}
if self.permutation_product_inv_evals.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
if self.permutation_product_evals.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
if self.permutation_product_commitments.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
Ok(())
}
pub(crate) fn absorb_commitments<HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>(
&self,
transcript: &mut Transcript<C, HBase, HScalar>,
) -> Result<(), Error> {
for c in &self.permutation_product_commitments {
transcript
.absorb_point(c)
.map_err(|_| Error::TranscriptError)?;
}
Ok(())
}
pub(in crate::plonk) fn expressions<'a>(
&'a self,
vk: &'a VerifyingKey<C>,
advice_evals: &'a [C::Scalar],
l_0: C::Scalar,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
x: ChallengeX<C::Scalar>,
) -> impl Iterator<Item = C::Scalar> + 'a {
iter::empty()
// l_0(X) * (1 - z(X)) = 0
.chain(
self.permutation_product_evals
.iter()
.map(move |product_eval| l_0 * &(C::Scalar::one() - product_eval)),
)
// z(X) \prod (p(X) + \beta s_i(X) + \gamma)
// - z(omega^{-1} X) \prod (p(X) + \delta^i \beta X + \gamma)
.chain(
vk.cs
.permutations
.iter()
.zip(self.permutation_evals.iter())
.zip(self.permutation_product_evals.iter())
.zip(self.permutation_product_inv_evals.iter())
.map(
move |(((p, permutation_evals), product_eval), product_inv_eval)| {
let mut left = *product_eval;
for (advice_eval, permutation_eval) in p
.columns
.iter()
.map(|&column| {
advice_evals[vk.cs.get_advice_query_index(column, 0)]
})
.zip(permutation_evals.iter())
{
left *= &(advice_eval + &(*beta * permutation_eval) + &gamma);
}
let mut right = *product_inv_eval;
let mut current_delta = *beta * &x;
for advice_eval in p.columns.iter().map(|&column| {
advice_evals[vk.cs.get_advice_query_index(column, 0)]
}) {
right *= &(advice_eval + &current_delta + &gamma);
current_delta *= &C::Scalar::DELTA;
}
left - &right
},
),
)
}
pub(crate) fn evals(&self) -> impl Iterator<Item = &C::Scalar> {
self.permutation_product_evals
.iter()
.chain(self.permutation_product_inv_evals.iter())
.chain(self.permutation_evals.iter().flat_map(|evals| evals.iter()))
}
pub(in crate::plonk) fn queries<'a>(
&'a self,
vk: &'a VerifyingKey<C>,
x: ChallengeX<C::Scalar>,
) -> impl Iterator<Item = VerifierQuery<'a, C>> + Clone {
let x_inv = vk.domain.rotate_omega(*x, Rotation(-1));
iter::empty()
// Open permutation product commitments at x and \omega^{-1} x
.chain(
self.permutation_product_commitments
.iter()
.enumerate()
.zip(self.permutation_product_evals.iter())
.zip(self.permutation_product_inv_evals.iter())
.flat_map(move |(((idx, _), &eval), &inv_eval)| {
iter::empty()
.chain(Some(VerifierQuery {
point: *x,
commitment: &self.permutation_product_commitments[idx],
eval,
}))
.chain(Some(VerifierQuery {
point: x_inv,
commitment: &self.permutation_product_commitments[idx],
eval: inv_eval,
}))
}),
)
// Open permutation commitments for each permutation argument at x
.chain(
(0..vk.permutations.len())
.map(move |outer_idx| {
let inner_len = vk.permutations[outer_idx].commitments.len();
(0..inner_len).map(move |inner_idx| VerifierQuery {
point: *x,
commitment: &vk.permutations[outer_idx].commitments[inner_idx],
eval: self.permutation_evals[outer_idx][inner_idx],
})
})
.flatten(),
)
}
}

View File

@ -3,16 +3,13 @@ use std::iter;
use super::{
circuit::{Advice, Assignment, Circuit, Column, ConstraintSystem, Fixed},
Error, Proof, ProvingKey,
};
use crate::arithmetic::{
eval_polynomial, get_challenge_scalar, parallelize, BatchInvert, Challenge, Curve, CurveAffine,
FieldExt,
permutation, ChallengeBeta, ChallengeGamma, ChallengeX, ChallengeY, Error, Proof, ProvingKey,
};
use crate::arithmetic::{eval_polynomial, Curve, CurveAffine, FieldExt};
use crate::poly::{
commitment::{Blind, Params},
multiopen::{self, ProverQuery},
LagrangeCoeff, Polynomial, Rotation,
LagrangeCoeff, Polynomial,
};
use crate::transcript::{Hasher, Transcript};
@ -171,203 +168,52 @@ impl<C: CurveAffine> Proof<C> {
})
.collect();
// Sample x_0 challenge
let x_0: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample beta challenge
let beta = ChallengeBeta::get(&mut transcript);
// Sample x_1 challenge
let x_1: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample gamma challenge
let gamma = ChallengeGamma::get(&mut transcript);
// Compute permutation product polynomial commitment
let mut permutation_product_polys = vec![];
let mut permutation_product_cosets = vec![];
let mut permutation_product_cosets_inv = vec![];
let mut permutation_product_commitments_projective = vec![];
let mut permutation_product_blinds = vec![];
// Iterate over each permutation
let mut permutation_modified_advice = pk
.vk
.cs
.permutations
.iter()
.zip(pk.permutations.iter())
// Goal is to compute the products of fractions
//
// (p_j(\omega^i) + \delta^j \omega^i \beta + \gamma) /
// (p_j(\omega^i) + \beta s_j(\omega^i) + \gamma)
//
// where p_j(X) is the jth advice column in this permutation,
// and i is the ith row of the column.
.map(|(columns, permuted_values)| {
let mut modified_advice = vec![C::Scalar::one(); params.n as usize];
// Iterate over each column of the permutation
for (&column, permuted_column_values) in columns.iter().zip(permuted_values.iter())
{
parallelize(&mut modified_advice, |modified_advice, start| {
for ((modified_advice, advice_value), permuted_advice_value) in
modified_advice
.iter_mut()
.zip(witness.advice[column.index()][start..].iter())
.zip(permuted_column_values[start..].iter())
{
*modified_advice *=
&(x_0 * permuted_advice_value + &x_1 + advice_value);
}
});
}
modified_advice
})
.collect::<Vec<_>>();
// Batch invert to obtain the denominators for the permutation product
// polynomials
permutation_modified_advice
.iter_mut()
.flat_map(|v| v.iter_mut())
.batch_invert();
for (columns, mut modified_advice) in pk
.vk
.cs
.permutations
.iter()
.zip(permutation_modified_advice.into_iter())
{
// Iterate over each column again, this time finishing the computation
// of the entire fraction by computing the numerators
let mut deltaomega = C::Scalar::one();
for &column in columns.iter() {
let omega = domain.get_omega();
parallelize(&mut modified_advice, |modified_advice, start| {
let mut deltaomega = deltaomega * &omega.pow_vartime(&[start as u64, 0, 0, 0]);
for (modified_advice, advice_value) in modified_advice
.iter_mut()
.zip(witness.advice[column.index()][start..].iter())
{
// Multiply by p_j(\omega^i) + \delta^j \omega^i \beta
*modified_advice *= &(deltaomega * &x_0 + &x_1 + advice_value);
deltaomega *= &omega;
}
});
deltaomega *= &C::Scalar::DELTA;
}
// The modified_advice vector is a vector of products of fractions
// of the form
//
// (p_j(\omega^i) + \delta^j \omega^i \beta + \gamma) /
// (p_j(\omega^i) + \beta s_j(\omega^i) + \gamma)
//
// where i is the index into modified_advice, for the jth column in
// the permutation
// Compute the evaluations of the permutation product polynomial
// over our domain, starting with z[0] = 1
let mut z = vec![C::Scalar::one()];
for row in 1..(params.n as usize) {
let mut tmp = z[row - 1];
tmp *= &modified_advice[row];
z.push(tmp);
}
let z = domain.lagrange_from_vec(z);
let blind = Blind(C::Scalar::rand());
permutation_product_commitments_projective.push(params.commit_lagrange(&z, blind));
permutation_product_blinds.push(blind);
let z = domain.lagrange_to_coeff(z);
permutation_product_polys.push(z.clone());
permutation_product_cosets
.push(domain.coeff_to_extended(z.clone(), Rotation::default()));
permutation_product_cosets_inv.push(domain.coeff_to_extended(z, Rotation(-1)));
}
let mut permutation_product_commitments =
vec![C::zero(); permutation_product_commitments_projective.len()];
C::Projective::batch_to_affine(
&permutation_product_commitments_projective,
&mut permutation_product_commitments,
);
let permutation_product_commitments = permutation_product_commitments;
drop(permutation_product_commitments_projective);
// Hash each permutation product commitment
for c in &permutation_product_commitments {
transcript
.absorb_point(c)
.map_err(|_| Error::TranscriptError)?;
}
// Commit to permutations, if any.
let permutations = if !pk.vk.cs.permutations.is_empty() {
Some(permutation::Argument::commit(
params,
pk,
&witness.advice,
beta,
gamma,
&mut transcript,
)?)
} else {
None
};
// Obtain challenge for keeping all separate gates linearly independent
let x_2: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let y = ChallengeY::<C::Scalar>::get(&mut transcript);
// Evaluate the h(X) polynomial's constraint system expressions for the permutation constraints, if any.
let (permutations, permutation_expressions) = permutations
.map(|p| p.construct(pk, &advice_cosets, beta, gamma))
.transpose()?
.map(|(p, expressions)| (Some(p), Some(expressions)))
.unwrap_or_default();
// Evaluate the h(X) polynomial's constraint system expressions for the constraints provided
let h_poly =
iter::empty()
// Custom constraints
.chain(meta.gates.iter().map(|poly| {
poly.evaluate(
&|index| pk.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|index| aux_cosets[index].clone(),
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
)
}))
// l_0(X) * (1 - z(X)) = 0
.chain(
permutation_product_cosets
.iter()
.cloned()
.map(|coset| Polynomial::one_minus(coset) * &pk.l0),
let h_poly = iter::empty()
// Custom constraints
.chain(meta.gates.iter().map(|poly| {
poly.evaluate(
&|index| pk.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|index| aux_cosets[index].clone(),
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
)
// z(X) \prod (p(X) + \beta s_i(X) + \gamma) - z(omega^{-1} X) \prod (p(X) + \delta^i \beta X + \gamma)
.chain(pk.vk.cs.permutations.iter().enumerate().map(
|(permutation_index, columns)| {
let mut left = permutation_product_cosets[permutation_index].clone();
for (advice, permutation) in columns
.iter()
.map(|&column| {
&advice_cosets[pk.vk.cs.get_advice_query_index(column, 0)]
})
.zip(pk.permutation_cosets[permutation_index].iter())
{
parallelize(&mut left, |left, start| {
for ((left, advice), permutation) in left
.iter_mut()
.zip(advice[start..].iter())
.zip(permutation[start..].iter())
{
*left *= &(*advice + &(x_0 * permutation) + &x_1);
}
});
}
let mut right = permutation_product_cosets_inv[permutation_index].clone();
let mut current_delta = x_0 * &C::Scalar::ZETA;
let step = domain.get_extended_omega();
for advice in columns.iter().map(|&column| {
&advice_cosets[pk.vk.cs.get_advice_query_index(column, 0)]
}) {
parallelize(&mut right, move |right, start| {
let mut beta_term =
current_delta * &step.pow_vartime(&[start as u64, 0, 0, 0]);
for (right, advice) in right.iter_mut().zip(advice[start..].iter())
{
*right *= &(*advice + &beta_term + &x_1);
beta_term *= &step;
}
});
current_delta *= &C::Scalar::DELTA;
}
left - &right
},
))
.fold(domain.empty_extended(), |h_poly, v| h_poly * x_2 + &v);
}))
// Permutation constraints, if any.
.chain(permutation_expressions.into_iter().flatten())
.fold(domain.empty_extended(), |h_poly, v| h_poly * *y + &v);
// Divide by t(X) = X^{params.n} - 1.
let h_poly = domain.divide_by_vanishing_poly(h_poly);
@ -401,15 +247,14 @@ impl<C: CurveAffine> Proof<C> {
.map_err(|_| Error::TranscriptError)?;
}
let x_3: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_3_inv = domain.rotate_omega(x_3, Rotation(-1));
let x = ChallengeX::get(&mut transcript);
// Evaluate polynomials at omega^i x_3
// Evaluate polynomials at omega^i x
let advice_evals: Vec<_> = meta
.advice_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&advice_polys[column.index()], domain.rotate_omega(x_3, at))
eval_polynomial(&advice_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
@ -417,7 +262,7 @@ impl<C: CurveAffine> Proof<C> {
.aux_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(&aux_polys[column.index()], domain.rotate_omega(x_3, at))
eval_polynomial(&aux_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
@ -425,37 +270,13 @@ impl<C: CurveAffine> Proof<C> {
.fixed_queries
.iter()
.map(|&(column, at)| {
eval_polynomial(
&pk.fixed_polys[column.index()],
domain.rotate_omega(x_3, at),
)
})
.collect();
let permutation_product_evals: Vec<C::Scalar> = permutation_product_polys
.iter()
.map(|poly| eval_polynomial(poly, x_3))
.collect();
let permutation_product_inv_evals: Vec<C::Scalar> = permutation_product_polys
.iter()
.map(|poly| eval_polynomial(poly, domain.rotate_omega(x_3, Rotation(-1))))
.collect();
let permutation_evals: Vec<Vec<C::Scalar>> = pk
.permutation_polys
.iter()
.map(|polys| {
polys
.iter()
.map(|poly| eval_polynomial(poly, x_3))
.collect()
eval_polynomial(&pk.fixed_polys[column.index()], domain.rotate_omega(*x, at))
})
.collect();
let h_evals: Vec<_> = h_pieces
.iter()
.map(|poly| eval_polynomial(poly, x_3))
.map(|poly| eval_polynomial(poly, *x))
.collect();
// Hash each advice evaluation
@ -464,18 +285,18 @@ impl<C: CurveAffine> Proof<C> {
.chain(aux_evals.iter())
.chain(fixed_evals.iter())
.chain(h_evals.iter())
.chain(permutation_product_evals.iter())
.chain(permutation_product_inv_evals.iter())
.chain(permutation_evals.iter().flat_map(|evals| evals.iter()))
{
transcript.absorb_scalar(*eval);
}
// Evaluate the permutations, if any, at omega^i x.
let permutations = permutations.map(|p| p.evaluate(pk, x, &mut transcript));
let instances =
iter::empty()
.chain(pk.vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(x_3, at),
point: domain.rotate_omega(*x, at),
poly: &advice_polys[column.index()],
blind: advice_blinds[column.index()],
eval: advice_evals[query_index],
@ -483,7 +304,7 @@ impl<C: CurveAffine> Proof<C> {
))
.chain(pk.vk.cs.aux_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(x_3, at),
point: domain.rotate_omega(*x, at),
poly: &aux_polys[column.index()],
blind: Blind::default(),
eval: aux_evals[query_index],
@ -491,88 +312,43 @@ impl<C: CurveAffine> Proof<C> {
))
.chain(pk.vk.cs.fixed_queries.iter().enumerate().map(
|(query_index, &(column, at))| ProverQuery {
point: domain.rotate_omega(x_3, at),
point: domain.rotate_omega(*x, at),
poly: &pk.fixed_polys[column.index()],
blind: Blind::default(),
eval: fixed_evals[query_index],
},
))
// We query the h(X) polynomial at x_3
// We query the h(X) polynomial at x
.chain(
h_pieces
.iter()
.zip(h_blinds.iter())
.zip(h_evals.iter())
.map(|((h_poly, h_blind), h_eval)| ProverQuery {
point: x_3,
point: *x,
poly: h_poly,
blind: *h_blind,
eval: *h_eval,
}),
);
// Handle permutation arguments, if any exist
let permutation_instances = if !pk.vk.cs.permutations.is_empty() {
Some(
iter::empty()
// Open permutation product commitments at x_3
.chain(
permutation_product_polys
.iter()
.zip(permutation_product_blinds.iter())
.zip(permutation_product_evals.iter())
.map(|((poly, blind), eval)| ProverQuery {
point: x_3,
poly,
blind: *blind,
eval: *eval,
}),
)
// Open permutation polynomial commitments at x_3
.chain(
pk.permutation_polys
.iter()
.zip(permutation_evals.iter())
.flat_map(|(polys, evals)| polys.iter().zip(evals.iter()))
.map(|(poly, eval)| ProverQuery {
point: x_3,
poly,
blind: Blind::default(),
eval: *eval,
}),
)
// Open permutation product commitments at \omega^{-1} x_3
.chain(
permutation_product_polys
.iter()
.zip(permutation_product_blinds.iter())
.zip(permutation_product_inv_evals.iter())
.map(|((poly, blind), eval)| ProverQuery {
point: x_3_inv,
poly,
blind: *blind,
eval: *eval,
}),
),
)
} else {
None
};
let multiopening = multiopen::Proof::create(
params,
&mut transcript,
instances.chain(permutation_instances.into_iter().flatten()),
instances.chain(
permutations
.as_ref()
.map(|p| p.open(pk, x))
.into_iter()
.flatten(),
),
)
.map_err(|_| Error::OpeningError)?;
Ok(Proof {
advice_commitments,
h_commitments,
permutation_product_commitments,
permutation_product_evals,
permutation_product_inv_evals,
permutation_evals,
permutations: permutations.map(|p| p.build()),
advice_evals,
fixed_evals,
aux_evals,

View File

@ -1,12 +1,11 @@
use ff::Field;
use std::iter;
use super::{Error, Proof, VerifyingKey};
use crate::arithmetic::{get_challenge_scalar, Challenge, CurveAffine, FieldExt};
use super::{ChallengeBeta, ChallengeGamma, ChallengeX, ChallengeY, Error, Proof, VerifyingKey};
use crate::arithmetic::{CurveAffine, FieldExt};
use crate::poly::{
commitment::{Guard, Params, MSM},
multiopen::VerifierQuery,
Rotation,
};
use crate::transcript::{Hasher, Transcript};
@ -46,21 +45,19 @@ impl<'a, C: CurveAffine> Proof<C> {
.map_err(|_| Error::TranscriptError)?;
}
// Sample x_0 challenge
let x_0: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample beta challenge
let beta = ChallengeBeta::get(&mut transcript);
// Sample x_1 challenge
let x_1: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample gamma challenge
let gamma = ChallengeGamma::get(&mut transcript);
// Hash each permutation product commitment
for c in &self.permutation_product_commitments {
transcript
.absorb_point(c)
.map_err(|_| Error::TranscriptError)?;
if let Some(p) = &self.permutations {
p.absorb_commitments(&mut transcript)?;
}
// Sample x_2 challenge, which keeps the gates linearly independent.
let x_2: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample y challenge, which keeps the gates linearly independent.
let y = ChallengeY::get(&mut transcript);
// Obtain a commitment to h(X) in the form of multiple pieces of degree n - 1
for c in &self.h_commitments {
@ -69,14 +66,13 @@ impl<'a, C: CurveAffine> Proof<C> {
.map_err(|_| Error::TranscriptError)?;
}
// Sample x_3 challenge, which is used to ensure the circuit is
// Sample x challenge, which is used to ensure the circuit is
// satisfied with high probability.
let x_3: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_3_inv = vk.domain.rotate_omega(x_3, Rotation(-1));
let x = ChallengeX::get(&mut transcript);
// This check ensures the circuit is satisfied so long as the polynomial
// commitments open to the correct values.
self.check_hx(params, vk, x_0, x_1, x_2, x_3)?;
self.check_hx(params, vk, beta, gamma, y, x)?;
for eval in self
.advice_evals
@ -84,9 +80,13 @@ impl<'a, C: CurveAffine> Proof<C> {
.chain(self.aux_evals.iter())
.chain(self.fixed_evals.iter())
.chain(self.h_evals.iter())
.chain(self.permutation_product_evals.iter())
.chain(self.permutation_product_inv_evals.iter())
.chain(self.permutation_evals.iter().flat_map(|evals| evals.iter()))
.chain(
self.permutations
.as_ref()
.map(|p| p.evals())
.into_iter()
.flatten(),
)
{
transcript.absorb_scalar(*eval);
}
@ -95,7 +95,7 @@ impl<'a, C: CurveAffine> Proof<C> {
iter::empty()
.chain(vk.cs.advice_queries.iter().enumerate().map(
|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(x_3, at),
point: vk.domain.rotate_omega(*x, at),
commitment: &self.advice_commitments[column.index()],
eval: self.advice_evals[query_index],
},
@ -106,14 +106,14 @@ impl<'a, C: CurveAffine> Proof<C> {
.iter()
.enumerate()
.map(|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(x_3, at),
point: vk.domain.rotate_omega(*x, at),
commitment: &aux_commitments[column.index()],
eval: self.aux_evals[query_index],
}),
)
.chain(vk.cs.fixed_queries.iter().enumerate().map(
|(query_index, &(column, at))| VerifierQuery {
point: vk.domain.rotate_omega(x_3, at),
point: vk.domain.rotate_omega(*x, at),
commitment: &vk.fixed_commitments[column.index()],
eval: self.fixed_evals[query_index],
},
@ -124,65 +124,25 @@ impl<'a, C: CurveAffine> Proof<C> {
.enumerate()
.zip(self.h_evals.iter())
.map(|((idx, _), &eval)| VerifierQuery {
point: x_3,
point: *x,
commitment: &self.h_commitments[idx],
eval,
}),
);
// Handle permutation arguments, if any exist
let permutation_queries = if !vk.cs.permutations.is_empty() {
Some(
iter::empty()
// Open permutation product commitments at x_3
.chain(
self.permutation_product_commitments
.iter()
.enumerate()
.zip(self.permutation_product_evals.iter())
.map(|((idx, _), &eval)| VerifierQuery {
point: x_3,
commitment: &self.permutation_product_commitments[idx],
eval,
}),
)
// Open permutation commitments for each permutation argument at x_3
.chain(
(0..vk.permutation_commitments.len())
.map(|outer_idx| {
let inner_len = vk.permutation_commitments[outer_idx].len();
(0..inner_len).map(move |inner_idx| VerifierQuery {
point: x_3,
commitment: &vk.permutation_commitments[outer_idx][inner_idx],
eval: self.permutation_evals[outer_idx][inner_idx],
})
})
.flatten(),
)
// Open permutation product commitments at \omega^{-1} x_3
.chain(
self.permutation_product_commitments
.iter()
.enumerate()
.zip(self.permutation_product_inv_evals.iter())
.map(|((idx, _), &eval)| VerifierQuery {
point: x_3_inv,
commitment: &self.permutation_product_commitments[idx],
eval,
}),
),
)
} else {
None
};
// We are now convinced the circuit is satisfied so long as the
// polynomial commitments open to the correct values.
self.multiopening
.verify(
params,
&mut transcript,
queries.chain(permutation_queries.into_iter().flatten()),
queries.chain(
self.permutations
.as_ref()
.map(|p| p.queries(vk, x))
.into_iter()
.flatten(),
),
msm,
)
.map_err(|_| Error::OpeningError)
@ -209,29 +169,10 @@ impl<'a, C: CurveAffine> Proof<C> {
return Err(Error::IncompatibleParams);
}
if self.permutation_evals.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
for (permutation_evals, permutation) in
self.permutation_evals.iter().zip(vk.cs.permutations.iter())
{
if permutation_evals.len() != permutation.len() {
return Err(Error::IncompatibleParams);
}
}
if self.permutation_product_inv_evals.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
if self.permutation_product_evals.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
if self.permutation_product_commitments.len() != vk.cs.permutations.len() {
return Err(Error::IncompatibleParams);
}
self.permutations
.as_ref()
.map(|p| p.check_lengths(vk))
.transpose()?;
// TODO: check h_commitments
@ -248,21 +189,21 @@ impl<'a, C: CurveAffine> Proof<C> {
&self,
params: &'a Params<C>,
vk: &VerifyingKey<C>,
x_0: C::Scalar,
x_1: C::Scalar,
x_2: C::Scalar,
x_3: C::Scalar,
beta: ChallengeBeta<C::Scalar>,
gamma: ChallengeGamma<C::Scalar>,
y: ChallengeY<C::Scalar>,
x: ChallengeX<C::Scalar>,
) -> Result<(), Error> {
// x_3^n
let x_3n = x_3.pow(&[params.n as u64, 0, 0, 0]);
// x^n
let xn = x.pow(&[params.n as u64, 0, 0, 0]);
// TODO: bubble this error up
// l_0(x_3)
let l_0 = (x_3 - &C::Scalar::one()).invert().unwrap() // 1 / (x_3 - 1)
* &(x_3n - &C::Scalar::one()) // (x_3^n - 1) / (x_3 - 1)
* &vk.domain.get_barycentric_weight(); // l_0(x_3)
// l_0(x)
let l_0 = (*x - &C::Scalar::one()).invert().unwrap() // 1 / (x - 1)
* &(xn - &C::Scalar::one()) // (x^n - 1) / (x - 1)
* &vk.domain.get_barycentric_weight(); // l_0(x)
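This three-line computation is the closed form of the first Lagrange basis polynomial over the n-th roots of unity; assuming get_barycentric_weight() returns 1/n (as the inline comments suggest), it evaluates

```latex
\ell_0(x) \;=\; \frac{x^{n} - 1}{\,n\,(x - 1)\,},
```

which the verifier needs for the l_0(x) * (1 - z(x)) term of the permutation expressions.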
// Compute the expected value of h(x_3)
// Compute the expected value of h(x)
let expected_h_eval = std::iter::empty()
// Evaluate the circuit using the custom gates provided
.chain(vk.cs.gates.iter().map(|poly| {
@ -275,58 +216,24 @@ impl<'a, C: CurveAffine> Proof<C> {
&|a, scalar| a * &scalar,
)
}))
// l_0(X) * (1 - z(X)) = 0
.chain(
self.permutation_product_evals
.iter()
.map(|product_eval| l_0 * &(C::Scalar::one() - product_eval)),
self.permutations
.as_ref()
.map(|p| p.expressions(vk, &self.advice_evals, l_0, beta, gamma, x))
.into_iter()
.flatten(),
)
// z(X) \prod (p(X) + \beta s_i(X) + \gamma)
// - z(omega^{-1} X) \prod (p(X) + \delta^i \beta X + \gamma)
.chain(
vk.cs
.permutations
.iter()
.zip(self.permutation_evals.iter())
.zip(self.permutation_product_evals.iter())
.zip(self.permutation_product_inv_evals.iter())
.map(
|(((columns, permutation_evals), product_eval), product_inv_eval)| {
let mut left = *product_eval;
for (advice_eval, permutation_eval) in columns
.iter()
.map(|&column| {
self.advice_evals[vk.cs.get_advice_query_index(column, 0)]
})
.zip(permutation_evals.iter())
{
left *= &(advice_eval + &(x_0 * permutation_eval) + &x_1);
}
.fold(C::Scalar::zero(), |h_eval, v| h_eval * &y + &v);
let mut right = *product_inv_eval;
let mut current_delta = x_0 * &x_3;
for advice_eval in columns.iter().map(|&column| {
self.advice_evals[vk.cs.get_advice_query_index(column, 0)]
}) {
right *= &(advice_eval + &current_delta + &x_1);
current_delta *= &C::Scalar::DELTA;
}
left - &right
},
),
)
.fold(C::Scalar::zero(), |h_eval, v| h_eval * &x_2 + &v);
// Compute h(x_3) from the prover
// Compute h(x) from the prover
let h_eval = self
.h_evals
.iter()
.rev()
.fold(C::Scalar::zero(), |acc, eval| acc * &x_3n + eval);
.fold(C::Scalar::zero(), |acc, eval| acc * &xn + eval);
// Did the prover commit to the correct polynomial?
if expected_h_eval != (h_eval * &(x_3n - &C::Scalar::one())) {
if expected_h_eval != (h_eval * &(xn - &C::Scalar::one())) {
return Err(Error::ConstraintSystemFailure);
}
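Put differently, the comparison in this final check (on both sides of the diff) asserts the quotient relation between the folded constraint expressions, the committed pieces of h(X), and the vanishing polynomial t(X) = X^n - 1:

```latex
\sum_{i=0}^{k-1} y^{\,k-1-i}\,\mathrm{expr}_i(x)
\;=\;
h(x)\,\bigl(x^{n} - 1\bigr),
\qquad
h(x) \;=\; \sum_{j} x^{\,n j}\, h_j(x),
```

where expr_0 is the first expression chained into expected_h_eval and k is the total number of gate and permutation expressions.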

View File

@ -176,13 +176,13 @@ impl<C: CurveAffine> Params<C> {
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct Blind<F>(pub F);
impl<F: Field> Default for Blind<F> {
impl<F: FieldExt> Default for Blind<F> {
fn default() -> Self {
Blind(F::one())
}
}
impl<F: Field> Add for Blind<F> {
impl<F: FieldExt> Add for Blind<F> {
type Output = Self;
fn add(self, rhs: Blind<F>) -> Self {
@ -190,7 +190,7 @@ impl<F: Field> Add for Blind<F> {
}
}
impl<F: Field> Mul for Blind<F> {
impl<F: FieldExt> Mul for Blind<F> {
type Output = Self;
fn mul(self, rhs: Blind<F>) -> Self {
@ -198,25 +198,25 @@ impl<F: Field> Mul for Blind<F> {
}
}
impl<F: Field> AddAssign for Blind<F> {
impl<F: FieldExt> AddAssign for Blind<F> {
fn add_assign(&mut self, rhs: Blind<F>) {
self.0 += rhs.0;
}
}
impl<F: Field> MulAssign for Blind<F> {
impl<F: FieldExt> MulAssign for Blind<F> {
fn mul_assign(&mut self, rhs: Blind<F>) {
self.0 *= rhs.0;
}
}
impl<F: Field> AddAssign<F> for Blind<F> {
impl<F: FieldExt> AddAssign<F> for Blind<F> {
fn add_assign(&mut self, rhs: F) {
self.0 += rhs;
}
}
impl<F: Field> MulAssign<F> for Blind<F> {
impl<F: FieldExt> MulAssign<F> for Blind<F> {
fn mul_assign(&mut self, rhs: F) {
self.0 *= rhs;
}
@ -254,10 +254,8 @@ fn test_opening_proof() {
commitment::{Blind, Params},
EvaluationDomain,
};
use crate::arithmetic::{
eval_polynomial, get_challenge_scalar, Challenge, Curve, CurveAffine, FieldExt,
};
use crate::transcript::{DummyHash, Hasher, Transcript};
use crate::arithmetic::{eval_polynomial, Curve, FieldExt};
use crate::transcript::{ChallengeScalar, DummyHash, Transcript};
use crate::tweedle::{EpAffine, Fp, Fq};
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
@ -273,23 +271,18 @@ fn test_opening_proof() {
let p = params.commit(&px, blind).to_affine();
let mut hasher = DummyHash::init(Field::one());
let (p_x, p_y) = p.get_xy().unwrap();
hasher.absorb(p_x);
hasher.absorb(p_y);
let x_packed = hasher.squeeze().get_lower_128();
let x: Fq = get_challenge_scalar(Challenge(x_packed));
let mut transcript = Transcript::<_, DummyHash<_>, DummyHash<_>>::new();
transcript.absorb_point(&p).unwrap();
let x = ChallengeScalar::<_, ()>::get(&mut transcript);
// Evaluate the polynomial
let v = eval_polynomial(&px, x);
let v = eval_polynomial(&px, *x);
hasher.absorb(Fp::from_bytes(&v.to_bytes()).unwrap()); // unlikely to fail since p ~ q
let scalar_hasher = DummyHash::init(Fq::one());
let mut transcript = Transcript::init_with_hashers(&hasher, &scalar_hasher);
transcript.absorb_base(Fp::from_bytes(&v.to_bytes()).unwrap()); // unlikely to fail since p ~ q
loop {
let mut transcript_dup = transcript.clone();
let opening_proof = Proof::create(&params, &mut transcript, &px, blind, x);
let opening_proof = Proof::create(&params, &mut transcript, &px, blind, *x);
if let Ok(opening_proof) = opening_proof {
// Verify the opening proof
let mut commitment_msm = params.empty_msm();
@ -299,7 +292,7 @@ fn test_opening_proof() {
&params,
params.empty_msm(),
&mut transcript_dup,
x,
*x,
commitment_msm,
v,
)

View File

@ -3,10 +3,9 @@ use ff::Field;
use super::super::{Coeff, Error, Polynomial};
use super::{Blind, Params, Proof};
use crate::arithmetic::{
best_multiexp, compute_inner_product, get_challenge_scalar, parallelize, small_multiexp,
Challenge, Curve, CurveAffine, FieldExt,
best_multiexp, compute_inner_product, parallelize, small_multiexp, Curve, CurveAffine, FieldExt,
};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::{Challenge, ChallengeScalar, Hasher, Transcript};
impl<C: CurveAffine> Proof<C> {
/// Create a polynomial commitment opening proof for the polynomial defined
@ -108,8 +107,8 @@ impl<C: CurveAffine> Proof<C> {
.map_err(|_| Error::SamplingError)?;
// ... and get the squared challenge.
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq: C::Scalar = get_challenge_scalar(Challenge(challenge_sq_packed));
let challenge_sq_packed = Challenge::get(&mut transcript);
let challenge_sq: C::Scalar = *ChallengeScalar::<_, ()>::from(challenge_sq_packed);
// There might be no square root, in which case we'll fork the
// transcript.
@ -139,9 +138,8 @@ impl<C: CurveAffine> Proof<C> {
// And obtain the challenge, even though we already have it, since
// squeezing affects the transcript.
{
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq_expected = get_challenge_scalar(Challenge(challenge_sq_packed));
assert_eq!(challenge_sq, challenge_sq_expected);
let challenge_sq_expected = ChallengeScalar::<_, ()>::get(transcript);
assert_eq!(challenge_sq, *challenge_sq_expected);
}
// Done with this round.
@ -186,12 +184,11 @@ impl<C: CurveAffine> Proof<C> {
.map_err(|_| Error::SamplingError)?;
// Obtain the challenge c.
let c_packed = transcript.squeeze().get_lower_128();
let c: C::Scalar = get_challenge_scalar(Challenge(c_packed));
let c = ChallengeScalar::<_, ()>::get(transcript);
// Compute z1 and z2 as described in the Halo paper.
let z1 = a * &c + &d;
let z2 = c * &blind + &s;
let z2 = *c * &blind + &s;
Ok(Proof {
rounds,
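
The rounds above rely on the two challenge forms introduced in this PR: the packed 128-bit Challenge is what gets recorded (and re-squeezed by the verifier), while its ChallengeScalar image is what enters the arithmetic. A condensed sketch of one round's challenge handling, with the vector folding elided:

    // After absorbing this round's L and R, squeeze the packed challenge and
    // map it into the scalar field via the Algorithm 1 endomorphism mapping.
    let challenge_sq_packed = Challenge::get(&mut transcript);
    let challenge_sq: C::Scalar = *ChallengeScalar::<_, ()>::from(challenge_sq_packed);

    // There may be no square root in the scalar field; in that case the
    // transcript is forked and the round is retried, as in the code above.
    if let Some(challenge) = challenge_sq.deterministic_sqrt() {
        // ... fold the committed vectors by `challenge` and keep
        //     `challenge_sq_packed` so the verifier reproduces the squeeze ...
    }

The final openings are then read directly off the last two statements shown above: z1 = a * c + d and z2 = c * blind + s.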

View File

@ -2,11 +2,9 @@ use ff::Field;
use super::super::Error;
use super::{Params, Proof, MSM};
use crate::transcript::{Hasher, Transcript};
use crate::transcript::{Challenge, ChallengeScalar, Hasher, Transcript};
use crate::arithmetic::{
best_multiexp, get_challenge_scalar, Challenge, Curve, CurveAffine, FieldExt,
};
use crate::arithmetic::{best_multiexp, Curve, CurveAffine, FieldExt};
/// A guard returned by the verifier
#[derive(Debug, Clone)]
@ -120,8 +118,8 @@ impl<C: CurveAffine> Proof<C> {
transcript
.absorb_point(&r)
.map_err(|_| Error::OpeningError)?;
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq: C::Scalar = get_challenge_scalar(Challenge(challenge_sq_packed));
let challenge_sq_packed = Challenge::get(transcript);
let challenge_sq: C::Scalar = *ChallengeScalar::<_, ()>::from(challenge_sq_packed);
let challenge = challenge_sq.deterministic_sqrt();
if challenge.is_none() {
@ -149,7 +147,7 @@ impl<C: CurveAffine> Proof<C> {
challenges.push(challenge);
challenges_inv.push(challenge_inv);
challenges_sq.push(challenge_sq);
challenges_sq_packed.push(Challenge(challenge_sq_packed));
challenges_sq_packed.push(challenge_sq_packed);
}
// Feed delta into the transcript
@ -158,8 +156,7 @@ impl<C: CurveAffine> Proof<C> {
.map_err(|_| Error::OpeningError)?;
// Get the challenge `c`
let c_packed = transcript.squeeze().get_lower_128();
let c: C::Scalar = get_challenge_scalar(Challenge(c_packed));
let c = ChallengeScalar::<_, ()>::get(transcript);
// Construct
// [c] P + [c * v] U + [c] sum(L_i * u_i^2) + [c] sum(R_i * u_i^-2) + delta - [z1 * b] U + [z1 - z2] H
@ -172,12 +169,12 @@ impl<C: CurveAffine> Proof<C> {
let neg_z1 = -self.z1;
// [c] P
commitment_msm.scale(c);
commitment_msm.scale(*c);
msm.add_msm(&commitment_msm);
// [c] sum(L_i * u_i^2) + [c] sum(R_i * u_i^-2)
for scalar in &mut extra_scalars {
*scalar *= &c;
*scalar *= &(*c);
}
for (scalar, base) in extra_scalars.iter().zip(extra_bases.iter()) {
@ -185,7 +182,7 @@ impl<C: CurveAffine> Proof<C> {
}
// [c * v] U - [z1 * b] U
msm.append_term((c * &v) + &(neg_z1 * &b), u);
msm.append_term((*c * &v) + &(neg_z1 * &b), u);
// delta
msm.append_term(Field::one(), self.delta);
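
For reference, the group element accumulated into the MSM by the comment and calls above, written out in LaTeX (Delta denotes self.delta; the [z1 - z2] H term and the remaining generator terms are handled in the part of this function that falls outside the hunk):

    [c]\,P + [c\,v]\,U + [c]\sum_i [u_i^{2}]\,L_i + [c]\sum_i [u_i^{-2}]\,R_i + \Delta - [z_1 b]\,U + [z_1 - z_2]\,H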

View File

@ -7,11 +7,35 @@ use ff::Field;
use std::collections::{BTreeMap, BTreeSet};
use super::*;
use crate::arithmetic::{CurveAffine, FieldExt};
use crate::{
arithmetic::{CurveAffine, FieldExt},
transcript::ChallengeScalar,
};
mod prover;
mod verifier;
#[derive(Clone, Copy, Debug)]
struct X1 {}
/// Challenge for compressing openings at the same point sets together.
type ChallengeX1<F> = ChallengeScalar<F, X1>;
#[derive(Clone, Copy, Debug)]
struct X2 {}
/// Challenge for keeping the multi-point quotient polynomial terms linearly independent.
type ChallengeX2<F> = ChallengeScalar<F, X2>;
#[derive(Clone, Copy, Debug)]
struct X3 {}
/// Challenge point at which the commitments are opened.
type ChallengeX3<F> = ChallengeScalar<F, X3>;
#[derive(Clone, Copy, Debug)]
struct X4 {}
/// Challenge for collapsing the openings of the various remaining polynomials at x_3
/// together.
type ChallengeX4<F> = ChallengeScalar<F, X4>;
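
A short illustration of what the typed challenges buy (the fold_eval helper below is hypothetical, not part of this module): each tag yields a distinct type, so passing an x_2 where an x_1 is expected fails to compile, while Deref still exposes the underlying field element for arithmetic.

    // Hypothetical helper that can only ever receive the x_1 challenge.
    // Assumes this module's FieldExt import is in scope.
    fn fold_eval<F: FieldExt>(acc: F, eval: F, x_1: ChallengeX1<F>) -> F {
        // `*x_1` dereferences the typed challenge to the raw scalar.
        acc * *x_1 + eval
    }

    // fold_eval(acc, eval, x_2);  // rejected: expected ChallengeX1, found ChallengeX2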
/// This is a multi-point opening proof used in the polynomial commitment scheme opening.
#[derive(Debug, Clone)]
pub struct Proof<C: CurveAffine> {

View File

@ -2,11 +2,13 @@ use super::super::{
commitment::{self, Blind, Params},
Coeff, Error, Polynomial,
};
use super::{construct_intermediate_sets, Proof, ProverQuery, Query};
use super::{
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Proof,
ProverQuery, Query,
};
use crate::arithmetic::{
eval_polynomial, get_challenge_scalar, kate_division, lagrange_interpolate, Challenge, Curve,
CurveAffine, FieldExt,
eval_polynomial, kate_division, lagrange_interpolate, Curve, CurveAffine, FieldExt,
};
use crate::transcript::{Hasher, Transcript};
@ -31,13 +33,13 @@ impl<C: CurveAffine> Proof<C> {
where
I: IntoIterator<Item = ProverQuery<'a, C>> + Clone,
{
let x_4: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_5: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_1 = ChallengeX1::get(transcript);
let x_2 = ChallengeX2::get(transcript);
let (poly_map, point_sets) = construct_intermediate_sets(queries);
// Collapse openings at same point sets together into single openings using
// x_4 challenge.
// x_1 challenge.
let mut q_polys: Vec<Option<Polynomial<C::Scalar, Coeff>>> = vec![None; point_sets.len()];
let mut q_blinds = vec![Blind(C::Scalar::zero()); point_sets.len()];
@ -54,16 +56,16 @@ impl<C: CurveAffine> Proof<C> {
blind: Blind<C::Scalar>,
evals: Vec<C::Scalar>| {
if let Some(poly) = &q_polys[set_idx] {
q_polys[set_idx] = Some(poly.clone() * x_4 + new_poly);
q_polys[set_idx] = Some(poly.clone() * *x_1 + new_poly);
} else {
q_polys[set_idx] = Some(new_poly.clone());
}
q_blinds[set_idx] *= x_4;
q_blinds[set_idx] *= *x_1;
q_blinds[set_idx] += blind;
// Each polynomial is evaluated at a set of points. For each set,
// we collapse each polynomial's evals pointwise.
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &x_4;
*set_eval *= &x_1;
*set_eval += eval;
}
};
@ -100,7 +102,7 @@ impl<C: CurveAffine> Proof<C> {
if f_poly.is_none() {
Some(poly)
} else {
f_poly.map(|f_poly| f_poly * x_5 + &poly)
f_poly.map(|f_poly| f_poly * *x_2 + &poly)
}
})
.unwrap();
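
In equation form, the two Horner-style folds above are (indices written loosely; p_{i,j} ranges over the polynomials queried at point set S_i, r_i is the Lagrange interpolation of the collapsed evaluations over S_i, and the per-set quotient folded into f is constructed outside this hunk, matching the verifier-side formula further down):

    q_i(X) \leftarrow x_1 \cdot q_i(X) + p_{i,j}(X)
    f(X) \leftarrow x_2 \cdot f(X) + \frac{q_i(X) - r_i(X)}{\prod_{p \in S_i} (X - p)}

The blinds and the claimed evaluations are collapsed with the same x_1 recurrence.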
@ -114,33 +116,31 @@ impl<C: CurveAffine> Proof<C> {
.absorb_point(&f_commitment)
.map_err(|_| Error::SamplingError)?;
let x_6: C::Scalar =
get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_3 = ChallengeX3::get(&mut transcript);
let q_evals: Vec<C::Scalar> = q_polys
.iter()
.map(|poly| eval_polynomial(poly.as_ref().unwrap(), x_6))
.map(|poly| eval_polynomial(poly.as_ref().unwrap(), *x_3))
.collect();
for eval in q_evals.iter() {
transcript.absorb_scalar(*eval);
}
let x_7: C::Scalar =
get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_4 = ChallengeX4::get(&mut transcript);
let (f_poly, f_blind_try) = q_polys.iter().zip(q_blinds.iter()).fold(
(f_poly.clone(), f_blind),
|(f_poly, f_blind), (poly, blind)| {
(
f_poly * x_7 + poly.as_ref().unwrap(),
Blind((f_blind.0 * &x_7) + &blind.0),
f_poly * *x_4 + poly.as_ref().unwrap(),
Blind((f_blind.0 * &x_4) + &blind.0),
)
},
);
if let Ok(opening) =
commitment::Proof::create(&params, &mut transcript, &f_poly, f_blind_try, x_6)
commitment::Proof::create(&params, &mut transcript, &f_poly, f_blind_try, *x_3)
{
break (opening, q_evals);
} else {

View File

@ -4,10 +4,11 @@ use super::super::{
commitment::{Guard, Params, MSM},
Error,
};
use super::{construct_intermediate_sets, Proof, Query, VerifierQuery};
use crate::arithmetic::{
eval_polynomial, get_challenge_scalar, lagrange_interpolate, Challenge, CurveAffine, FieldExt,
use super::{
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Proof, Query,
VerifierQuery,
};
use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine, FieldExt};
use crate::transcript::{Hasher, Transcript};
#[derive(Debug, Clone)]
@ -34,17 +35,17 @@ impl<C: CurveAffine> Proof<C> {
// with it to make it true, with high probability.
msm.scale(C::Scalar::rand());
// Sample x_4 for compressing openings at the same point sets together
let x_4: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample x_1 for compressing openings at the same point sets together
let x_1 = ChallengeX1::get(transcript);
// Sample a challenge x_5 for keeping the multi-point quotient
// Sample a challenge x_2 for keeping the multi-point quotient
// polynomial terms linearly independent.
let x_5: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_2 = ChallengeX2::get(transcript);
let (commitment_map, point_sets) = construct_intermediate_sets(queries);
// Compress the commitments and expected evaluations at x_3 together.
// using the challenge x_4
// Compress the commitments and expected evaluations at x together.
// using the challenge x_1
let mut q_commitments: Vec<_> = vec![params.empty_msm(); point_sets.len()];
// A vec of vecs of evals. The outer vec corresponds to the point set,
@ -55,10 +56,10 @@ impl<C: CurveAffine> Proof<C> {
}
{
let mut accumulate = |set_idx: usize, new_commitment, evals: Vec<C::Scalar>| {
q_commitments[set_idx].scale(x_4);
q_commitments[set_idx].scale(*x_1);
q_commitments[set_idx].append_term(C::Scalar::one(), new_commitment);
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &x_4;
*set_eval *= &x_1;
*set_eval += eval;
}
};
@ -79,16 +80,16 @@ impl<C: CurveAffine> Proof<C> {
.absorb_point(&self.f_commitment)
.map_err(|_| Error::SamplingError)?;
// Sample a challenge x_6 for checking that f(X) was committed to
// Sample a challenge x_3 for checking that f(X) was committed to
// correctly.
let x_6: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let x_3 = ChallengeX3::get(transcript);
for eval in self.q_evals.iter() {
transcript.absorb_scalar(*eval);
}
// We can compute the expected msm_eval at x_6 using the q_evals provided
// by the prover and from x_5
// We can compute the expected msm_eval at x_3 using the q_evals provided
// by the prover and from x_2
let msm_eval = point_sets
.iter()
.zip(q_eval_sets.iter())
@ -97,17 +98,17 @@ impl<C: CurveAffine> Proof<C> {
C::Scalar::zero(),
|msm_eval, ((points, evals), proof_eval)| {
let r_poly = lagrange_interpolate(points, evals);
let r_eval = eval_polynomial(&r_poly, x_6);
let r_eval = eval_polynomial(&r_poly, *x_3);
let eval = points.iter().fold(*proof_eval - &r_eval, |eval, point| {
eval * &(x_6 - point).invert().unwrap()
eval * &(*x_3 - point).invert().unwrap()
});
msm_eval * &x_5 + &eval
msm_eval * &x_2 + &eval
},
);
// Sample a challenge x_7 that we will use to collapse the openings of
// the various remaining polynomials at x_6 together.
let x_7: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Sample a challenge x_4 that we will use to collapse the openings of
// the various remaining polynomials at x_3 together.
let x_4 = ChallengeX4::get(transcript);
// Compute the final commitment that has to be opened
let mut commitment_msm = params.empty_msm();
@ -115,15 +116,15 @@ impl<C: CurveAffine> Proof<C> {
let (commitment_msm, msm_eval) = q_commitments.into_iter().zip(self.q_evals.iter()).fold(
(commitment_msm, msm_eval),
|(mut commitment_msm, msm_eval), (q_commitment, q_eval)| {
commitment_msm.scale(x_7);
commitment_msm.scale(*x_4);
commitment_msm.add_msm(&q_commitment);
(commitment_msm, msm_eval * &x_7 + q_eval)
(commitment_msm, msm_eval * &x_4 + q_eval)
},
);
// Verify the opening proof
self.opening
.verify(params, msm, transcript, x_6, commitment_msm, msm_eval)
.verify(params, msm, transcript, *x_3, commitment_msm, msm_eval)
}
}
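
The verifier mirrors those folds; a loose transcription of the code above (C_{i,j} are the queried commitments sharing point set S_i, Q_i the compressed MSM for that set, r_i the interpolation of the claimed evaluations, and q_i(x_3) the prover-supplied q_evals):

    Q_i \leftarrow x_1 \cdot Q_i + C_{i,j}
    e \leftarrow x_2 \cdot e + \frac{q_i(x_3) - r_i(x_3)}{\prod_{p \in S_i} (x_3 - p)}
    P \leftarrow x_4 \cdot P + Q_i, \qquad v \leftarrow x_4 \cdot v + q_i(x_3) \quad (\text{starting from } v = e)

The inner commitment opening proof is then verified for the collapsed commitment P at x_3 against the collapsed value v.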

View File

@ -3,6 +3,7 @@
use ff::Field;
use std::marker::PhantomData;
use std::ops::Deref;
use crate::arithmetic::{CurveAffine, FieldExt};
@ -81,18 +82,6 @@ impl<C: CurveAffine, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>
}
}
/// Initialise a new transcript with some given base_hasher and
/// scalar_hasher
#[cfg(test)]
pub(crate) fn init_with_hashers(base_hasher: &HBase, scalar_hasher: &HScalar) -> Self {
Transcript {
base_hasher: base_hasher.clone(),
scalar_hasher: scalar_hasher.clone(),
scalar_needs_squeezing: false,
_marker: PhantomData,
}
}
fn conditional_scalar_squeeze(&mut self) {
if self.scalar_needs_squeezing {
let transcript_scalar_point =
@ -134,3 +123,71 @@ impl<C: CurveAffine, HBase: Hasher<C::Base>, HScalar: Hasher<C::Scalar>>
self.base_hasher.squeeze()
}
}
/// This is a 128-bit verifier challenge.
#[derive(Copy, Clone, Debug)]
pub struct Challenge(pub(crate) u128);
impl Challenge {
/// Obtains a new challenge from the transcript.
pub fn get<C, HBase, HScalar>(transcript: &mut Transcript<C, HBase, HScalar>) -> Challenge
where
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
{
Challenge(transcript.squeeze().get_lower_128())
}
}
/// The scalar representation of a verifier challenge.
///
/// The `T` type can be used to scope the challenge to a specific context, or set to `()`
/// if no context is required.
#[derive(Copy, Clone, Debug)]
pub struct ChallengeScalar<F: FieldExt, T> {
inner: F,
_marker: PhantomData<T>,
}
impl<F: FieldExt, T> From<Challenge> for ChallengeScalar<F, T> {
/// This algorithm applies the mapping of Algorithm 1 from the
/// [Halo](https://eprint.iacr.org/2019/1021) paper.
fn from(challenge: Challenge) -> Self {
let mut acc = (F::ZETA + F::one()).double();
for i in (0..64).rev() {
let should_negate = ((challenge.0 >> ((i << 1) + 1)) & 1) == 1;
let should_endo = ((challenge.0 >> (i << 1)) & 1) == 1;
let q = if should_negate { -F::one() } else { F::one() };
let q = if should_endo { q * F::ZETA } else { q };
acc = acc + q + acc;
}
ChallengeScalar {
inner: acc,
_marker: PhantomData::default(),
}
}
}
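
Written as a recurrence, the loop above computes (b_k is bit k of the 128-bit challenge, zeta is F::ZETA):

    \mathrm{acc} \gets 2(\zeta + 1); \qquad \text{for } i = 63, \dots, 0:\ \ \mathrm{acc} \gets 2\,\mathrm{acc} + (-1)^{b_{2i+1}} \cdot \zeta^{\,b_{2i}}

The result is the challenge scalar in the decomposed form of Algorithm 1 of the Halo paper, which allows it to be applied efficiently via the curve endomorphism.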
impl<F: FieldExt, T> ChallengeScalar<F, T> {
/// Obtains a new challenge from the transcript.
pub fn get<C, HBase, HScalar>(transcript: &mut Transcript<C, HBase, HScalar>) -> Self
where
C: CurveAffine,
HBase: Hasher<C::Base>,
HScalar: Hasher<C::Scalar>,
{
Challenge::get(transcript).into()
}
}
impl<F: FieldExt, T> Deref for ChallengeScalar<F, T> {
type Target = F;
fn deref(&self) -> &F {
&self.inner
}
}
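
A minimal usage sketch of the API added above (the Theta tag and sample_theta function are illustrative only, not part of the crate): a zero-sized tag scopes the challenge to a context, get squeezes the transcript and applies the mapping above, and Deref exposes the scalar for arithmetic.

    use crate::arithmetic::{CurveAffine, FieldExt};
    use crate::transcript::{ChallengeScalar, Hasher, Transcript};

    // Illustrative context tag; any zero-sized marker type works.
    #[derive(Clone, Copy, Debug)]
    struct Theta;
    type ChallengeTheta<F> = ChallengeScalar<F, Theta>;

    fn sample_theta<C, HBase, HScalar>(transcript: &mut Transcript<C, HBase, HScalar>) -> C::Scalar
    where
        C: CurveAffine,
        HBase: Hasher<C::Base>,
        HScalar: Hasher<C::Scalar>,
    {
        // Squeeze 128 bits from the transcript and map them into the scalar field.
        let theta = ChallengeTheta::<C::Scalar>::get(transcript);
        // The context tag is erased once we deref to do arithmetic.
        *theta
    }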