Refactor module tree.

Sean Bowe 2020-09-07 10:22:25 -06:00
parent 7250ac8252
commit 116659c1ba
GPG Key ID: 95684257D8F8B031 (no known key found for this signature in database)
12 changed files with 1138 additions and 827 deletions


@ -14,6 +14,6 @@
#![deny(unsafe_code)]
pub mod arithmetic;
pub mod plonk;
pub mod polycommit;
pub mod poly;
pub mod transcript;
pub mod plonk;


@ -6,12 +6,13 @@
//! [plonk]: https://eprint.iacr.org/2019/953
use crate::arithmetic::CurveAffine;
use crate::polycommit::OpeningProof;
use crate::poly::{
commitment::OpeningProof, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff,
Polynomial,
};
use crate::transcript::Hasher;
#[macro_use]
mod circuit;
mod domain;
mod prover;
mod srs;
mod verifier;
@ -21,22 +22,20 @@ pub use prover::*;
pub use srs::*;
pub use verifier::*;
use domain::EvaluationDomain;
/// This is a structured reference string (SRS) that is (deterministically)
/// computed from a specific circuit and parameters for the polynomial
/// commitment scheme.
#[derive(Debug)]
pub struct SRS<C: CurveAffine> {
domain: EvaluationDomain<C::Scalar>,
l0: Vec<C::Scalar>,
l0: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
fixed_commitments: Vec<C>,
fixed_polys: Vec<Vec<C::Scalar>>,
fixed_cosets: Vec<Vec<C::Scalar>>,
fixed_polys: Vec<Polynomial<C::Scalar, Coeff>>,
fixed_cosets: Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>,
permutation_commitments: Vec<Vec<C>>,
permutations: Vec<Vec<Vec<C::Scalar>>>,
permutation_polys: Vec<Vec<Vec<C::Scalar>>>,
permutation_cosets: Vec<Vec<Vec<C::Scalar>>>,
permutations: Vec<Vec<Polynomial<C::Scalar, LagrangeCoeff>>>,
permutation_polys: Vec<Vec<Polynomial<C::Scalar, Coeff>>>,
permutation_cosets: Vec<Vec<Polynomial<C::Scalar, ExtendedLagrangeCoeff>>>,
meta: MetaCircuit<C::Scalar>,
}
@ -91,7 +90,7 @@ fn hash_point<C: CurveAffine, H: Hasher<C::Base>>(
#[test]
fn test_proving() {
use crate::arithmetic::{EqAffine, Field, Fp, Fq};
use crate::polycommit::Params;
use crate::poly::commitment::Params;
use crate::transcript::DummyHash;
use std::marker::PhantomData;
const K: u32 = 5;


@ -5,7 +5,7 @@ use std::collections::BTreeMap;
use super::Error;
use crate::arithmetic::Field;
use super::domain::Rotation;
use crate::poly::Rotation;
/// This represents a wire which has a fixed (permanent) value
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct FixedWire(pub usize);
@ -65,22 +65,22 @@ pub trait Circuit<F: Field> {
) -> Result<(), Error>;
}
/// Low-degree polynomial representing an identity that must hold over the committed wires.
/// Low-degree expression representing an identity that must hold over the committed wires.
#[derive(Clone, Debug)]
pub enum Polynomial<F> {
pub enum Expression<F> {
/// This is a fixed wire queried at a certain relative location
Fixed(usize),
/// This is an advice (witness) wire queried at a certain relative location
Advice(usize),
/// This is the sum of two polynomials
Sum(Box<Polynomial<F>>, Box<Polynomial<F>>),
Sum(Box<Expression<F>>, Box<Expression<F>>),
/// This is the product of two polynomials
Product(Box<Polynomial<F>>, Box<Polynomial<F>>),
Product(Box<Expression<F>>, Box<Expression<F>>),
/// This is a scaled polynomial
Scaled(Box<Polynomial<F>>, F),
Scaled(Box<Expression<F>>, F),
}
impl<F: Field> Polynomial<F> {
impl<F: Field> Expression<F> {
/// Evaluate the polynomial using the provided closures to perform the
/// operations.
pub fn evaluate<T>(
@ -92,19 +92,19 @@ impl<F: Field> Polynomial<F> {
scaled: &impl Fn(T, F) -> T,
) -> T {
match self {
Polynomial::Fixed(index) => fixed_wire(*index),
Polynomial::Advice(index) => advice_wire(*index),
Polynomial::Sum(a, b) => {
Expression::Fixed(index) => fixed_wire(*index),
Expression::Advice(index) => advice_wire(*index),
Expression::Sum(a, b) => {
let a = a.evaluate(fixed_wire, advice_wire, sum, product, scaled);
let b = b.evaluate(fixed_wire, advice_wire, sum, product, scaled);
sum(a, b)
}
Polynomial::Product(a, b) => {
Expression::Product(a, b) => {
let a = a.evaluate(fixed_wire, advice_wire, sum, product, scaled);
let b = b.evaluate(fixed_wire, advice_wire, sum, product, scaled);
product(a, b)
}
Polynomial::Scaled(a, f) => {
Expression::Scaled(a, f) => {
let a = a.evaluate(fixed_wire, advice_wire, sum, product, scaled);
scaled(a, *f)
}
@ -114,40 +114,40 @@ impl<F: Field> Polynomial<F> {
/// Compute the degree of this polynomial
pub fn degree(&self) -> usize {
match self {
Polynomial::Fixed(_) => 1,
Polynomial::Advice(_) => 1,
Polynomial::Sum(a, b) => max(a.degree(), b.degree()),
Polynomial::Product(a, b) => a.degree() + b.degree(),
Polynomial::Scaled(poly, _) => poly.degree(),
Expression::Fixed(_) => 1,
Expression::Advice(_) => 1,
Expression::Sum(a, b) => max(a.degree(), b.degree()),
Expression::Product(a, b) => a.degree() + b.degree(),
Expression::Scaled(poly, _) => poly.degree(),
}
}
}
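As a hedged illustration of the closure-driven `evaluate` above (not part of this diff): once the queried wire values are known, the same call collapses an `Expression` to a single field element. The value slices here are assumptions for illustration.

// Sketch only: `fixed_values` / `advice_values` are hypothetical slices indexed
// by the query indices stored in Expression::Fixed / Expression::Advice.
fn evaluate_to_scalar<F: Field>(
    expr: &Expression<F>,
    fixed_values: &[F],
    advice_values: &[F],
) -> F {
    expr.evaluate(
        &|index| fixed_values[index],
        &|index| advice_values[index],
        &|a, b| a + &b,
        &|a, b| a * &b,
        &|a, scalar| a * &scalar,
    )
}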
impl<F> Add for Polynomial<F> {
type Output = Polynomial<F>;
fn add(self, rhs: Polynomial<F>) -> Polynomial<F> {
Polynomial::Sum(Box::new(self), Box::new(rhs))
impl<F> Add for Expression<F> {
type Output = Expression<F>;
fn add(self, rhs: Expression<F>) -> Expression<F> {
Expression::Sum(Box::new(self), Box::new(rhs))
}
}
impl<F> Mul for Polynomial<F> {
type Output = Polynomial<F>;
fn mul(self, rhs: Polynomial<F>) -> Polynomial<F> {
Polynomial::Product(Box::new(self), Box::new(rhs))
impl<F> Mul for Expression<F> {
type Output = Expression<F>;
fn mul(self, rhs: Expression<F>) -> Expression<F> {
Expression::Product(Box::new(self), Box::new(rhs))
}
}
impl<F> Mul<F> for Polynomial<F> {
type Output = Polynomial<F>;
fn mul(self, rhs: F) -> Polynomial<F> {
Polynomial::Scaled(Box::new(self), rhs)
impl<F> Mul<F> for Expression<F> {
type Output = Expression<F>;
fn mul(self, rhs: F) -> Expression<F> {
Expression::Scaled(Box::new(self), rhs)
}
}
/// Represents an index into a vector where each entry corresponds to a distinct
/// point that polynomials are queried at.
#[derive(Copy, Clone, Debug)]
pub struct PointIndex(pub usize);
pub(crate) struct PointIndex(pub usize);
/// This is a description of the circuit environment, such as the gate, wire and
/// permutation arrangements.
@ -155,7 +155,7 @@ pub struct PointIndex(pub usize);
pub struct MetaCircuit<F> {
pub(crate) num_fixed_wires: usize,
pub(crate) num_advice_wires: usize,
pub(crate) gates: Vec<Polynomial<F>>,
pub(crate) gates: Vec<Expression<F>>,
pub(crate) advice_queries: Vec<(AdviceWire, Rotation)>,
pub(crate) fixed_queries: Vec<(FixedWire, Rotation)>,
@ -229,8 +229,8 @@ impl<F: Field> MetaCircuit<F> {
}
/// Query a fixed wire at a relative position
pub fn query_fixed(&mut self, wire: FixedWire, at: i32) -> Polynomial<F> {
Polynomial::Fixed(self.query_fixed_index(wire, at))
pub fn query_fixed(&mut self, wire: FixedWire, at: i32) -> Expression<F> {
Expression::Fixed(self.query_fixed_index(wire, at))
}
fn query_advice_index(&mut self, wire: AdviceWire, at: i32) -> usize {
@ -255,12 +255,12 @@ impl<F: Field> MetaCircuit<F> {
}
/// Query an advice wire at a relative position
pub fn query_advice(&mut self, wire: AdviceWire, at: i32) -> Polynomial<F> {
Polynomial::Advice(self.query_advice_index(wire, at))
pub fn query_advice(&mut self, wire: AdviceWire, at: i32) -> Expression<F> {
Expression::Advice(self.query_advice_index(wire, at))
}
/// Create a new gate
pub fn create_gate(&mut self, f: impl FnOnce(&mut Self) -> Polynomial<F>) {
pub fn create_gate(&mut self, f: impl FnOnce(&mut Self) -> Expression<F>) {
let poly = f(self);
self.gates.push(poly);
}
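A hedged sketch of the renamed API in use (the wires and the helper function are hypothetical, not part of this commit): the `Add`/`Mul`/`Mul<F>` impls above let a multiplication gate a·b − c = 0 be written directly as an `Expression`.

// Hypothetical helper: constrain a * b = c on three advice wires.
fn mul_gate<F: Field>(meta: &mut MetaCircuit<F>, a: AdviceWire, b: AdviceWire, c: AdviceWire) {
    meta.create_gate(|meta| {
        let a = meta.query_advice(a, 0);
        let b = meta.query_advice(b, 0);
        let c = meta.query_advice(c, 0);
        // a * b - c == 0, expressed as a * b + (-1) * c
        a * b + (c * -F::one())
    });
}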


@ -1,13 +1,15 @@
use super::{
circuit::{AdviceWire, Circuit, ConstraintSystem, FixedWire, MetaCircuit},
domain::Rotation,
hash_point, Error, Proof, SRS,
};
use crate::arithmetic::{
eval_polynomial, get_challenge_scalar, kate_division, parallelize, BatchInvert, Challenge,
Curve, CurveAffine, Field,
};
use crate::polycommit::Params;
use crate::poly::{
commitment::{Blind, OpeningProof, Params},
Coeff, LagrangeCoeff, Polynomial, Rotation,
};
use crate::transcript::Hasher;
impl<C: CurveAffine> Proof<C> {
@ -24,7 +26,8 @@ impl<C: CurveAffine> Proof<C> {
circuit: &ConcreteCircuit,
) -> Result<Self, Error> {
struct WitnessCollection<F: Field> {
advice: Vec<Vec<F>>,
advice: Vec<Polynomial<F, LagrangeCoeff>>,
_marker: std::marker::PhantomData<F>,
}
impl<F: Field> ConstraintSystem<F> for WitnessCollection<F> {
@ -68,11 +71,13 @@ impl<C: CurveAffine> Proof<C> {
}
}
let domain = &srs.domain;
let mut meta = MetaCircuit::default();
let config = ConcreteCircuit::configure(&mut meta);
let mut witness = WitnessCollection {
advice: vec![vec![C::Scalar::zero(); params.n as usize]; meta.num_advice_wires],
advice: vec![domain.empty_lagrange(); meta.num_advice_wires],
_marker: std::marker::PhantomData,
};
// Synthesize the circuit to obtain the witness and other information.
@ -84,7 +89,11 @@ impl<C: CurveAffine> Proof<C> {
let mut transcript = HBase::init(C::Base::one());
// Compute commitments to advice wire polynomials
let advice_blinds: Vec<_> = witness.advice.iter().map(|_| C::Scalar::random()).collect();
let advice_blinds: Vec<_> = witness
.advice
.iter()
.map(|_| Blind(C::Scalar::random()))
.collect();
let advice_commitments_projective: Vec<_> = witness
.advice
.iter()
@ -100,13 +109,11 @@ impl<C: CurveAffine> Proof<C> {
hash_point(&mut transcript, commitment)?;
}
let domain = &srs.domain;
let advice_polys: Vec<_> = witness
.advice
.clone()
.into_iter()
.map(|poly| domain.obtain_poly(poly))
.map(|poly| domain.lagrange_to_coeff(poly))
.collect();
let advice_cosets: Vec<_> = meta
@ -114,7 +121,7 @@ impl<C: CurveAffine> Proof<C> {
.iter()
.map(|&(wire, at)| {
let poly = advice_polys[wire.0].clone();
domain.obtain_coset(poly, at)
domain.coeff_to_extended(poly, at)
})
.collect();
@ -209,15 +216,17 @@ impl<C: CurveAffine> Proof<C> {
tmp *= &modified_advice[row];
z.push(tmp);
}
let z = domain.lagrange_from_vec(z);
let blind = C::Scalar::random();
let blind = Blind(C::Scalar::random());
permutation_product_commitments_projective.push(params.commit_lagrange(&z, blind));
permutation_product_blinds.push(blind);
let z = domain.obtain_poly(z);
let z = domain.lagrange_to_coeff(z);
permutation_product_polys.push(z.clone());
permutation_product_cosets.push(domain.obtain_coset(z.clone(), Rotation::default()));
permutation_product_cosets_inv.push(domain.obtain_coset(z, Rotation(-1)));
permutation_product_cosets
.push(domain.coeff_to_extended(z.clone(), Rotation::default()));
permutation_product_cosets_inv.push(domain.coeff_to_extended(z, Rotation(-1)));
}
let mut permutation_product_commitments =
vec![C::zero(); permutation_product_commitments_projective.len()];
@ -237,56 +246,19 @@ impl<C: CurveAffine> Proof<C> {
let x_2: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
// Evaluate the circuit using the custom gates provided
let mut h_poly = vec![C::Scalar::zero(); domain.coset_len()];
for (i, poly) in meta.gates.iter().enumerate() {
if i != 0 {
parallelize(&mut h_poly, |a, _| {
for a in a.iter_mut() {
*a *= &x_2;
}
});
}
let mut h_poly = domain.empty_extended();
for poly in meta.gates.iter() {
h_poly = h_poly * x_2;
let evaluation: Vec<C::Scalar> = poly.evaluate(
let evaluation = poly.evaluate(
&|index| srs.fixed_cosets[index].clone(),
&|index| advice_cosets[index].clone(),
&|mut a, b| {
parallelize(&mut a, |a, start| {
for (a, b) in a.iter_mut().zip(b[start..].iter()) {
*a += b;
}
});
a
},
&|mut a, b| {
parallelize(&mut a, |a, start| {
for (a, b) in a.iter_mut().zip(b[start..].iter()) {
*a *= b;
}
});
a
},
&|mut a, scalar| {
parallelize(&mut a, |a, _| {
for a in a {
*a *= &scalar;
}
});
a
},
&|a, b| a + &b,
&|a, b| a * &b,
&|a, scalar| a * scalar,
);
assert_eq!(h_poly.len(), evaluation.len());
if i == 0 {
h_poly = evaluation;
} else {
parallelize(&mut h_poly, |a, start| {
for (a, b) in a.iter_mut().zip(evaluation[start..].iter()) {
*a += b;
}
});
}
h_poly = h_poly + &evaluation;
}
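Hedged restatement of the loop above: after processing all $m$ gates, the accumulator holds the random linear combination $h(X) = \sum_{i=0}^{m-1} x_2^{\,m-1-i}\,\mathrm{gate}_i(X)$ over the extended domain. Each iteration multiplies the running sum by $x_2$ before adding the next gate's evaluation (Horner's rule), which the new `Polynomial` operators express as `h_poly = h_poly * x_2` followed by `h_poly = h_poly + &evaluation`.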
// l_0(X) * (1 - z(X)) = 0
@ -305,11 +277,7 @@ impl<C: CurveAffine> Proof<C> {
// z(X) \prod (p(X) + \beta s_i(X) + \gamma) - z(omega^{-1} X) \prod (p(X) + \delta^i \beta X + \gamma)
for (permutation_index, wires) in srs.meta.permutations.iter().enumerate() {
parallelize(&mut h_poly, |a, _| {
for a in a.iter_mut() {
*a *= &x_2;
}
});
h_poly = h_poly * x_2;
let mut left = permutation_product_cosets[permutation_index].clone();
for (advice, permutation) in wires
@ -342,31 +310,25 @@ impl<C: CurveAffine> Proof<C> {
current_delta *= &C::Scalar::DELTA;
}
parallelize(&mut h_poly, |a, start| {
for ((h, left), right) in a
.iter_mut()
.zip(left[start..].iter())
.zip(right[start..].iter())
{
*h += &left;
*h -= &right;
}
});
h_poly = h_poly + &left - &right;
}
// Divide by t(X) = X^{params.n} - 1.
let h_poly = domain.divide_by_vanishing_poly(h_poly);
// Obtain final h(X) polynomial
let h_poly = domain.from_coset(h_poly);
let h_poly = domain.extended_to_coeff(h_poly);
// Split h(X) up into pieces
let h_pieces = h_poly
.chunks_exact(params.n as usize)
.map(|v| v.to_vec())
.map(|v| domain.coeff_from_vec(v.to_vec()))
.collect::<Vec<_>>();
drop(h_poly);
let h_blinds: Vec<_> = h_pieces.iter().map(|_| C::Scalar::random()).collect();
let h_blinds: Vec<_> = h_pieces
.iter()
.map(|_| Blind(C::Scalar::random()))
.collect();
// Compute commitments to each h(X) piece
let h_commitments_projective: Vec<_> = h_pieces
@ -451,30 +413,32 @@ impl<C: CurveAffine> Proof<C> {
// Collapse openings at same points together into single openings using
// x_4 challenge.
let mut q_polys: Vec<Option<Vec<_>>> = vec![None; meta.rotations.len()];
let mut q_blinds = vec![C::Scalar::zero(); meta.rotations.len()];
let mut q_polys: Vec<Option<Polynomial<C::Scalar, Coeff>>> =
vec![None; meta.rotations.len()];
let mut q_blinds = vec![Blind(C::Scalar::zero()); meta.rotations.len()];
let mut q_evals: Vec<_> = vec![C::Scalar::zero(); meta.rotations.len()];
{
let mut accumulate = |point_index: usize, new_poly: &Vec<_>, blind, eval| {
q_polys[point_index]
.as_mut()
.map(|poly| {
parallelize(poly, |q, start| {
for (q, a) in q.iter_mut().zip(new_poly[start..].iter()) {
*q *= &x_4;
*q += a;
}
let mut accumulate =
|point_index: usize, new_poly: &Polynomial<_, Coeff>, blind, eval| {
q_polys[point_index]
.as_mut()
.map(|poly| {
parallelize(poly, |q, start| {
for (q, a) in q.iter_mut().zip(new_poly[start..].iter()) {
*q *= &x_4;
*q += a;
}
});
})
.or_else(|| {
q_polys[point_index] = Some(new_poly.clone());
Some(())
});
})
.or_else(|| {
q_polys[point_index] = Some(new_poly.clone());
Some(())
});
q_blinds[point_index] *= &x_4;
q_blinds[point_index] += &blind;
q_evals[point_index] *= &x_4;
q_evals[point_index] += &eval;
};
q_blinds[point_index] *= x_4;
q_blinds[point_index] += blind;
q_evals[point_index] *= &x_4;
q_evals[point_index] += &eval;
};
for (query_index, &(wire, ref at)) in meta.advice_queries.iter().enumerate() {
let point_index = (*meta.rotations.get(at).unwrap()).0;
@ -493,7 +457,7 @@ impl<C: CurveAffine> Proof<C> {
accumulate(
point_index,
&srs.fixed_polys[wire.0],
C::Scalar::one(),
Blind::default(),
fixed_evals[query_index],
);
}
@ -526,7 +490,7 @@ impl<C: CurveAffine> Proof<C> {
.zip(permutation_evals.iter())
.flat_map(|(polys, evals)| polys.iter().zip(evals.iter()))
{
accumulate(current_index, poly, C::Scalar::one(), *eval);
accumulate(current_index, poly, Blind::default(), *eval);
}
let current_index = (*srs.meta.rotations.get(&Rotation(-1)).unwrap()).0;
@ -543,13 +507,15 @@ impl<C: CurveAffine> Proof<C> {
let x_5: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
let mut f_poly: Option<Vec<C::Scalar>> = None;
let mut f_poly: Option<Polynomial<C::Scalar, Coeff>> = None;
for (&row, &point_index) in meta.rotations.iter() {
let mut poly = q_polys[point_index.0].as_ref().unwrap().clone();
let point = domain.rotate_omega(x_3, row);
poly[0] -= &q_evals[point_index.0];
let mut poly = kate_division(&poly, point);
// TODO: change kate_division interface?
let mut poly = kate_division(&poly[..], point);
poly.push(C::Scalar::zero());
let poly = domain.coeff_from_vec(poly);
f_poly = f_poly
.map(|mut f_poly| {
@ -564,7 +530,7 @@ impl<C: CurveAffine> Proof<C> {
.or_else(|| Some(poly));
}
let mut f_poly = f_poly.unwrap();
let mut f_blind = C::Scalar::random();
let mut f_blind = Blind(C::Scalar::random());
let f_commitment = params.commit(&f_poly, f_blind).to_affine();
@ -590,8 +556,8 @@ impl<C: CurveAffine> Proof<C> {
let x_7: C::Scalar = get_challenge_scalar(Challenge(transcript.squeeze().get_lower_128()));
for (_, &point_index) in meta.rotations.iter() {
f_blind *= &x_7;
f_blind += &q_blinds[point_index.0];
f_blind *= x_7;
f_blind += q_blinds[point_index.0];
parallelize(&mut f_poly, |f, start| {
for (f, a) in f
@ -605,8 +571,7 @@ impl<C: CurveAffine> Proof<C> {
}
// Let's prove that the q_commitment opens at x to the expected value.
let opening = params
.create_proof(&mut transcript, &f_poly, f_blind, x_6)
let opening = OpeningProof::create(&params, &mut transcript, &f_poly, f_blind, x_6)
.map_err(|_| Error::ConstraintSystemFailure)?;
Ok(Proof {


@ -1,10 +1,12 @@
use super::{
circuit::{AdviceWire, Circuit, ConstraintSystem, FixedWire, MetaCircuit},
domain::{EvaluationDomain, Rotation},
Error, SRS,
};
use crate::arithmetic::{Curve, CurveAffine, Field};
use crate::polycommit::Params;
use crate::poly::{
commitment::{Blind, Params},
EvaluationDomain, LagrangeCoeff, Polynomial, Rotation,
};
impl<C: CurveAffine> SRS<C> {
/// This generates a structured reference string for the provided `circuit`
@ -14,10 +16,11 @@ impl<C: CurveAffine> SRS<C> {
circuit: &ConcreteCircuit,
) -> Result<Self, Error> {
struct Assembly<F: Field> {
fixed: Vec<Vec<F>>,
fixed: Vec<Polynomial<F, LagrangeCoeff>>,
mapping: Vec<Vec<Vec<(usize, usize)>>>,
aux: Vec<Vec<Vec<(usize, usize)>>>,
sizes: Vec<Vec<Vec<usize>>>,
_marker: std::marker::PhantomData<F>,
}
impl<F: Field> ConstraintSystem<F> for Assembly<F> {
@ -147,10 +150,11 @@ impl<C: CurveAffine> SRS<C> {
}
let mut assembly: Assembly<C::Scalar> = Assembly {
fixed: vec![vec![C::Scalar::zero(); params.n as usize]; meta.num_fixed_wires],
fixed: vec![domain.empty_lagrange(); meta.num_fixed_wires],
mapping: vec![],
aux: vec![],
sizes: vec![],
_marker: std::marker::PhantomData,
};
// Initialize the copy vector to keep track of copy constraints in all
@ -185,27 +189,23 @@ impl<C: CurveAffine> SRS<C> {
for i in 0..permutation.len() {
// Computes the permutation polynomial based on the permutation
// description in the assembly.
let permutation_poly: Vec<_> = (0..params.n as usize)
.map(|j| {
// assembly.copy[permutation_index] is indexed by wire
// i, and then indexed by row j, obtaining the index of
// the permuted value in deltaomega.
let (permuted_i, permuted_j) = assembly.mapping[permutation_index][i][j];
deltaomega[permuted_i][permuted_j]
})
.collect();
let mut permutation_poly = domain.empty_lagrange();
for (j, p) in permutation_poly.iter_mut().enumerate() {
let (permuted_i, permuted_j) = assembly.mapping[permutation_index][i][j];
*p = deltaomega[permuted_i][permuted_j];
}
// Compute commitment to permutation polynomial
commitments.push(
params
.commit_lagrange(&permutation_poly, C::Scalar::one())
.commit_lagrange(&permutation_poly, Blind::default())
.to_affine(),
);
// Store permutation polynomial and precompute its coset evaluation
inner_permutations.push(permutation_poly.clone());
let poly = domain.obtain_poly(permutation_poly);
let poly = domain.lagrange_to_coeff(permutation_poly);
polys.push(poly.clone());
cosets.push(domain.obtain_coset(poly, Rotation::default()));
cosets.push(domain.coeff_to_extended(poly, Rotation::default()));
}
permutation_commitments.push(commitments);
permutations.push(inner_permutations);
@ -216,13 +216,13 @@ impl<C: CurveAffine> SRS<C> {
let fixed_commitments = assembly
.fixed
.iter()
.map(|poly| params.commit_lagrange(poly, C::Scalar::one()).to_affine())
.map(|poly| params.commit_lagrange(poly, Blind::default()).to_affine())
.collect();
let fixed_polys: Vec<_> = assembly
.fixed
.into_iter()
.map(|poly| domain.obtain_poly(poly))
.map(|poly| domain.lagrange_to_coeff(poly))
.collect();
let fixed_cosets = meta
@ -230,16 +230,16 @@ impl<C: CurveAffine> SRS<C> {
.iter()
.map(|&(wire, at)| {
let poly = fixed_polys[wire.0].clone();
domain.obtain_coset(poly, at)
domain.coeff_to_extended(poly, at)
})
.collect();
// Compute l_0(X)
// TODO: this can be done more efficiently
let mut l0 = vec![C::Scalar::zero(); params.n as usize];
let mut l0 = domain.empty_lagrange();
l0[0] = C::Scalar::one();
let l0 = domain.obtain_poly(l0);
let l0 = domain.obtain_coset(l0, Rotation::default());
let l0 = domain.lagrange_to_coeff(l0);
let l0 = domain.coeff_to_extended(l0, Rotation::default());
Ok(SRS {
domain,


@ -1,6 +1,6 @@
use super::{domain::Rotation, hash_point, Proof, SRS};
use super::{hash_point, Proof, SRS};
use crate::arithmetic::{get_challenge_scalar, Challenge, Curve, CurveAffine, Field};
use crate::polycommit::Params;
use crate::poly::{commitment::Params, Rotation};
use crate::transcript::Hasher;
impl<C: CurveAffine> Proof<C> {
@ -261,8 +261,8 @@ impl<C: CurveAffine> Proof<C> {
}
// Verify the opening proof
params.verify_proof(
&self.opening,
self.opening.verify(
params,
&mut transcript,
x_6,
&f_commitment.to_affine(),

src/poly.rs (new file, 178 lines)

@ -0,0 +1,178 @@
//! Contains utilities for performing arithmetic over univariate polynomials in
//! various forms, including computing commitments to them and provably opening
//! the committed polynomials at arbitrary points.
use crate::arithmetic::{parallelize, Field};
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ops::{Add, Deref, DerefMut, Index, IndexMut, Mul, RangeFrom, RangeFull, Sub};
pub mod commitment;
mod domain;
pub use domain::*;
/// The basis over which a polynomial is described.
pub trait Basis: Clone + Debug + Send + Sync {}
/// The polynomial is defined as coefficients
#[derive(Clone, Debug)]
pub struct Coeff;
impl Basis for Coeff {}
/// The polynomial is defined as coefficients of Lagrange basis polynomials
#[derive(Clone, Debug)]
pub struct LagrangeCoeff;
impl Basis for LagrangeCoeff {}
/// The polynomial is defined as coefficients of Lagrange basis polynomials in
/// an extended size domain which supports multiplication
#[derive(Clone, Debug)]
pub struct ExtendedLagrangeCoeff;
impl Basis for ExtendedLagrangeCoeff {}
/// Represents a univariate polynomial defined over a field and a particular
/// basis.
#[derive(Clone, Debug)]
pub struct Polynomial<F, B> {
values: Vec<F>,
_marker: PhantomData<B>,
}
impl<F, B> Index<usize> for Polynomial<F, B> {
type Output = F;
fn index(&self, index: usize) -> &F {
self.values.index(index)
}
}
impl<F, B> IndexMut<usize> for Polynomial<F, B> {
fn index_mut(&mut self, index: usize) -> &mut F {
self.values.index_mut(index)
}
}
impl<F, B> Index<RangeFrom<usize>> for Polynomial<F, B> {
type Output = [F];
fn index(&self, index: RangeFrom<usize>) -> &[F] {
self.values.index(index)
}
}
impl<F, B> IndexMut<RangeFrom<usize>> for Polynomial<F, B> {
fn index_mut(&mut self, index: RangeFrom<usize>) -> &mut [F] {
self.values.index_mut(index)
}
}
impl<F, B> Index<RangeFull> for Polynomial<F, B> {
type Output = [F];
fn index(&self, index: RangeFull) -> &[F] {
self.values.index(index)
}
}
impl<F, B> IndexMut<RangeFull> for Polynomial<F, B> {
fn index_mut(&mut self, index: RangeFull) -> &mut [F] {
self.values.index_mut(index)
}
}
impl<F, B> Deref for Polynomial<F, B> {
type Target = [F];
fn deref(&self) -> &[F] {
&self.values[..]
}
}
impl<F, B> DerefMut for Polynomial<F, B> {
fn deref_mut(&mut self) -> &mut [F] {
&mut self.values[..]
}
}
impl<F, B> Polynomial<F, B> {
/// Iterate over the values, which are either in coefficient or evaluation
/// form depending on the basis `B`.
pub fn iter(&self) -> impl Iterator<Item = &F> {
self.values.iter()
}
/// Iterate over the values mutably, which are either in coefficient or
/// evaluation form depending on the basis `B`.
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut F> {
self.values.iter_mut()
}
/// Gets the length of this polynomial in terms of the number of
/// coefficients used to describe it.
pub fn len(&self) -> usize {
self.values.len()
}
}
impl<'a, F: Field, B: Basis> Add<&'a Polynomial<F, B>> for Polynomial<F, B> {
type Output = Polynomial<F, B>;
fn add(mut self, rhs: &'a Polynomial<F, B>) -> Polynomial<F, B> {
parallelize(&mut self.values, |lhs, start| {
for (lhs, rhs) in lhs.iter_mut().zip(rhs.values[start..].iter()) {
*lhs += *rhs;
}
});
self
}
}
impl<'a, F: Field, B: Basis> Sub<&'a Polynomial<F, B>> for Polynomial<F, B> {
type Output = Polynomial<F, B>;
fn sub(mut self, rhs: &'a Polynomial<F, B>) -> Polynomial<F, B> {
parallelize(&mut self.values, |lhs, start| {
for (lhs, rhs) in lhs.iter_mut().zip(rhs.values[start..].iter()) {
*lhs -= *rhs;
}
});
self
}
}
impl<'a, F: Field> Mul<&'a Polynomial<F, ExtendedLagrangeCoeff>>
for Polynomial<F, ExtendedLagrangeCoeff>
{
type Output = Polynomial<F, ExtendedLagrangeCoeff>;
fn mul(
mut self,
rhs: &'a Polynomial<F, ExtendedLagrangeCoeff>,
) -> Polynomial<F, ExtendedLagrangeCoeff> {
parallelize(&mut self.values, |lhs, start| {
for (lhs, rhs) in lhs.iter_mut().zip(rhs.values[start..].iter()) {
*lhs *= *rhs;
}
});
self
}
}
impl<'a, F: Field, B: Basis> Mul<F> for Polynomial<F, B> {
type Output = Polynomial<F, B>;
fn mul(mut self, rhs: F) -> Polynomial<F, B> {
parallelize(&mut self.values, |lhs, _| {
for lhs in lhs.iter_mut() {
*lhs *= rhs;
}
});
self
}
}
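A minimal sketch (not part of this commit) of what the `Basis` markers buy: polynomials in different bases are distinct types, so mixing them is a compile-time error rather than a silent arithmetic bug. `Fq` mirrors the field used in the commitment tests.

#[test]
fn basis_markers_are_enforced() {
    use crate::arithmetic::Fq;

    let domain = EvaluationDomain::<Fq>::new(1, 3);
    let evals = domain.empty_lagrange(); // Polynomial<Fq, LagrangeCoeff>
    let coeffs = domain.empty_coeff();   // Polynomial<Fq, Coeff>

    // `coeffs + &evals` would not type-check; convert the basis explicitly.
    let sum = coeffs + &domain.lagrange_to_coeff(evals);
    assert_eq!(sum.len(), 8);
}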

src/poly/commitment.rs (new file, 280 lines)

@ -0,0 +1,280 @@
//! This module contains an implementation of the polynomial commitment scheme
//! described in the [Halo][halo] paper.
//!
//! [halo]: https://eprint.iacr.org/2019/1021
use super::{Coeff, LagrangeCoeff, Polynomial};
use crate::arithmetic::{best_fft, best_multiexp, parallelize, Curve, CurveAffine, Field};
use crate::transcript::Hasher;
use std::ops::{Add, AddAssign, Mul, MulAssign};
mod prover;
mod verifier;
/// This is a proof object for the polynomial commitment scheme opening.
#[derive(Debug, Clone)]
pub struct OpeningProof<C: CurveAffine> {
fork: u8,
rounds: Vec<(C, C)>,
delta: C,
z1: C::Scalar,
z2: C::Scalar,
}
/// These are the public parameters for the polynomial commitment scheme.
#[derive(Debug)]
pub struct Params<C: CurveAffine> {
pub(crate) k: u32,
pub(crate) n: u64,
pub(crate) g: Vec<C>,
pub(crate) g_lagrange: Vec<C>,
pub(crate) h: C,
}
impl<C: CurveAffine> Params<C> {
/// Initializes parameters for the curve, given a random oracle to draw
/// points from.
pub fn new<H: Hasher<C::Base>>(k: u32) -> Self {
// This is usually a limitation on the curve, but we also want 32-bit
// architectures to be supported.
assert!(k < 32);
// No goofy hardware please.
assert!(core::mem::size_of::<usize>() >= 4);
let n: u64 = 1 << k;
let g = {
let hasher = &H::init(C::Base::zero());
let mut g = Vec::with_capacity(n as usize);
g.resize(n as usize, C::zero());
parallelize(&mut g, move |g, start| {
let mut cur_value = C::Base::from(start as u64);
for g in g.iter_mut() {
let mut hasher = hasher.clone();
hasher.absorb(cur_value);
cur_value += &C::Base::one();
loop {
let x = hasher.squeeze().to_bytes();
let p = C::from_bytes(&x);
if bool::from(p.is_some()) {
*g = p.unwrap();
break;
}
}
}
});
g
};
// Let's evaluate all of the Lagrange basis polynomials
// using an inverse FFT.
let mut alpha_inv = C::Scalar::ROOT_OF_UNITY_INV;
for _ in k..C::Scalar::S {
alpha_inv = alpha_inv.square();
}
let mut g_lagrange_projective = g.iter().map(|g| g.to_projective()).collect::<Vec<_>>();
best_fft(&mut g_lagrange_projective, alpha_inv, k);
let minv = C::Scalar::TWO_INV.pow_vartime(&[k as u64, 0, 0, 0]);
parallelize(&mut g_lagrange_projective, |g, _| {
for g in g.iter_mut() {
*g *= minv;
}
});
let g_lagrange = {
let mut g_lagrange = vec![C::zero(); n as usize];
parallelize(&mut g_lagrange, |g_lagrange, starts| {
C::Projective::batch_to_affine(
&g_lagrange_projective[starts..(starts + g_lagrange.len())],
g_lagrange,
);
});
drop(g_lagrange_projective);
g_lagrange
};
let h = {
let mut hasher = H::init(C::Base::zero());
hasher.absorb(-C::Base::one());
let x = hasher.squeeze().to_bytes();
let p = C::from_bytes(&x);
p.unwrap()
};
Params {
k,
n,
g,
g_lagrange,
h,
}
}
/// This computes a commitment to a polynomial described by the provided
/// slice of coefficients. The commitment will be blinded by the blinding
/// factor `r`.
pub fn commit(
&self,
poly: &Polynomial<C::Scalar, Coeff>,
r: Blind<C::Scalar>,
) -> C::Projective {
let mut tmp_scalars = Vec::with_capacity(poly.len() + 1);
let mut tmp_bases = Vec::with_capacity(poly.len() + 1);
tmp_scalars.extend(poly.iter());
tmp_scalars.push(r.0);
tmp_bases.extend(self.g.iter());
tmp_bases.push(self.h);
best_multiexp::<C>(&tmp_scalars, &tmp_bases)
}
/// This commits to a polynomial using its evaluations over the $2^k$ size
/// evaluation domain. The commitment will be blinded by the blinding factor
/// `r`.
pub fn commit_lagrange(
&self,
poly: &Polynomial<C::Scalar, LagrangeCoeff>,
r: Blind<C::Scalar>,
) -> C::Projective {
let mut tmp_scalars = Vec::with_capacity(poly.len() + 1);
let mut tmp_bases = Vec::with_capacity(poly.len() + 1);
tmp_scalars.extend(poly.iter());
tmp_scalars.push(r.0);
tmp_bases.extend(self.g_lagrange.iter());
tmp_bases.push(self.h);
best_multiexp::<C>(&tmp_scalars, &tmp_bases)
}
}
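Hedged restatement of the two commitment routines above: both are Pedersen-style multiexponentiations, $\mathrm{Commit}(p; r) = r\,H + \sum_i p_i\,G_i$, where $H$ is `self.h` and the $G_i$ are `self.g` when $p$ is given in coefficient form (`commit`) or `self.g_lagrange` when $p$ is given by its evaluations over the $2^k$ domain (`commit_lagrange`). `test_commit_lagrange` below checks that the two agree on matching representations of the same polynomial.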
/// Wrapper type around a blinding factor.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct Blind<F>(pub F);
impl<F: Field> Default for Blind<F> {
fn default() -> Self {
Blind(F::one())
}
}
impl<F: Field> Add for Blind<F> {
type Output = Self;
fn add(self, rhs: Blind<F>) -> Self {
Blind(self.0 + rhs.0)
}
}
impl<F: Field> Mul for Blind<F> {
type Output = Self;
fn mul(self, rhs: Blind<F>) -> Self {
Blind(self.0 * rhs.0)
}
}
impl<F: Field> AddAssign for Blind<F> {
fn add_assign(&mut self, rhs: Blind<F>) {
self.0 += rhs.0;
}
}
impl<F: Field> MulAssign for Blind<F> {
fn mul_assign(&mut self, rhs: Blind<F>) {
self.0 *= rhs.0;
}
}
impl<F: Field> AddAssign<F> for Blind<F> {
fn add_assign(&mut self, rhs: F) {
self.0 += rhs;
}
}
impl<F: Field> MulAssign<F> for Blind<F> {
fn mul_assign(&mut self, rhs: F) {
self.0 *= rhs;
}
}
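For context, a hedged sketch (not part of this commit) of how the prover mixes these impls when folding opening blinds with a challenge, as `q_blinds` does in plonk/prover.rs:

// Hypothetical helper: scale the accumulated blind by a challenge, then absorb
// the next blind, using MulAssign<F> and AddAssign<Blind<F>> from above.
fn fold_blind<F: Field>(acc: &mut Blind<F>, challenge: F, next: Blind<F>) {
    *acc *= challenge;
    *acc += next;
}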
#[test]
fn test_commit_lagrange() {
const K: u32 = 6;
use crate::arithmetic::{EpAffine, Fp, Fq};
use crate::transcript::DummyHash;
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
let domain = super::EvaluationDomain::new(1, K);
let mut a = domain.empty_lagrange();
for (i, a) in a.iter_mut().enumerate() {
*a = Fq::from(i as u64);
}
let b = domain.lagrange_to_coeff(a.clone());
let alpha = Blind(Fq::random());
assert_eq!(params.commit(&b, alpha), params.commit_lagrange(&a, alpha));
}
#[test]
fn test_opening_proof() {
const K: u32 = 6;
use super::{
commitment::{Blind, Params},
EvaluationDomain,
};
use crate::arithmetic::{
eval_polynomial, get_challenge_scalar, Challenge, Curve, EpAffine, Field, Fp, Fq,
};
use crate::transcript::{DummyHash, Hasher};
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
let domain = EvaluationDomain::new(1, K);
let mut px = domain.empty_coeff();
for (i, a) in px.iter_mut().enumerate() {
*a = Fq::from(i as u64);
}
let blind = Blind(Fq::random());
let p = params.commit(&px, blind).to_affine();
let mut transcript = DummyHash::init(Field::one());
let (p_x, p_y) = p.get_xy().unwrap();
transcript.absorb(p_x);
transcript.absorb(p_y);
let x_packed = transcript.squeeze().get_lower_128();
let x: Fq = get_challenge_scalar(Challenge(x_packed));
// Evaluate the polynomial
let v = eval_polynomial(&px, x);
transcript.absorb(Fp::from_bytes(&v.to_bytes()).unwrap()); // unlikely to fail since p ~ q
loop {
let mut transcript_dup = transcript.clone();
let opening_proof = OpeningProof::create(&params, &mut transcript, &px, blind, x);
if opening_proof.is_err() {
transcript = transcript_dup;
transcript.absorb(Field::one());
} else {
let opening_proof = opening_proof.unwrap();
assert!(opening_proof.verify(&params, &mut transcript_dup, x, &p, v));
break;
}
}
}


@ -0,0 +1,234 @@
use super::super::{Coeff, Polynomial};
use super::{Blind, OpeningProof, Params};
use crate::arithmetic::{
best_multiexp, compute_inner_product, get_challenge_scalar, parallelize, Challenge, Curve,
CurveAffine, Field,
};
use crate::transcript::Hasher;
impl<C: CurveAffine> OpeningProof<C> {
/// Create a polynomial commitment opening proof for the polynomial defined
/// by the coefficients `px`, the blinding factor `blind` used for the
/// polynomial commitment, and the point `x` that the polynomial is
/// evaluated at.
///
/// This function will panic if the provided polynomial is too large with
/// respect to the polynomial commitment parameters.
///
/// **Important:** This function assumes that the provided `transcript` has
/// already seen the common inputs: the polynomial commitment P, the claimed
/// opening v, and the point x. It's probably also nice for the transcript
/// to have seen the elliptic curve description and the SRS, if you want to
/// be rigorous.
pub fn create<H: Hasher<C::Base>>(
params: &Params<C>,
transcript: &mut H,
px: &Polynomial<C::Scalar, Coeff>,
blind: Blind<C::Scalar>,
x: C::Scalar,
) -> Result<Self, ()> {
let mut blind = blind.0;
// We're limited to polynomials of degree n - 1.
assert!(px.len() <= params.n as usize);
let mut fork = 0;
// TODO: remove this hack and force the caller to deal with it
loop {
let mut transcript = transcript.clone();
transcript.absorb(C::Base::from_u64(fork as u64));
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt();
if u_y.is_none() {
fork += 1;
} else {
break;
}
}
transcript.absorb(C::Base::from_u64(fork as u64));
// Compute U
let u = {
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt().unwrap();
C::from_xy(u_x, u_y).unwrap()
};
// Initialize the vector `a` as the coefficients of the polynomial,
// rounding up to the parameters.
let mut a = px.to_vec();
a.resize(params.n as usize, C::Scalar::zero());
// Initialize the vector `b` as the powers of `x`. The inner product of
// `a` and `b` is the evaluation of the polynomial at `x`.
let mut b = Vec::with_capacity(1 << params.k);
{
let mut cur = C::Scalar::one();
for _ in 0..(1 << params.k) {
b.push(cur);
cur *= &x;
}
}
// Initialize the vector `G` from the SRS. We'll be progressively
// collapsing this vector into smaller and smaller vectors until it is
// of length 1.
let mut g = params.g.clone();
// Perform the inner product argument, round by round.
let mut rounds = Vec::with_capacity(params.k as usize);
for k in (1..=params.k).rev() {
let half = 1 << (k - 1); // half the length of `a`, `b`, `G`
// Compute L, R
//
// TODO: If we modify multiexp to take "extra" bases, we could speed
// this piece up a bit by combining the multiexps.
let l = best_multiexp(&a[0..half], &g[half..]);
let r = best_multiexp(&a[half..], &g[0..half]);
let value_l = compute_inner_product(&a[0..half], &b[half..]);
let value_r = compute_inner_product(&a[half..], &b[0..half]);
let mut l_randomness = C::Scalar::random();
let r_randomness = C::Scalar::random();
let l = l + &best_multiexp(&[value_l, l_randomness], &[u, params.h]);
let r = r + &best_multiexp(&[value_r, r_randomness], &[u, params.h]);
let mut l = l.to_affine();
let r = r.to_affine();
let challenge = loop {
// We'll fork the transcript and adjust our randomness
// until the challenge is a square.
let mut transcript = transcript.clone();
// We expect these to not be points at infinity due to the randomness.
let (l_x, l_y) = l.get_xy().unwrap();
let (r_x, r_y) = r.get_xy().unwrap();
// Feed L and R into the cloned transcript...
transcript.absorb(l_x);
transcript.absorb(l_y);
transcript.absorb(r_x);
transcript.absorb(r_y);
// ... and get the squared challenge.
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq: C::Scalar = get_challenge_scalar(Challenge(challenge_sq_packed));
// There might be no square root, in which case we'll fork the
// transcript.
let challenge = challenge_sq.deterministic_sqrt();
if let Some(challenge) = challenge {
break challenge;
} else {
// Try again, with slightly different randomness
l = (l + params.h).to_affine();
l_randomness += &C::Scalar::one();
}
};
// Challenge is unlikely to be zero.
let challenge_inv = challenge.invert().unwrap();
let challenge_sq_inv = challenge_inv.square();
let challenge_sq = challenge.square();
// Feed L and R into the real transcript
let (l_x, l_y) = l.get_xy().unwrap();
let (r_x, r_y) = r.get_xy().unwrap();
transcript.absorb(l_x);
transcript.absorb(l_y);
transcript.absorb(r_x);
transcript.absorb(r_y);
// And obtain the challenge, even though we already have it, since
// squeezing affects the transcript.
{
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq_expected = get_challenge_scalar(Challenge(challenge_sq_packed));
assert_eq!(challenge_sq, challenge_sq_expected);
}
// Done with this round.
rounds.push((l, r));
// Collapse `a` and `b`.
// TODO: parallelize
for i in 0..half {
a[i] = (a[i] * &challenge) + &(a[i + half] * &challenge_inv);
b[i] = (b[i] * &challenge_inv) + &(b[i + half] * &challenge);
}
a.truncate(half);
b.truncate(half);
// Collapse `G`
parallel_generator_collapse(&mut g, challenge, challenge_inv);
g.truncate(half);
// Update randomness (the synthetic blinding factor at the end)
blind += &(l_randomness * &challenge_sq);
blind += &(r_randomness * &challenge_sq_inv);
}
// We have fully collapsed `a`, `b`, `G`
assert_eq!(a.len(), 1);
let a = a[0];
assert_eq!(b.len(), 1);
let b = b[0];
assert_eq!(g.len(), 1);
let g = g[0];
// Random nonces for the zero-knowledge opening
let d = C::Scalar::random();
let s = C::Scalar::random();
let delta = best_multiexp(&[d, d * &b, s], &[g, u, params.h]).to_affine();
let (delta_x, delta_y) = delta.get_xy().unwrap();
// Feed delta into the transcript
transcript.absorb(delta_x);
transcript.absorb(delta_y);
// Obtain the challenge c.
let c_packed = transcript.squeeze().get_lower_128();
let c: C::Scalar = get_challenge_scalar(Challenge(c_packed));
// Compute z1 and z2 as described in the Halo paper.
let z1 = a * &c + &d;
let z2 = c * &blind + &s;
Ok(OpeningProof {
fork,
rounds,
delta,
z1,
z2,
})
}
}
fn parallel_generator_collapse<C: CurveAffine>(
g: &mut [C],
challenge: C::Scalar,
challenge_inv: C::Scalar,
) {
let len = g.len() / 2;
let (mut g_lo, g_hi) = g.split_at_mut(len);
parallelize(&mut g_lo, |g_lo, start| {
let g_hi = &g_hi[start..];
let mut tmp = Vec::with_capacity(g_lo.len());
for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) {
// TODO: could use multiexp
tmp.push(((*g_lo) * challenge_inv) + &((*g_hi) * challenge));
}
C::Projective::batch_to_affine(&tmp, g_lo);
});
}


@ -0,0 +1,179 @@
use super::{OpeningProof, Params};
use crate::transcript::Hasher;
use crate::arithmetic::{
best_multiexp, get_challenge_scalar, Challenge, Curve, CurveAffine, Field,
};
impl<C: CurveAffine> OpeningProof<C> {
/// Checks to see if an [`OpeningProof`] is valid given the current
/// `transcript`, and a point `x` that the polynomial commitment `p` opens
/// purportedly to the value `v`.
pub fn verify<H: Hasher<C::Base>>(
&self,
params: &Params<C>,
transcript: &mut H,
x: C::Scalar,
p: &C,
v: C::Scalar,
) -> bool {
// Check for well-formedness
if self.rounds.len() != params.k as usize {
return false;
}
transcript.absorb(C::Base::from_u64(self.fork as u64));
// Compute U
let u = {
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt();
if u_y.is_none() {
return false;
}
let u_y = u_y.unwrap();
C::from_xy(u_x, u_y).unwrap()
};
let mut extra_scalars = Vec::with_capacity(self.rounds.len() * 2 + 4 + params.n as usize);
let mut extra_bases = Vec::with_capacity(self.rounds.len() * 2 + 4 + params.n as usize);
// Data about the challenges from each of the rounds.
let mut challenges = Vec::with_capacity(self.rounds.len());
let mut challenges_inv = Vec::with_capacity(self.rounds.len());
let mut challenges_sq = Vec::with_capacity(self.rounds.len());
let mut allinv = Field::one();
for round in &self.rounds {
// Feed L and R into the transcript.
let l = round.0.get_xy();
let r = round.1.get_xy();
if bool::from(l.is_none() | r.is_none()) {
return false;
}
let l = l.unwrap();
let r = r.unwrap();
transcript.absorb(l.0);
transcript.absorb(l.1);
transcript.absorb(r.0);
transcript.absorb(r.1);
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq: C::Scalar = get_challenge_scalar(Challenge(challenge_sq_packed));
let challenge = challenge_sq.deterministic_sqrt();
if challenge.is_none() {
// We didn't sample a square.
return false;
}
let challenge = challenge.unwrap();
let challenge_inv = challenge.invert();
if bool::from(challenge_inv.is_none()) {
// We sampled zero for some reason, unlikely to happen by
// chance.
return false;
}
let challenge_inv = challenge_inv.unwrap();
allinv *= challenge_inv;
let challenge_sq_inv = challenge_inv.square();
extra_scalars.push(challenge_sq);
extra_bases.push(round.0);
extra_scalars.push(challenge_sq_inv);
extra_bases.push(round.1);
challenges.push(challenge);
challenges_inv.push(challenge_inv);
challenges_sq.push(challenge_sq);
}
let delta = self.delta.get_xy();
if bool::from(delta.is_none()) {
return false;
}
let delta = delta.unwrap();
// Feed delta into the transcript
transcript.absorb(delta.0);
transcript.absorb(delta.1);
// Get the challenge `c`
let c_packed = transcript.squeeze().get_lower_128();
let c: C::Scalar = get_challenge_scalar(Challenge(c_packed));
// Check
// [c] P + [c * v] U + [c] sum(L_i * u_i^2) + [c] sum(R_i * u_i^-2) + delta - [z1] G - [z1 * b] U - [z2] H
// = 0
for scalar in &mut extra_scalars {
*scalar *= &c;
}
let b = compute_b(x, &challenges, &challenges_inv);
let neg_z1 = -self.z1;
// [c] P
extra_bases.push(*p);
extra_scalars.push(c);
// [c * v] U - [z1 * b] U
extra_bases.push(u);
extra_scalars.push((c * &v) + &(neg_z1 * &b));
// delta
extra_bases.push(self.delta);
extra_scalars.push(Field::one());
// - [z2] H
extra_bases.push(params.h);
extra_scalars.push(-self.z2);
// - [z1] G
extra_bases.extend(&params.g);
let mut s = compute_s(&challenges_sq, allinv);
// TODO: parallelize
for s in &mut s {
*s *= &neg_z1;
}
extra_scalars.extend(s);
bool::from(best_multiexp(&extra_scalars, &extra_bases).is_zero())
}
}
fn compute_b<F: Field>(x: F, challenges: &[F], challenges_inv: &[F]) -> F {
assert!(!challenges.is_empty());
assert_eq!(challenges.len(), challenges_inv.len());
if challenges.len() == 1 {
*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x
} else {
(*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x)
* compute_b(
x.square(),
&challenges[0..(challenges.len() - 1)],
&challenges_inv[0..(challenges.len() - 1)],
)
}
}
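Read non-recursively (hedged restatement): with the round challenges $u_0, \ldots, u_{k-1}$ in the order they were absorbed, `compute_b` evaluates $b(x) = \prod_{i=0}^{k-1} \bigl(u_i^{-1} + u_i\, x^{2^{\,k-1-i}}\bigr)$, which is the scalar that the prover's vector $(1, x, x^2, \ldots, x^{n-1})$ collapses to after all $k$ folding rounds.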
// TODO: parallelize
fn compute_s<F: Field>(challenges_sq: &[F], allinv: F) -> Vec<F> {
let lg_n = challenges_sq.len();
let n = 1 << lg_n;
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
s
}
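Similarly (hedged): entry $j$ of the vector returned by `compute_s` is $s_j = \prod_{i=0}^{k-1} u_i^{\,\epsilon_i(j)}$, where $\epsilon_i(j) = +1$ if bit $k-1-i$ of $j$ is set and $-1$ otherwise; `allinv` seeds $s_0 = \prod_i u_i^{-1}$, and every later entry reuses an earlier one, multiplying in a single squared challenge.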


@ -1,5 +1,11 @@
//! Contains utilities for performing polynomial arithmetic over an evaluation
//! domain that is of a suitable size for the application.
use crate::arithmetic::{best_fft, parallelize, BatchInvert, Field, Group};
use super::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial};
use std::marker::PhantomData;
/// Describes a relative location in the evaluation domain; applying a rotation
/// by i will rotate the vector in the evaluation domain by i.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, Ord, PartialOrd)]
@ -12,8 +18,8 @@ impl Default for Rotation {
}
/// This structure contains precomputed constants and other details needed for
/// performing operations on an evaluation domain of size $2^k$ in the context
/// of PLONK.
/// performing operations on an evaluation domain of size $2^k$ and an extended
/// domain of size $2^{k} * j$ with $j \neq 0$.
#[derive(Debug)]
pub struct EvaluationDomain<G: Group> {
n: u64,
@ -33,14 +39,11 @@ pub struct EvaluationDomain<G: Group> {
}
impl<G: Group> EvaluationDomain<G> {
/// This constructs a new evaluation domain object (containing precomputed
/// constants) for operating on an evaluation domain of size $2^k$ and for
/// some operations over an extended domain of size $2^{k + j}$ where $j$ is
/// sufficiently large to describe the quotient polynomial depending on the
/// maximum degree of all PLONK gates.
pub fn new(gate_degree: u32, k: u32) -> Self {
/// This constructs a new evaluation domain object based on the provided
/// values $j, k$.
pub fn new(j: u32, k: u32) -> Self {
// quotient_poly_degree * params.n - 1 is the degree of the quotient polynomial
let quotient_poly_degree = (gate_degree - 1) as u64;
let quotient_poly_degree = (j - 1) as u64;
let n = 1u64 << k;
@ -131,32 +134,85 @@ impl<G: Group> EvaluationDomain<G> {
}
}
/// Obtains a polynomial in Lagrange form when given a vector of Lagrange
/// coefficients of size `n`; panics if the provided vector is the wrong
/// length.
pub fn lagrange_from_vec(&self, values: Vec<G>) -> Polynomial<G, LagrangeCoeff> {
assert_eq!(values.len(), self.n as usize);
Polynomial {
values,
_marker: PhantomData,
}
}
/// Obtains a polynomial in coefficient form when given a vector of
/// coefficients of size `n`; panics if the provided vector is the wrong
/// length.
pub fn coeff_from_vec(&self, values: Vec<G>) -> Polynomial<G, Coeff> {
assert_eq!(values.len(), self.n as usize);
Polynomial {
values,
_marker: PhantomData,
}
}
/// Returns an empty (zero) polynomial in the coefficient basis
pub fn empty_coeff(&self) -> Polynomial<G, Coeff> {
Polynomial {
values: vec![G::group_zero(); self.n as usize],
_marker: PhantomData,
}
}
/// Returns an empty (zero) polynomial in the Lagrange coefficient basis
pub fn empty_lagrange(&self) -> Polynomial<G, LagrangeCoeff> {
Polynomial {
values: vec![G::group_zero(); self.n as usize],
_marker: PhantomData,
}
}
/// Returns an empty (zero) polynomial in the extended Lagrange coefficient
/// basis
pub fn empty_extended(&self) -> Polynomial<G, ExtendedLagrangeCoeff> {
Polynomial {
values: vec![G::group_zero(); self.extended_len()],
_marker: PhantomData,
}
}
/// This takes us from an n-length vector into the coefficient form.
///
/// This function will panic if the provided vector is not the correct
/// length.
pub fn obtain_poly(&self, mut a: Vec<G>) -> Vec<G> {
assert_eq!(a.len(), 1 << self.k);
pub fn lagrange_to_coeff(&self, mut a: Polynomial<G, LagrangeCoeff>) -> Polynomial<G, Coeff> {
assert_eq!(a.values.len(), 1 << self.k);
// Perform inverse FFT to obtain the polynomial in coefficient form
Self::ifft(&mut a, self.omega_inv, self.k, self.ifft_divisor);
Self::ifft(&mut a.values, self.omega_inv, self.k, self.ifft_divisor);
a
Polynomial {
values: a.values,
_marker: PhantomData,
}
}
/// This takes us from an n-length coefficient vector into the coset
/// This takes us from an n-length coefficient vector into the extended
/// evaluation domain, rotating by `rotation` if desired.
///
/// This function will panic if the provided vector is not the correct
/// length.
pub fn obtain_coset(&self, mut a: Vec<G>, rotation: Rotation) -> Vec<G> {
assert_eq!(a.len(), 1 << self.k);
pub fn coeff_to_extended(
&self,
mut a: Polynomial<G, Coeff>,
rotation: Rotation,
) -> Polynomial<G, ExtendedLagrangeCoeff> {
assert_eq!(a.values.len(), 1 << self.k);
assert!(rotation.0 != i32::MIN);
if rotation.0 == 0 {
// In this special case, the powers of zeta repeat so we do not need
// to compute them.
Self::distribute_powers_zeta(&mut a, self.g_coset);
Self::distribute_powers_zeta(&mut a.values, self.g_coset);
} else {
let mut g = G::Scalar::ZETA;
if rotation.0 > 0 {
@ -166,24 +222,29 @@ impl<G: Group> EvaluationDomain<G> {
.omega_inv
.pow_vartime(&[rotation.0.abs() as u64, 0, 0, 0]);
}
Self::distribute_powers(&mut a, g);
Self::distribute_powers(&mut a.values, g);
}
a.values.resize(self.extended_len(), G::group_zero());
best_fft(&mut a.values, self.extended_omega, self.extended_k);
Polynomial {
values: a.values,
_marker: PhantomData,
}
a.resize(self.coset_len(), G::group_zero());
best_fft(&mut a, self.extended_omega, self.extended_k);
a
}
/// This takes us from the coset evaluation domain and gets us the quotient
/// polynomial coefficients.
/// This takes us from the extended evaluation domain and gets us the
/// quotient polynomial coefficients.
///
/// This function will panic if the provided vector is not the correct
/// length.
pub fn from_coset(&self, mut a: Vec<G>) -> Vec<G> {
assert_eq!(a.len(), self.coset_len());
// TODO/FIXME: caller should be responsible for truncating
pub fn extended_to_coeff(&self, mut a: Polynomial<G, ExtendedLagrangeCoeff>) -> Vec<G> {
assert_eq!(a.values.len(), self.extended_len());
// Inverse FFT
Self::ifft(
&mut a,
&mut a.values,
self.extended_omega_inv,
self.extended_k,
self.extended_ifft_divisor,
@ -191,31 +252,38 @@ impl<G: Group> EvaluationDomain<G> {
// Distribute powers to move from coset; opposite from the
// transformation we performed earlier.
Self::distribute_powers(&mut a, self.g_coset_inv);
Self::distribute_powers(&mut a.values, self.g_coset_inv);
// Truncate it to match the size of the quotient polynomial; the
// evaluation domain might be slightly larger than necessary because
// it always lies on a power-of-two boundary.
a.truncate((&self.n * self.quotient_poly_degree) as usize);
a.values
.truncate((&self.n * self.quotient_poly_degree) as usize);
a
a.values
}
/// This divides the polynomial (in the coset domain) by the vanishing
/// polynomial.
pub fn divide_by_vanishing_poly(&self, mut h_poly: Vec<G>) -> Vec<G> {
assert_eq!(h_poly.len(), self.coset_len());
/// This divides the polynomial (in the extended domain) by the vanishing
/// polynomial of the $2^k$ size domain.
pub fn divide_by_vanishing_poly(
&self,
mut a: Polynomial<G, ExtendedLagrangeCoeff>,
) -> Polynomial<G, ExtendedLagrangeCoeff> {
assert_eq!(a.values.len(), self.extended_len());
// Divide to obtain the quotient polynomial in the coset evaluation
// domain.
parallelize(&mut h_poly, |h, mut index| {
parallelize(&mut a.values, |h, mut index| {
for h in h {
h.group_scale(&self.t_evaluations[index % self.t_evaluations.len()]);
index += 1;
}
});
h_poly
Polynomial {
values: a.values,
_marker: PhantomData,
}
}
fn distribute_powers_zeta(mut a: &mut [G], g: G::Scalar) {
@ -252,24 +320,31 @@ impl<G: Group> EvaluationDomain<G> {
});
}
pub fn coset_len(&self) -> usize {
/// Get the size of the extended domain
pub fn extended_len(&self) -> usize {
1 << self.extended_k
}
/// Get $\omega$, the generator of the $2^k$ order multiplicative subgroup.
pub fn get_omega(&self) -> G::Scalar {
self.omega
}
pub fn get_extended_omega(&self) -> G::Scalar {
self.extended_omega
}
/// Get $\omega^{-1}$, the inverse of the generator of the $2^k$ order
/// multiplicative subgroup.
pub fn get_omega_inv(&self) -> G::Scalar {
self.omega_inv
}
pub fn rotate_omega(&self, constant: G::Scalar, rotation: Rotation) -> G::Scalar {
let mut point = constant;
/// Get the generator of the extended domain's multiplicative subgroup.
pub fn get_extended_omega(&self) -> G::Scalar {
self.extended_omega
}
/// Multiplies a value by some power of $\omega$, essentially rotating over
/// the domain.
pub fn rotate_omega(&self, value: G::Scalar, rotation: Rotation) -> G::Scalar {
let mut point = value;
if rotation.0 >= 0 {
point *= &self.get_omega().pow(&[rotation.0 as u64, 0, 0, 0]);
} else {
@ -280,6 +355,7 @@ impl<G: Group> EvaluationDomain<G> {
point
}
/// Gets the barycentric weight of $1$ over the $2^k$ size domain.
pub fn get_barycentric_weight(&self) -> G::Scalar {
self.barycentric_weight
}
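A hedged end-to-end sketch (not part of this commit) of the renamed conversions, with `obtain_poly`/`obtain_coset`/`from_coset` now called `lagrange_to_coeff`/`coeff_to_extended`/`extended_to_coeff`; `Fq` mirrors the commitment tests and $j = 2$, $k = 4$ are arbitrary choices.

#[test]
fn conversion_pipeline() {
    use crate::arithmetic::Fq;

    let domain = EvaluationDomain::<Fq>::new(2, 4);
    let mut values = domain.empty_lagrange();            // LagrangeCoeff basis
    values[0] = Fq::one();
    let coeffs = domain.lagrange_to_coeff(values);        // Coeff basis
    let coset = domain.coeff_to_extended(coeffs, Rotation::default());
    let quotient = domain.divide_by_vanishing_poly(coset);
    let h = domain.extended_to_coeff(quotient);           // plain Vec of coefficients
    assert!(h.len() <= domain.extended_len());
}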


@ -1,600 +0,0 @@
//! This module contains an implementation of the polynomial commitment scheme
//! described in the [Halo][halo] paper.
//!
//! [halo]: https://eprint.iacr.org/2019/1021
use crate::arithmetic::{
best_fft, best_multiexp, compute_inner_product, get_challenge_scalar, parallelize, Challenge,
Curve, CurveAffine, Field,
};
use crate::transcript::Hasher;
/// This is a proof object for the polynomial commitment scheme opening.
#[derive(Debug, Clone)]
pub struct OpeningProof<C: CurveAffine> {
fork: u8,
rounds: Vec<(C, C)>,
delta: C,
z1: C::Scalar,
z2: C::Scalar,
}
/// These are the public parameters for the polynomial commitment scheme.
#[derive(Debug)]
pub struct Params<C: CurveAffine> {
pub(crate) k: u32,
pub(crate) n: u64,
pub(crate) g: Vec<C>,
pub(crate) g_lagrange: Vec<C>,
pub(crate) h: C,
}
impl<C: CurveAffine> Params<C> {
/// Initializes parameters for the curve, given a random oracle to draw
/// points from.
pub fn new<H: Hasher<C::Base>>(k: u32) -> Self {
// This is usually a limitation on the curve, but we also want 32-bit
// architectures to be supported.
assert!(k < 32);
// No goofy hardware please.
assert!(core::mem::size_of::<usize>() >= 4);
let n: u64 = 1 << k;
let g = {
let hasher = &H::init(C::Base::zero());
let mut g = Vec::with_capacity(n as usize);
g.resize(n as usize, C::zero());
parallelize(&mut g, move |g, start| {
let mut cur_value = C::Base::from(start as u64);
for g in g.iter_mut() {
let mut hasher = hasher.clone();
hasher.absorb(cur_value);
cur_value += &C::Base::one();
loop {
let x = hasher.squeeze().to_bytes();
let p = C::from_bytes(&x);
if bool::from(p.is_some()) {
*g = p.unwrap();
break;
}
}
}
});
g
};
// Let's evaluate all of the Lagrange basis polynomials
// using an inverse FFT.
let mut alpha_inv = C::Scalar::ROOT_OF_UNITY_INV;
for _ in k..C::Scalar::S {
alpha_inv = alpha_inv.square();
}
let mut g_lagrange_projective = g.iter().map(|g| g.to_projective()).collect::<Vec<_>>();
best_fft(&mut g_lagrange_projective, alpha_inv, k);
let minv = C::Scalar::TWO_INV.pow_vartime(&[k as u64, 0, 0, 0]);
parallelize(&mut g_lagrange_projective, |g, _| {
for g in g.iter_mut() {
*g *= minv;
}
});
let g_lagrange = {
let mut g_lagrange = vec![C::zero(); n as usize];
parallelize(&mut g_lagrange, |g_lagrange, starts| {
C::Projective::batch_to_affine(
&g_lagrange_projective[starts..(starts + g_lagrange.len())],
g_lagrange,
);
});
drop(g_lagrange_projective);
g_lagrange
};
let h = {
let mut hasher = H::init(C::Base::zero());
hasher.absorb(-C::Base::one());
let x = hasher.squeeze().to_bytes();
let p = C::from_bytes(&x);
p.unwrap()
};
Params {
k,
n,
g,
g_lagrange,
h,
}
}
/// This computes a commitment to a polynomial described by the provided
/// slice of coefficients. The commitment will be blinded by the blinding
/// factor `r`.
pub fn commit(&self, poly: &[C::Scalar], r: C::Scalar) -> C::Projective {
let mut tmp_scalars = Vec::with_capacity(poly.len() + 1);
let mut tmp_bases = Vec::with_capacity(poly.len() + 1);
tmp_scalars.extend(poly.iter());
tmp_scalars.push(r);
tmp_bases.extend(self.g.iter());
tmp_bases.push(self.h);
best_multiexp::<C>(&tmp_scalars, &tmp_bases)
}
/// This commits to a polynomial using its evaluations over the $2^k$ size
/// evaluation domain. The commitment will be blinded by the blinding factor
/// `r`.
pub fn commit_lagrange(&self, poly: &[C::Scalar], r: C::Scalar) -> C::Projective {
let mut tmp_scalars = Vec::with_capacity(poly.len() + 1);
let mut tmp_bases = Vec::with_capacity(poly.len() + 1);
tmp_scalars.extend(poly.iter());
tmp_scalars.push(r);
tmp_bases.extend(self.g_lagrange.iter());
tmp_bases.push(self.h);
best_multiexp::<C>(&tmp_scalars, &tmp_bases)
}
/// Create a polynomial commitment opening proof for the polynomial defined
/// by the coefficients `px`, the blinding factor `blind` used for the
/// polynomial commitment, and the point `x` that the polynomial is
/// evaluated at.
///
/// This function will panic if the provided polynomial is too large with
/// respect to the polynomial commitment parameters.
///
/// **Important:** This function assumes that the provided `transcript` has
/// already seen the common inputs: the polynomial commitment P, the claimed
/// opening v, and the point x. It's probably also nice for the transcript
/// to have seen the elliptic curve description and the SRS, if you want to
/// be rigorous.
pub fn create_proof<H: Hasher<C::Base>>(
&self,
transcript: &mut H,
px: &[C::Scalar],
mut blind: C::Scalar,
x: C::Scalar,
) -> Result<OpeningProof<C>, ()> {
// We're limited to polynomials of degree n - 1.
assert!(px.len() <= self.n as usize);
let mut fork = 0;
// TODO: remove this hack and force the caller to deal with it
loop {
let mut transcript = transcript.clone();
transcript.absorb(C::Base::from_u64(fork as u64));
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt();
if u_y.is_none() {
fork += 1;
} else {
break;
}
}
transcript.absorb(C::Base::from_u64(fork as u64));
// Compute U
let u = {
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt().unwrap();
C::from_xy(u_x, u_y).unwrap()
};
// Initialize the vector `a` as the coefficients of the polynomial,
// padding with zeroes up to the size `n` supported by the parameters.
let mut a = px.to_vec();
a.resize(self.n as usize, C::Scalar::zero());
// Initialize the vector `b` as the powers of `x`. The inner product of
// `a` and `b` is the evaluation of the polynomial at `x`.
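// Concretely, <a, b> = sum_i a_i * x^i = p(x).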
let mut b = Vec::with_capacity(1 << self.k);
{
let mut cur = C::Scalar::one();
for _ in 0..(1 << self.k) {
b.push(cur);
cur *= &x;
}
}
// Initialize the vector `G` from the SRS. We'll be progressively
// collapsing this vector into smaller and smaller vectors until it is
// of length 1.
let mut g = self.g.clone();
// Perform the inner product argument, round by round.
let mut rounds = Vec::with_capacity(self.k as usize);
for k in (1..=self.k).rev() {
let half = 1 << (k - 1); // half the length of `a`, `b`, `G`
// Compute L, R
//
// TODO: If we modify multiexp to take "extra" bases, we could speed
// this piece up a bit by combining the multiexps.
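// Informally, once the blinding terms are added below, these become
//     L_j = <a_lo, G_hi> + [<a_lo, b_hi>] U + [l_randomness] H
//     R_j = <a_hi, G_lo> + [<a_hi, b_lo>] U + [r_randomness] H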
let l = best_multiexp(&a[0..half], &g[half..]);
let r = best_multiexp(&a[half..], &g[0..half]);
let value_l = compute_inner_product(&a[0..half], &b[half..]);
let value_r = compute_inner_product(&a[half..], &b[0..half]);
let mut l_randomness = C::Scalar::random();
let r_randomness = C::Scalar::random();
let l = l + &best_multiexp(&[value_l, l_randomness], &[u, self.h]);
let r = r + &best_multiexp(&[value_r, r_randomness], &[u, self.h]);
let mut l = l.to_affine();
let r = r.to_affine();
let challenge = loop {
// We'll fork the transcript and adjust our randomness
// until the challenge is a square.
let mut transcript = transcript.clone();
// We expect these not to be points at infinity because of the added blinding.
let (l_x, l_y) = l.get_xy().unwrap();
let (r_x, r_y) = r.get_xy().unwrap();
// Feed L and R into the cloned transcript...
transcript.absorb(l_x);
transcript.absorb(l_y);
transcript.absorb(r_x);
transcript.absorb(r_y);
// ... and get the squared challenge.
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq: C::Scalar = get_challenge_scalar(Challenge(challenge_sq_packed));
// There might be no square root, in which case we'll fork the
// transcript.
let challenge = challenge_sq.deterministic_sqrt();
if let Some(challenge) = challenge {
break challenge;
} else {
// Try again, with slightly different randomness
l = (l + self.h).to_affine();
l_randomness += &C::Scalar::one();
}
};
// The challenge is nonzero except with negligible probability, so this
// inversion cannot fail in practice.
let challenge_inv = challenge.invert().unwrap();
let challenge_sq_inv = challenge_inv.square();
let challenge_sq = challenge.square();
// Feed L and R into the real transcript
let (l_x, l_y) = l.get_xy().unwrap();
let (r_x, r_y) = r.get_xy().unwrap();
transcript.absorb(l_x);
transcript.absorb(l_y);
transcript.absorb(r_x);
transcript.absorb(r_y);
// And obtain the challenge, even though we already have it, since
// squeezing affects the transcript.
{
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq_expected = get_challenge_scalar(Challenge(challenge_sq_packed));
assert_eq!(challenge_sq, challenge_sq_expected);
}
// Done with this round.
rounds.push((l, r));
// Collapse `a` and `b`.
// TODO: parallelize
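// With u the round challenge, a'_i = u * a_i + u^-1 * a_{i + half} and
// b'_i = u^-1 * b_i + u * b_{i + half}. Informally, this preserves
//     <a', G'> + [<a', b'>] U = P_j + [u^2] L_j + [u^-2] R_j
// up to the blinding factor, which is updated to match further below.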
for i in 0..half {
a[i] = (a[i] * &challenge) + &(a[i + half] * &challenge_inv);
b[i] = (b[i] * &challenge_inv) + &(b[i + half] * &challenge);
}
a.truncate(half);
b.truncate(half);
// Collapse `G`
parallel_generator_collapse(&mut g, challenge, challenge_inv);
g.truncate(half);
// Update randomness (the synthetic blinding factor at the end)
blind += &(l_randomness * &challenge_sq);
blind += &(r_randomness * &challenge_sq_inv);
}
// We have fully collapsed `a`, `b`, `G`
assert_eq!(a.len(), 1);
let a = a[0];
assert_eq!(b.len(), 1);
let b = b[0];
assert_eq!(g.len(), 1);
let g = g[0];
// Random nonces for the zero-knowledge opening
let d = C::Scalar::random();
let s = C::Scalar::random();
let delta = best_multiexp(&[d, d * &b, s], &[g, u, self.h]).to_affine();
let (delta_x, delta_y) = delta.get_xy().unwrap();
// Feed delta into the transcript
transcript.absorb(delta_x);
transcript.absorb(delta_y);
// Obtain the challenge c.
let c_packed = transcript.squeeze().get_lower_128();
let c: C::Scalar = get_challenge_scalar(Challenge(c_packed));
// Compute z1 and z2 as described in the Halo paper.
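// Informally, these satisfy
//     [z1] G + [z1 * b] U + [z2] H
//         = [c] ([a] G + [a * b] U + [blind] H) + delta,
// which is the relation the verifier checks (in rearranged form) in
// `verify_proof`.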
let z1 = a * &c + &d;
let z2 = c * &blind + &s;
Ok(OpeningProof {
fork,
rounds,
delta,
z1,
z2,
})
}
/// Checks whether an [`OpeningProof`] is valid given the current
/// `transcript` and a point `x` at which the polynomial commitment `p`
/// purportedly opens to the value `v`.
pub fn verify_proof<H: Hasher<C::Base>>(
&self,
proof: &OpeningProof<C>,
transcript: &mut H,
x: C::Scalar,
p: &C,
v: C::Scalar,
) -> bool {
// Check for well-formedness
if proof.rounds.len() != self.k as usize {
return false;
}
transcript.absorb(C::Base::from_u64(proof.fork as u64));
// Compute U
let u = {
let u_x = transcript.squeeze();
// y^2 = x^3 + B
let u_y2 = u_x.square() * &u_x + &C::b();
let u_y = u_y2.deterministic_sqrt();
if u_y.is_none() {
return false;
}
let u_y = u_y.unwrap();
C::from_xy(u_x, u_y).unwrap()
};
let mut extra_scalars = Vec::with_capacity(proof.rounds.len() * 2 + 4 + self.n as usize);
let mut extra_bases = Vec::with_capacity(proof.rounds.len() * 2 + 4 + self.n as usize);
// Data about the challenges from each of the rounds.
let mut challenges = Vec::with_capacity(proof.rounds.len());
let mut challenges_inv = Vec::with_capacity(proof.rounds.len());
let mut challenges_sq = Vec::with_capacity(proof.rounds.len());
let mut allinv = Field::one();
for round in &proof.rounds {
// Feed L and R into the transcript.
let l = round.0.get_xy();
let r = round.1.get_xy();
if bool::from(l.is_none() | r.is_none()) {
return false;
}
let l = l.unwrap();
let r = r.unwrap();
transcript.absorb(l.0);
transcript.absorb(l.1);
transcript.absorb(r.0);
transcript.absorb(r.1);
let challenge_sq_packed = transcript.squeeze().get_lower_128();
let challenge_sq: C::Scalar = get_challenge_scalar(Challenge(challenge_sq_packed));
let challenge = challenge_sq.deterministic_sqrt();
if challenge.is_none() {
// We didn't sample a square.
return false;
}
let challenge = challenge.unwrap();
let challenge_inv = challenge.invert();
if bool::from(challenge_inv.is_none()) {
// The challenge was zero, which should only happen with
// negligible probability.
return false;
}
let challenge_inv = challenge_inv.unwrap();
allinv *= challenge_inv;
let challenge_sq_inv = challenge_inv.square();
extra_scalars.push(challenge_sq);
extra_bases.push(round.0);
extra_scalars.push(challenge_sq_inv);
extra_bases.push(round.1);
challenges.push(challenge);
challenges_inv.push(challenge_inv);
challenges_sq.push(challenge_sq);
}
let delta = proof.delta.get_xy();
if bool::from(delta.is_none()) {
return false;
}
let delta = delta.unwrap();
// Feed delta into the transcript
transcript.absorb(delta.0);
transcript.absorb(delta.1);
// Get the challenge `c`
let c_packed = transcript.squeeze().get_lower_128();
let c: C::Scalar = get_challenge_scalar(Challenge(c_packed));
// Check
// [c] P + [c * v] U + [c] sum(L_i * u_i^2) + [c] sum(R_i * u_i^-2) + delta - [z1] G - [z1 * b] U - [z2] H
// = 0
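// where G denotes the fully-collapsed generator, expanded below as
// sum_i s_i * G_i using `compute_s`, and b is recomputed from the round
// challenges via `compute_b`.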
for scalar in &mut extra_scalars {
*scalar *= &c;
}
let b = compute_b(x, &challenges, &challenges_inv);
let neg_z1 = -proof.z1;
// [c] P
extra_bases.push(*p);
extra_scalars.push(c);
// [c * v] U - [z1 * b] U
extra_bases.push(u);
extra_scalars.push((c * &v) + &(neg_z1 * &b));
// delta
extra_bases.push(proof.delta);
extra_scalars.push(Field::one());
// - [z2] H
extra_bases.push(self.h);
extra_scalars.push(-proof.z2);
// - [z1] G
extra_bases.extend(&self.g);
let mut s = compute_s(&challenges_sq, allinv);
// TODO: parallelize
for s in &mut s {
*s *= &neg_z1;
}
extra_scalars.extend(s);
bool::from(best_multiexp(&extra_scalars, &extra_bases).is_zero())
}
}
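/// Evaluates the fully-collapsed `b` vector at `x`: with the round
/// challenges `u_1, ..., u_k` in first-to-last order, this is (informally)
/// the product over j of `(u_j^-1 + u_j * x^(2^(k - j)))`.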
fn compute_b<F: Field>(x: F, challenges: &[F], challenges_inv: &[F]) -> F {
assert!(!challenges.is_empty());
assert_eq!(challenges.len(), challenges_inv.len());
if challenges.len() == 1 {
*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x
} else {
(*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x)
* compute_b(
x.square(),
&challenges[0..(challenges.len() - 1)],
&challenges_inv[0..(challenges.len() - 1)],
)
}
}
// TODO: parallelize
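/// Computes the vector `s` such that the fully-collapsed generator equals
/// `sum_i s_i * G_i`; each `s_i` is (informally) a product of the round
/// challenges or their inverses, selected by the binary decomposition of `i`.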
fn compute_s<F: Field>(challenges_sq: &[F], allinv: F) -> Vec<F> {
let lg_n = challenges_sq.len();
let n = 1 << lg_n;
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
s
}
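/// Folds the generator vector in place across threads: for each `i` in the
/// lower half, `g[i]` becomes `[challenge_inv] g[i] + [challenge] g[i + half]`.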
fn parallel_generator_collapse<C: CurveAffine>(
g: &mut [C],
challenge: C::Scalar,
challenge_inv: C::Scalar,
) {
let len = g.len() / 2;
let (mut g_lo, g_hi) = g.split_at_mut(len);
parallelize(&mut g_lo, |g_lo, start| {
let g_hi = &g_hi[start..];
let mut tmp = Vec::with_capacity(g_lo.len());
for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) {
// TODO: could use multiexp
tmp.push(((*g_lo) * challenge_inv) + &((*g_hi) * challenge));
}
C::Projective::batch_to_affine(&tmp, g_lo);
});
}
#[test]
fn test_commit_lagrange() {
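// Committing to a polynomial in coefficient form should agree with
// committing to its evaluations over the 2^K domain (obtained via FFT).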
const K: u32 = 6;
use crate::arithmetic::{EpAffine, Fp, Fq};
use crate::transcript::DummyHash;
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
let a = (0..(1 << K)).map(|l| Fq::from(l)).collect::<Vec<_>>();
let mut b = a.clone();
let mut alpha = Fq::ROOT_OF_UNITY;
for _ in K..Fq::S {
alpha = alpha.square();
}
best_fft(&mut b, alpha, K);
assert_eq!(params.commit(&a, alpha), params.commit_lagrange(&b, alpha));
}
#[test]
fn test_opening_proof() {
const K: u32 = 6;
use crate::arithmetic::{eval_polynomial, EpAffine, Fp, Fq};
use crate::transcript::DummyHash;
let params = Params::<EpAffine>::new::<DummyHash<Fp>>(K);
let px = (0..(1 << K))
.map(|l| Fq::from(l + 1) * Fq::ZETA)
.collect::<Vec<_>>();
let blind = Fq::random();
let p = params.commit(&px, blind).to_affine();
let mut transcript = DummyHash::init(Field::one());
let (p_x, p_y) = p.get_xy().unwrap();
transcript.absorb(p_x);
transcript.absorb(p_y);
let x_packed = transcript.squeeze().get_lower_128();
let x: Fq = get_challenge_scalar(Challenge(x_packed));
// Evaluate the polynomial
let v = eval_polynomial(&px, x);
transcript.absorb(Fp::from_bytes(&v.to_bytes()).unwrap()); // unlikely to fail since p ~ q
loop {
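// `create_proof` returns a `Result`; if it fails, perturb the transcript
// and retry, otherwise verify against the duplicated transcript, which is
// in the state a verifier would see.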
let mut transcript_dup = transcript.clone();
let opening_proof = params.create_proof(&mut transcript, &px, blind, x);
if opening_proof.is_err() {
transcript = transcript_dup;
transcript.absorb(Field::one());
} else {
let opening_proof = opening_proof.unwrap();
assert!(params.verify_proof(&opening_proof, &mut transcript_dup, x, &p, v));
break;
}
}
}