ZAL: ZK Accel Layer (#277)

* Implement ZAL API (ZK Acceleration Layer)

* Update halo2_backend/src/poly/ipa/commitment.rs

* zal: cargo fmt
Mamy Ratsimbazafy 2024-04-10 16:32:13 +02:00 committed by GitHub
parent 1b2a6774b0
commit 26652ae3d0
28 changed files with 832 additions and 196 deletions
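
In short, every prover-side constructor that performs MSMs gains an engine-aware variant, while the old entry points keep working by building a default engine internally. A minimal sketch of opting into an explicit MSM backend, modelled on the `test_mycircuit_full_split` test in this commit (`params`, `pk`, `instances_slice`, `rng` and `transcript` are assumed to be set up as in that test):

use halo2_middleware::zal::impls::{H2cEngine, PlonkEngineConfig};

// Build the engine explicitly; an accelerated backend would be passed to `set_msm` instead.
let engine = PlonkEngineConfig::new()
    .set_curve::<G1Affine>()
    .set_msm(H2cEngine::new())
    .build();

// Engine-aware prover; `ProverV2Single::new(..)` still works and builds the default engine.
let mut prover = ProverV2Single::<
    KZGCommitmentScheme<Bn256>,
    ProverSHPLONK<'_, Bn256>,
    _,
    _,
    _,
    _,
>::new_with_engine(engine, &params, &pk, instances_slice, &mut rng, &mut transcript)
.unwrap();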

View File

@ -6,6 +6,7 @@
use group::Curve;
use halo2_middleware::ff::{Field, FromUniformBytes};
use halo2_middleware::zal::impls::H2cEngine;
use super::{evaluation::Evaluator, permutation, Polynomial, ProvingKey, VerifyingKey};
use crate::{
@ -70,6 +71,7 @@ where
.map(|poly| {
params
.commit_lagrange(
&H2cEngine::new(),
&Polynomial::new_lagrange_from_vec(poly.clone()),
Blind::default(),
)

View File

@ -17,6 +17,7 @@ use group::{
};
use halo2_middleware::ff::WithSmallOrderMulGroup;
use halo2_middleware::poly::Rotation;
use halo2_middleware::zal::{impls::PlonkEngine, traits::MsmAccel};
use rand_core::RngCore;
use std::{
collections::BTreeMap,
@ -69,7 +70,9 @@ pub(in crate::plonk) fn lookup_commit_permuted<
E: EncodedChallenge<C>,
R: RngCore,
T: TranscriptWrite<C, E>,
M: MsmAccel<C>,
>(
engine: &PlonkEngine<C, M>,
arg: &Argument<F>,
pk: &ProvingKey<C>,
params: &P,
@ -127,7 +130,9 @@ where
let mut commit_values = |values: &Polynomial<C::Scalar, LagrangeCoeff>| {
let poly = pk.vk.domain.lagrange_to_coeff(values.clone());
let blind = Blind(C::Scalar::random(&mut rng));
let commitment = params.commit_lagrange(values, blind).to_affine();
let commitment = params
.commit_lagrange(&engine.msm_backend, values, blind)
.to_affine();
(poly, blind, commitment)
};
@ -163,14 +168,17 @@ impl<C: CurveAffine> Permuted<C> {
/// grand product polynomial over the lookup. The grand product polynomial
/// is used to populate the [`Committed<C>`] struct. The [`Committed<C>`] struct is
/// added to the Lookup and finally returned by the method.
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn commit_product<
'params,
P: Params<'params, C>,
E: EncodedChallenge<C>,
R: RngCore,
T: TranscriptWrite<C, E>,
M: MsmAccel<C>,
>(
self,
engine: &PlonkEngine<C, M>,
pk: &ProvingKey<C>,
params: &P,
beta: ChallengeBeta<C>,
@ -287,7 +295,9 @@ impl<C: CurveAffine> Permuted<C> {
}
let product_blind = Blind(C::Scalar::random(rng));
let product_commitment = params.commit_lagrange(&z, product_blind).to_affine();
let product_commitment = params
.commit_lagrange(&engine.msm_backend, &z, product_blind)
.to_affine();
let z = pk.vk.domain.lagrange_to_coeff(z);
// Hash product commitment

View File

@ -1,5 +1,6 @@
use group::Curve;
use halo2_middleware::ff::{Field, PrimeField};
use halo2_middleware::zal::impls::H2cEngine;
use super::{Argument, ProvingKey, VerifyingKey};
use crate::{
@ -265,7 +266,7 @@ pub(crate) fn build_vk<'params, C: CurveAffine, P: Params<'params, C>>(
// Compute commitment to permutation polynomial
commitments.push(
params
.commit_lagrange(permutation, Blind::default())
.commit_lagrange(&H2cEngine::new(), permutation, Blind::default())
.to_affine(),
);
}

View File

@ -2,7 +2,8 @@ use group::{
ff::{BatchInvert, Field},
Curve,
};
use halo2_middleware::ff::PrimeField;
use halo2_middleware::zal::traits::MsmAccel;
use halo2_middleware::{ff::PrimeField, zal::impls::PlonkEngine};
use rand_core::RngCore;
use std::iter::{self, ExactSizeIterator};
@ -53,7 +54,9 @@ pub(in crate::plonk) fn permutation_commit<
E: EncodedChallenge<C>,
R: RngCore,
T: TranscriptWrite<C, E>,
M: MsmAccel<C>,
>(
engine: &PlonkEngine<C, M>,
arg: &Argument,
params: &P,
pk: &plonk::ProvingKey<C>,
@ -171,7 +174,8 @@ pub(in crate::plonk) fn permutation_commit<
let blind = Blind(C::Scalar::random(&mut rng));
let permutation_product_commitment_projective = params.commit_lagrange(&z, blind);
let permutation_product_commitment_projective =
params.commit_lagrange(&engine.msm_backend, &z, blind);
let permutation_product_blind = blind;
let z = domain.lagrange_to_coeff(z);
let permutation_product_poly = z.clone();

View File

@ -19,6 +19,10 @@ use crate::poly::{
};
use crate::transcript::{EncodedChallenge, TranscriptWrite};
use halo2_middleware::ff::{Field, FromUniformBytes, WithSmallOrderMulGroup};
use halo2_middleware::zal::{
impls::{H2cEngine, PlonkEngine, PlonkEngineConfig},
traits::MsmAccel,
};
/// Collection of instance data used during proving for a single circuit proof.
#[derive(Debug)]
@ -45,7 +49,8 @@ pub struct ProverV2Single<
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWrite<Scheme::Curve, E>,
>(ProverV2<'a, 'params, Scheme, P, E, R, T>);
M: MsmAccel<Scheme::Curve>,
>(ProverV2<'a, 'params, Scheme, P, E, R, T, M>);
impl<
'a,
@ -55,10 +60,12 @@ impl<
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWrite<Scheme::Curve, E>,
> ProverV2Single<'a, 'params, Scheme, P, E, R, T>
M: MsmAccel<Scheme::Curve>,
> ProverV2Single<'a, 'params, Scheme, P, E, R, T, M>
{
/// Create a new prover object
pub fn new(
pub fn new_with_engine(
engine: PlonkEngine<Scheme::Curve, M>,
params: &'params Scheme::ParamsProver,
pk: &'a ProvingKey<Scheme::Curve>,
// TODO: If this was a vector the usage would be simpler
@ -70,7 +77,8 @@ impl<
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
Ok(Self(ProverV2::new(
Ok(Self(ProverV2::new_with_engine(
engine,
params,
pk,
&[instance],
@ -79,6 +87,22 @@ impl<
)?))
}
pub fn new(
params: &'params Scheme::ParamsProver,
pk: &'a ProvingKey<Scheme::Curve>,
// TODO: If this was a vector the usage would be simpler
// https://github.com/privacy-scaling-explorations/halo2/issues/265
instance: &[&[Scheme::Scalar]],
rng: R,
transcript: &'a mut T,
) -> Result<ProverV2Single<'a, 'params, Scheme, P, E, R, T, H2cEngine>, Error>
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
let engine = PlonkEngineConfig::build_default();
ProverV2Single::new_with_engine(engine, params, pk, instance, rng, transcript)
}
/// Commit the `witness` at `phase` and return the challenges after `phase`.
pub fn commit_phase(
&mut self,
@ -111,7 +135,9 @@ pub struct ProverV2<
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWrite<Scheme::Curve, E>,
M: MsmAccel<Scheme::Curve>,
> {
engine: PlonkEngine<Scheme::Curve, M>,
// Circuit and setup fields
params: &'params Scheme::ParamsProver,
// Plonk proving key
@ -141,10 +167,12 @@ impl<
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWrite<Scheme::Curve, E>,
> ProverV2<'a, 'params, Scheme, P, E, R, T>
M: MsmAccel<Scheme::Curve>,
> ProverV2<'a, 'params, Scheme, P, E, R, T, M>
{
/// Create a new prover object
pub fn new(
pub fn new_with_engine(
engine: PlonkEngine<Scheme::Curve, M>,
params: &'params Scheme::ParamsProver,
pk: &'a ProvingKey<Scheme::Curve>,
// TODO: If this was a vector the usage would be simpler.
@ -200,7 +228,9 @@ impl<
let instance_commitments_projective: Vec<_> = instance_values
.iter()
.map(|poly| params.commit_lagrange(poly, Blind::default()))
.map(|poly| {
params.commit_lagrange(&engine.msm_backend, poly, Blind::default())
})
.collect();
let mut instance_commitments =
vec![Scheme::Curve::identity(); instance_commitments_projective.len()];
@ -260,6 +290,7 @@ impl<
let challenges = HashMap::<usize, Scheme::Scalar>::with_capacity(meta.num_challenges);
Ok(ProverV2 {
engine,
params,
pk,
phases,
@ -367,69 +398,70 @@ impl<
// Also sets advice_polys with the (blinding) updated advice columns and advice_blinds with
// the blinding factor used for each advice column.
let mut commit_phase_fn =
|advice: &mut AdviceSingle<Scheme::Curve, LagrangeCoeff>,
witness: Vec<Option<Polynomial<Scheme::Scalar, LagrangeCoeff>>>|
-> Result<(), Error> {
let unusable_rows_start = params.n() as usize - (meta.blinding_factors() + 1);
let mut advice_values: Vec<_> = witness.into_iter().flatten().collect();
let unblinded_advice: HashSet<usize> =
HashSet::from_iter(meta.unblinded_advice_columns.clone());
let mut commit_phase_fn = |advice: &mut AdviceSingle<Scheme::Curve, LagrangeCoeff>,
witness: Vec<
Option<Polynomial<Scheme::Scalar, LagrangeCoeff>>,
>|
-> Result<(), Error> {
let unusable_rows_start = params.n() as usize - (meta.blinding_factors() + 1);
let mut advice_values: Vec<_> = witness.into_iter().flatten().collect();
let unblinded_advice: HashSet<usize> =
HashSet::from_iter(meta.unblinded_advice_columns.clone());
// Add blinding factors to advice columns.
for (column_index, advice_values) in column_indices.iter().zip(&mut advice_values) {
if !unblinded_advice.contains(column_index) {
for cell in &mut advice_values[unusable_rows_start..] {
*cell = Scheme::Scalar::random(&mut rng);
}
} else {
#[cfg(feature = "sanity-checks")]
for cell in &advice_values[unusable_rows_start..] {
assert_eq!(*cell, Scheme::Scalar::ZERO);
}
// Add blinding factors to advice columns.
for (column_index, advice_values) in column_indices.iter().zip(&mut advice_values) {
if !unblinded_advice.contains(column_index) {
for cell in &mut advice_values[unusable_rows_start..] {
*cell = Scheme::Scalar::random(&mut rng);
}
} else {
#[cfg(feature = "sanity-checks")]
for cell in &advice_values[unusable_rows_start..] {
assert_eq!(*cell, Scheme::Scalar::ZERO);
}
}
}
// Compute commitments to advice column polynomials
let blinds: Vec<_> = column_indices
.iter()
.map(|i| {
if unblinded_advice.contains(i) {
Blind::default()
} else {
Blind(Scheme::Scalar::random(&mut rng))
}
})
.collect();
let advice_commitments_projective: Vec<_> = advice_values
.iter()
.zip(blinds.iter())
.map(|(poly, blind)| params.commit_lagrange(poly, *blind))
.collect();
let mut advice_commitments_affine =
vec![Scheme::Curve::identity(); advice_commitments_projective.len()];
<Scheme::Curve as CurveAffine>::CurveExt::batch_normalize(
&advice_commitments_projective,
&mut advice_commitments_affine,
);
let advice_commitments_affine = advice_commitments_affine;
drop(advice_commitments_projective);
// Compute commitments to advice column polynomials
let blinds: Vec<_> = column_indices
.iter()
.map(|i| {
if unblinded_advice.contains(i) {
Blind::default()
} else {
Blind(Scheme::Scalar::random(&mut rng))
}
})
.collect();
let advice_commitments_projective: Vec<_> = advice_values
.iter()
.zip(blinds.iter())
.map(|(poly, blind)| params.commit_lagrange(&self.engine.msm_backend, poly, *blind))
.collect();
let mut advice_commitments_affine =
vec![Scheme::Curve::identity(); advice_commitments_projective.len()];
<Scheme::Curve as CurveAffine>::CurveExt::batch_normalize(
&advice_commitments_projective,
&mut advice_commitments_affine,
);
let advice_commitments_affine = advice_commitments_affine;
drop(advice_commitments_projective);
// Update transcript.
// [TRANSCRIPT-3]
for commitment in &advice_commitments_affine {
self.transcript.write_point(*commitment)?;
}
// Update transcript.
// [TRANSCRIPT-3]
for commitment in &advice_commitments_affine {
self.transcript.write_point(*commitment)?;
}
// Set advice_polys & advice_blinds
for ((column_index, advice_values), blind) in
column_indices.iter().zip(advice_values).zip(blinds)
{
advice.advice_polys[*column_index] = advice_values;
advice.advice_blinds[*column_index] = blind;
}
Ok(())
};
// Set advice_polys & advice_blinds
for ((column_index, advice_values), blind) in
column_indices.iter().zip(advice_values).zip(blinds)
{
advice.advice_polys[*column_index] = advice_values;
advice.advice_blinds[*column_index] = blind;
}
Ok(())
};
// Update blindings for each advice column
// [TRANSCRIPT-3]
@ -476,7 +508,7 @@ impl<
/// - 11. Compute and hash fixed evals
/// - 12. Evaluate permutation, lookups and shuffles at x
/// - 13. Generate all queries ([`ProverQuery`])
/// - 14. Send the queries to the [`Prover`]
/// - 14. Send the queries to the [`Prover`]
pub fn create_proof(mut self) -> Result<(), Error>
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
@ -515,6 +547,7 @@ impl<
.iter()
.map(|lookup| {
lookup_commit_permuted(
&self.engine,
lookup,
pk,
params,
@ -554,6 +587,7 @@ impl<
.zip(advices.iter())
.map(|(instance, advice)| {
permutation_commit(
&self.engine,
&cs.permutation,
params,
pk,
@ -579,7 +613,15 @@ impl<
lookups
.into_iter()
.map(|lookup| {
lookup.commit_product(pk, params, beta, gamma, &mut rng, self.transcript)
lookup.commit_product(
&self.engine,
pk,
params,
beta,
gamma,
&mut rng,
self.transcript,
)
})
.collect::<Result<Vec<_>, _>>()
})
@ -597,6 +639,7 @@ impl<
.iter()
.map(|shuffle| {
shuffle_commit_product(
&self.engine,
shuffle,
pk,
params,
@ -617,7 +660,13 @@ impl<
// 5. Commit to the vanishing argument's random polynomial for blinding h(x_3) -------------------
// [TRANSCRIPT-12]
let vanishing = vanishing::Argument::commit(params, domain, &mut rng, self.transcript)?;
let vanishing = vanishing::Argument::commit(
&self.engine.msm_backend,
params,
domain,
&mut rng,
self.transcript,
)?;
// 6. Generate the advice polys ------------------------------------------------------------------
@ -667,7 +716,14 @@ impl<
// 8. Construct the vanishing argument's h(X) commitments --------------------------------------
// [TRANSCRIPT-14]
let vanishing = vanishing.construct(params, domain, h_poly, &mut rng, self.transcript)?;
let vanishing = vanishing.construct(
&self.engine,
params,
domain,
h_poly,
&mut rng,
self.transcript,
)?;
// 9. Compute x --------------------------------------------------------------------------------
// [TRANSCRIPT-15]
@ -836,7 +892,7 @@ impl<
let prover = P::new(params);
prover
.create_proof(rng, self.transcript, queries)
.create_proof_with_engine(&self.engine.msm_backend, rng, self.transcript, queries)
.map_err(|_| Error::ConstraintSystemFailure)?;
Ok(())
@ -846,4 +902,21 @@ impl<
pub fn phases(&self) -> &[u8] {
self.phases.as_slice()
}
/// Create a new prover object
pub fn new(
params: &'params Scheme::ParamsProver,
pk: &'a ProvingKey<Scheme::Curve>,
// TODO: If this was a vector the usage would be simpler.
// https://github.com/privacy-scaling-explorations/halo2/issues/265
circuits_instances: &[&[&[Scheme::Scalar]]],
rng: R,
transcript: &'a mut T,
) -> Result<ProverV2<'a, 'params, Scheme, P, E, R, T, H2cEngine>, Error>
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
let engine = PlonkEngineConfig::build_default();
ProverV2::new_with_engine(engine, params, pk, circuits_instances, rng, transcript)
}
}

View File

@ -13,6 +13,7 @@ use crate::{
};
use group::{ff::BatchInvert, ff::WithSmallOrderMulGroup, Curve};
use halo2_middleware::poly::Rotation;
use halo2_middleware::zal::{impls::PlonkEngine, traits::MsmAccel};
use rand_core::RngCore;
use std::{
iter,
@ -102,7 +103,9 @@ pub(in crate::plonk) fn shuffle_commit_product<
E: EncodedChallenge<C>,
R: RngCore,
T: TranscriptWrite<C, E>,
M: MsmAccel<C>,
>(
engine: &PlonkEngine<C, M>,
arg: &Argument<F>,
pk: &ProvingKey<C>,
params: &P,
@ -188,7 +191,9 @@ where
}
let product_blind = Blind(C::Scalar::random(rng));
let product_commitment = params.commit_lagrange(&z, product_blind).to_affine();
let product_commitment = params
.commit_lagrange(&engine.msm_backend, &z, product_blind)
.to_affine();
let z = pk.vk.domain.lagrange_to_coeff(z);
// Hash product commitment

View File

@ -3,6 +3,7 @@ use std::{collections::HashMap, iter};
use crate::plonk::Error;
use group::Curve;
use halo2_middleware::ff::Field;
use halo2_middleware::zal::{impls::PlonkEngine, traits::MsmAccel};
use rand_chacha::ChaCha20Rng;
use rand_core::{RngCore, SeedableRng};
@ -43,6 +44,7 @@ impl<C: CurveAffine> Argument<C> {
R: RngCore,
T: TranscriptWrite<C, E>,
>(
engine: &impl MsmAccel<C>,
params: &P,
domain: &EvaluationDomain<C::Scalar>,
mut rng: R,
@ -84,7 +86,9 @@ impl<C: CurveAffine> Argument<C> {
let random_blind = Blind(C::Scalar::random(rng));
// Commit
let c = params.commit(&random_poly, random_blind).to_affine();
let c = params
.commit(engine, &random_poly, random_blind)
.to_affine();
transcript.write_point(c)?;
Ok(Committed {
@ -101,8 +105,10 @@ impl<C: CurveAffine> Committed<C> {
E: EncodedChallenge<C>,
R: RngCore,
T: TranscriptWrite<C, E>,
M: MsmAccel<C>,
>(
self,
engine: &PlonkEngine<C, M>,
params: &P,
domain: &EvaluationDomain<C::Scalar>,
h_poly: Polynomial<C::Scalar, ExtendedLagrangeCoeff>,
@ -130,7 +136,7 @@ impl<C: CurveAffine> Committed<C> {
let h_commitments_projective: Vec<_> = h_pieces
.iter()
.zip(h_blinds.iter())
.map(|(h_piece, blind)| params.commit(h_piece, *blind))
.map(|(h_piece, blind)| params.commit(&engine.msm_backend, h_piece, *blind))
.collect();
let mut h_commitments = vec![C::identity(); h_commitments_projective.len()];
C::Curve::batch_normalize(&h_commitments_projective, &mut h_commitments);

View File

@ -3,6 +3,7 @@
use group::Curve;
use halo2_middleware::circuit::Any;
use halo2_middleware::ff::{Field, FromUniformBytes, WithSmallOrderMulGroup};
use halo2_middleware::zal::impls::H2cEngine;
use std::iter;
use super::{vanishing, VerifyingKey};
@ -64,6 +65,9 @@ pub fn verify_proof<
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
// ZAL: Verification is (supposedly) cheap, hence we don't use an accelerator engine
let default_engine = H2cEngine::new();
// Check that instances matches the expected number of instance columns
for instances in instances.iter() {
if instances.len() != vk.cs.num_instance_columns {
@ -87,7 +91,9 @@ where
poly.resize(params.n() as usize, Scheme::Scalar::ZERO);
let poly = vk.domain.lagrange_from_vec(poly);
Ok(params.commit_lagrange(&poly, Blind::default()).to_affine())
Ok(params
.commit_lagrange(&default_engine, &poly, Blind::default())
.to_affine())
})
.collect::<Result<Vec<_>, _>>()
})

View File

@ -1,6 +1,7 @@
use crate::plonk::Error;
use group::ff::Field;
use halo2_middleware::ff::FromUniformBytes;
use halo2_middleware::zal::impls::H2cEngine;
use halo2curves::CurveAffine;
use rand_core::OsRng;
@ -129,7 +130,8 @@ where
);
match final_msm {
Ok(msm) => msm.check(),
// ZAL: Verification is (supposedly) cheap, hence we don't use an accelerator engine
Ok(msm) => msm.check(&H2cEngine::new()),
Err(_) => false,
}
}

View File

@ -6,6 +6,7 @@ use super::{
use crate::poly::Error;
use crate::transcript::{EncodedChallenge, TranscriptRead, TranscriptWrite};
use halo2_middleware::ff::Field;
use halo2_middleware::zal::{impls::PlonkEngineConfig, traits::MsmAccel};
use halo2curves::CurveAffine;
use rand_core::RngCore;
use std::{
@ -62,6 +63,7 @@ pub trait Params<'params, C: CurveAffine>: Sized + Clone + Debug {
/// `r`.
fn commit_lagrange(
&self,
engine: &impl MsmAccel<C>,
poly: &Polynomial<C::ScalarExt, LagrangeCoeff>,
r: Blind<C::ScalarExt>,
) -> C::CurveExt;
@ -84,8 +86,12 @@ pub trait ParamsProver<'params, C: CurveAffine>: Params<'params, C> {
/// This computes a commitment to a polynomial described by the provided
/// slice of coefficients. The commitment may be blinded by the blinding
/// factor `r`.
fn commit(&self, poly: &Polynomial<C::ScalarExt, Coeff>, r: Blind<C::ScalarExt>)
-> C::CurveExt;
fn commit(
&self,
engine: &impl MsmAccel<C>,
poly: &Polynomial<C::ScalarExt, Coeff>,
r: Blind<C::ScalarExt>,
) -> C::CurveExt;
/// Getter for g generators
fn get_g(&self) -> &[C];
@ -111,10 +117,10 @@ pub trait MSM<C: CurveAffine>: Clone + Debug + Send + Sync {
fn scale(&mut self, factor: C::Scalar);
/// Perform multiexp and check that it results in zero
fn check(&self) -> bool;
fn check(&self, engine: &impl MsmAccel<C>) -> bool;
/// Perform multiexp and return the result
fn eval(&self) -> C::CurveExt;
fn eval(&self, engine: &impl MsmAccel<C>) -> C::CurveExt;
/// Return base points
fn bases(&self) -> Vec<C::CurveExt>;
@ -131,6 +137,24 @@ pub trait Prover<'params, Scheme: CommitmentScheme> {
/// Creates new prover instance
fn new(params: &'params Scheme::ParamsProver) -> Self;
/// Create a multi-opening proof
fn create_proof_with_engine<
'com,
E: EncodedChallenge<Scheme::Curve>,
T: TranscriptWrite<Scheme::Curve, E>,
R,
I,
>(
&self,
engine: &impl MsmAccel<Scheme::Curve>,
rng: R,
transcript: &mut T,
queries: I,
) -> io::Result<()>
where
I: IntoIterator<Item = ProverQuery<'com, Scheme::Curve>> + Clone,
R: RngCore;
/// Create a multi-opening proof
fn create_proof<
'com,
@ -146,7 +170,11 @@ pub trait Prover<'params, Scheme: CommitmentScheme> {
) -> io::Result<()>
where
I: IntoIterator<Item = ProverQuery<'com, Scheme::Curve>> + Clone,
R: RngCore;
R: RngCore,
{
let engine = PlonkEngineConfig::build_default::<Scheme::Curve>();
self.create_proof_with_engine(&engine.msm_backend, rng, transcript, queries)
}
}
/// Common multi-open verifier interface for various commitment schemes
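
Every commitment call now names its MSM engine explicitly; the `H2cEngine` backend reproduces the previous `best_multiexp` behaviour, so existing callers only thread one extra argument through. A small sketch of the migrated call sites, assuming `params`, a coefficient-form `poly`, a Lagrange-form `lagrange_poly` and a `blind` are in scope as in the commitment tests further below:

use halo2_middleware::zal::impls::H2cEngine;

let engine = H2cEngine::new(); // pure halo2curves (CPU) backend
// Previously: params.commit(&poly, blind) and params.commit_lagrange(&lagrange_poly, blind)
let c = params.commit(&engine, &poly, blind);
let c_lag = params.commit_lagrange(&engine, &lagrange_poly, blind);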

View File

@ -10,13 +10,13 @@ use crate::poly::ipa::msm::MSMIPA;
use crate::poly::{Coeff, LagrangeCoeff, Polynomial};
use group::{Curve, Group};
use halo2curves::msm::best_multiexp;
use halo2_middleware::zal::traits::MsmAccel;
use std::marker::PhantomData;
mod prover;
mod verifier;
pub use prover::create_proof;
pub use prover::create_proof_with_engine;
pub use verifier::verify_proof;
use std::io;
@ -88,6 +88,7 @@ impl<'params, C: CurveAffine> Params<'params, C> for ParamsIPA<C> {
/// `r`.
fn commit_lagrange(
&self,
engine: &impl MsmAccel<C>,
poly: &Polynomial<C::Scalar, LagrangeCoeff>,
r: Blind<C::Scalar>,
) -> C::Curve {
@ -100,7 +101,7 @@ impl<'params, C: CurveAffine> Params<'params, C> for ParamsIPA<C> {
tmp_bases.extend(self.g_lagrange.iter());
tmp_bases.push(self.w);
best_multiexp::<C>(&tmp_scalars, &tmp_bases)
engine.msm(&tmp_scalars, &tmp_bases)
}
/// Writes params to a buffer.
@ -210,7 +211,12 @@ impl<'params, C: CurveAffine> ParamsProver<'params, C> for ParamsIPA<C> {
/// This computes a commitment to a polynomial described by the provided
/// slice of coefficients. The commitment will be blinded by the blinding
/// factor `r`.
fn commit(&self, poly: &Polynomial<C::Scalar, Coeff>, r: Blind<C::Scalar>) -> C::Curve {
fn commit(
&self,
engine: &impl MsmAccel<C>,
poly: &Polynomial<C::Scalar, Coeff>,
r: Blind<C::Scalar>,
) -> C::Curve {
let mut tmp_scalars = Vec::with_capacity(poly.len() + 1);
let mut tmp_bases = Vec::with_capacity(poly.len() + 1);
@ -220,7 +226,7 @@ impl<'params, C: CurveAffine> ParamsProver<'params, C> for ParamsIPA<C> {
tmp_bases.extend(self.g.iter());
tmp_bases.push(self.w);
best_multiexp::<C>(&tmp_scalars, &tmp_bases)
engine.msm(&tmp_scalars, &tmp_bases)
}
fn get_g(&self) -> &[C] {
@ -232,11 +238,12 @@ impl<'params, C: CurveAffine> ParamsProver<'params, C> for ParamsIPA<C> {
mod test {
use crate::poly::commitment::ParamsProver;
use crate::poly::commitment::{Blind, Params, MSM};
use crate::poly::ipa::commitment::{create_proof, verify_proof, ParamsIPA};
use crate::poly::ipa::commitment::{create_proof_with_engine, verify_proof, ParamsIPA};
use crate::poly::ipa::msm::MSMIPA;
use group::Curve;
use halo2_middleware::ff::Field;
use halo2_middleware::zal::impls::H2cEngine;
#[test]
fn test_commit_lagrange_epaffine() {
@ -247,6 +254,7 @@ mod test {
use crate::poly::EvaluationDomain;
use halo2curves::pasta::{EpAffine, Fq};
let engine = H2cEngine::new();
let params = ParamsIPA::<EpAffine>::new(K);
let domain = EvaluationDomain::new(1, K);
@ -260,7 +268,10 @@ mod test {
let alpha = Blind(Fq::random(OsRng));
assert_eq!(params.commit(&b, alpha), params.commit_lagrange(&a, alpha));
assert_eq!(
params.commit(&engine, &b, alpha),
params.commit_lagrange(&engine, &a, alpha)
);
}
#[test]
@ -272,6 +283,7 @@ mod test {
use crate::poly::EvaluationDomain;
use halo2curves::pasta::{EqAffine, Fp};
let engine = H2cEngine::new();
let params: ParamsIPA<EqAffine> = ParamsIPA::<EqAffine>::new(K);
let domain = EvaluationDomain::new(1, K);
@ -285,7 +297,10 @@ mod test {
let alpha = Blind(Fp::random(OsRng));
assert_eq!(params.commit(&b, alpha), params.commit_lagrange(&a, alpha));
assert_eq!(
params.commit(&engine, &b, alpha),
params.commit_lagrange(&engine, &a, alpha)
);
}
#[test]
@ -308,6 +323,7 @@ mod test {
let rng = OsRng;
let engine = H2cEngine::new();
let params = ParamsIPA::<EpAffine>::new(K);
let mut params_buffer = vec![];
<ParamsIPA<_> as Params<_>>::write(&params, &mut params_buffer).unwrap();
@ -323,7 +339,7 @@ mod test {
let blind = Blind(Fq::random(rng));
let p = params.commit(&px, blind).to_affine();
let p = params.commit(&engine, &px, blind).to_affine();
let mut transcript =
Blake2bWrite::<Vec<u8>, EpAffine, Challenge255<EpAffine>>::init(vec![]);
@ -334,7 +350,8 @@ mod test {
transcript.write_scalar(v).unwrap();
let (proof, ch_prover) = {
create_proof(&params, rng, &mut transcript, &px, blind, *x).unwrap();
create_proof_with_engine(&engine, &params, rng, &mut transcript, &px, blind, *x)
.unwrap();
let ch_prover = transcript.squeeze_challenge();
(transcript.finalize(), ch_prover)
};
@ -360,12 +377,12 @@ mod test {
{
// Test use_challenges()
let msm_challenges = guard.clone().use_challenges();
assert!(msm_challenges.check());
assert!(msm_challenges.check(&engine));
// Test use_g()
let g = guard.compute_g();
let g = guard.compute_g(&engine);
let (msm_g, _accumulator) = guard.clone().use_g(g);
assert!(msm_g.check());
assert!(msm_g.check(&engine));
}
}
}

View File

@ -1,5 +1,5 @@
use halo2_middleware::ff::Field;
use halo2curves::msm::best_multiexp;
use halo2_middleware::zal::traits::MsmAccel;
use rand_core::RngCore;
use super::ParamsIPA;
@ -25,12 +25,13 @@ use std::io::{self};
/// opening v, and the point x. It's probably also nice for the transcript
/// to have seen the elliptic curve description and the URS, if you want to
/// be rigorous.
pub fn create_proof<
pub fn create_proof_with_engine<
C: CurveAffine,
E: EncodedChallenge<C>,
R: RngCore,
T: TranscriptWrite<C, E>,
>(
engine: &impl MsmAccel<C>,
params: &ParamsIPA<C>,
mut rng: R,
transcript: &mut T,
@ -55,7 +56,7 @@ pub fn create_proof<
let s_poly_blind = Blind(C::Scalar::random(&mut rng));
// Write a commitment to the random polynomial to the transcript
let s_poly_commitment = params.commit(&s_poly, s_poly_blind).to_affine();
let s_poly_commitment = params.commit(engine, &s_poly, s_poly_blind).to_affine();
transcript.write_point(s_poly_commitment)?;
// Challenge that will ensure that the prover cannot change P but can only
@ -105,14 +106,14 @@ pub fn create_proof<
//
// TODO: If we modify multiexp to take "extra" bases, we could speed
// this piece up a bit by combining the multiexps.
let l_j = best_multiexp(&p_prime[half..], &g_prime[0..half]);
let r_j = best_multiexp(&p_prime[0..half], &g_prime[half..]);
let l_j = engine.msm(&p_prime[half..], &g_prime[0..half]);
let r_j = engine.msm(&p_prime[0..half], &g_prime[half..]);
let value_l_j = compute_inner_product(&p_prime[half..], &b[0..half]);
let value_r_j = compute_inner_product(&p_prime[0..half], &b[half..]);
let l_j_randomness = C::Scalar::random(&mut rng);
let r_j_randomness = C::Scalar::random(&mut rng);
let l_j = l_j + best_multiexp(&[value_l_j * z, l_j_randomness], &[params.u, params.w]);
let r_j = r_j + best_multiexp(&[value_r_j * z, r_j_randomness], &[params.u, params.w]);
let l_j = l_j + engine.msm(&[value_l_j * z, l_j_randomness], &[params.u, params.w]);
let r_j = r_j + engine.msm(&[value_r_j * z, r_j_randomness], &[params.u, params.w]);
let l_j = l_j.to_affine();
let r_j = r_j.to_affine();

View File

@ -2,7 +2,7 @@ use crate::arithmetic::CurveAffine;
use crate::poly::{commitment::MSM, ipa::commitment::ParamsVerifierIPA};
use group::Group;
use halo2_middleware::ff::Field;
use halo2curves::msm::best_multiexp;
use halo2_middleware::zal::traits::MsmAccel;
use std::collections::BTreeMap;
/// A multiscalar multiplication in the polynomial commitment scheme
@ -131,11 +131,11 @@ impl<'a, C: CurveAffine> MSM<C> for MSMIPA<'a, C> {
self.u_scalar = self.u_scalar.map(|a| a * factor);
}
fn check(&self) -> bool {
bool::from(self.eval().is_identity())
fn check(&self, engine: &impl MsmAccel<C>) -> bool {
bool::from(self.eval(engine).is_identity())
}
fn eval(&self) -> C::Curve {
fn eval(&self, engine: &impl MsmAccel<C>) -> C::Curve {
let len = self.g_scalars.as_ref().map(|v| v.len()).unwrap_or(0)
+ self.w_scalar.map(|_| 1).unwrap_or(0)
+ self.u_scalar.map(|_| 1).unwrap_or(0)
@ -166,8 +166,7 @@ impl<'a, C: CurveAffine> MSM<C> for MSMIPA<'a, C> {
}
assert_eq!(scalars.len(), len);
best_multiexp(&scalars, &bases)
engine.msm(&scalars, &bases)
}
fn bases(&self) -> Vec<C::CurveExt> {
@ -223,6 +222,7 @@ mod tests {
commitment::{ParamsProver, MSM},
ipa::{commitment::ParamsIPA, msm::MSMIPA},
};
use halo2_middleware::zal::impls::H2cEngine;
use halo2curves::{
pasta::{Ep, EpAffine, Fp, Fq},
CurveAffine,
@ -233,40 +233,41 @@ mod tests {
let base: Ep = EpAffine::from_xy(-Fp::one(), Fp::from(2)).unwrap().into();
let base_viol = base + base;
let engine = H2cEngine::new();
let params = ParamsIPA::new(4);
let mut a: MSMIPA<EpAffine> = MSMIPA::new(&params);
a.append_term(Fq::one(), base);
// a = [1] P
assert!(!a.clone().check());
assert!(!a.clone().check(&engine));
a.append_term(Fq::one(), base);
// a = [1+1] P
assert!(!a.clone().check());
assert!(!a.clone().check(&engine));
a.append_term(-Fq::one(), base_viol);
// a = [1+1] P + [-1] 2P
assert!(a.clone().check());
assert!(a.clone().check(&engine));
let b = a.clone();
// Append a point that is the negation of an existing one.
a.append_term(Fq::from(4), -base);
// a = [1+1-4] P + [-1] 2P
assert!(!a.clone().check());
assert!(!a.clone().check(&engine));
a.append_term(Fq::from(2), base_viol);
// a = [1+1-4] P + [-1+2] 2P
assert!(a.clone().check());
assert!(a.clone().check(&engine));
// Add two MSMs with common bases.
a.scale(Fq::from(3));
a.add_msm(&b);
// a = [3*(1+1)+(1+1-4)] P + [3*(-1)+(-1+2)] 2P
assert!(a.clone().check());
assert!(a.clone().check(&engine));
let mut c: MSMIPA<EpAffine> = MSMIPA::new(&params);
c.append_term(Fq::from(2), base);
c.append_term(Fq::one(), -base_viol);
// c = [2] P + [1] (-2P)
assert!(c.clone().check());
assert!(c.clone().check(&engine));
// Add two MSMs with bases that differ only in sign.
a.add_msm(&c);
assert!(a.check());
assert!(a.check(&engine));
}
}

View File

@ -9,6 +9,7 @@ use crate::transcript::{EncodedChallenge, TranscriptWrite};
use group::Curve;
use halo2_middleware::ff::Field;
use halo2_middleware::zal::traits::MsmAccel;
use rand_core::RngCore;
use std::io;
use std::marker::PhantomData;
@ -27,8 +28,9 @@ impl<'params, C: CurveAffine> Prover<'params, IPACommitmentScheme<C>> for Prover
}
/// Create a multi-opening proof
fn create_proof<'com, Z: EncodedChallenge<C>, T: TranscriptWrite<C, Z>, R, I>(
fn create_proof_with_engine<'com, Z: EncodedChallenge<C>, T: TranscriptWrite<C, Z>, R, I>(
&self,
engine: &impl MsmAccel<C>,
mut rng: R,
transcript: &mut T,
queries: I,
@ -93,7 +95,10 @@ impl<'params, C: CurveAffine> Prover<'params, IPACommitmentScheme<C>> for Prover
.unwrap();
let q_prime_blind = Blind(C::Scalar::random(&mut rng));
let q_prime_commitment = self.params.commit(&q_prime_poly, q_prime_blind).to_affine();
let q_prime_commitment = self
.params
.commit(engine, &q_prime_poly, q_prime_blind)
.to_affine();
transcript.write_point(q_prime_commitment)?;
@ -117,6 +122,14 @@ impl<'params, C: CurveAffine> Prover<'params, IPACommitmentScheme<C>> for Prover
},
);
commitment::create_proof(self.params, rng, transcript, &p_poly, p_poly_blind, *x_3)
commitment::create_proof_with_engine(
engine,
self.params,
rng,
transcript,
&p_poly,
p_poly_blind,
*x_3,
)
}
}

View File

@ -10,7 +10,7 @@ use crate::{
};
use group::Curve;
use halo2_middleware::ff::Field;
use halo2curves::msm::best_multiexp;
use halo2_middleware::zal::{impls::H2cEngine, traits::MsmAccel};
use halo2curves::CurveAffine;
use rand_core::OsRng;
@ -64,10 +64,9 @@ impl<'params, C: CurveAffine> GuardIPA<'params, C> {
}
/// Computes G = ⟨s, params.g⟩
pub fn compute_g(&self) -> C {
pub fn compute_g(&self, engine: &impl MsmAccel<C>) -> C {
let s = compute_s(&self.u, C::Scalar::ONE);
best_multiexp(&s, &self.msm.params.g).to_affine()
engine.msm(&s, &self.msm.params.g).to_affine()
}
}
@ -107,7 +106,8 @@ impl<'params, C: CurveAffine>
/// specific failing proofs, it must re-process the proofs separately.
#[must_use]
fn finalize(self) -> bool {
self.msm.check()
// TODO: Verification is cheap, ZkAccel on verifier is not a priority.
self.msm.check(&H2cEngine::new())
}
}
@ -135,7 +135,8 @@ impl<'params, C: CurveAffine>
) -> Result<Self::Output, Error> {
let guard = f(self.msm)?;
let msm = guard.use_challenges();
if msm.check() {
// ZAL: Verification is (supposedly) cheap, hence we don't use an accelerator engine
if msm.check(&H2cEngine::new()) {
Ok(())
} else {
Err(Error::ConstraintSystemFailure)

View File

@ -5,7 +5,7 @@ use crate::poly::{Coeff, LagrangeCoeff, Polynomial};
use group::{prime::PrimeCurveAffine, Curve, Group};
use halo2_middleware::ff::{Field, PrimeField};
use halo2curves::msm::best_multiexp;
use halo2_middleware::zal::traits::MsmAccel;
use halo2curves::pairing::Engine;
use halo2curves::CurveExt;
use rand_core::{OsRng, RngCore};
@ -302,13 +302,18 @@ where
MSMKZG::new()
}
fn commit_lagrange(&self, poly: &Polynomial<E::Fr, LagrangeCoeff>, _: Blind<E::Fr>) -> E::G1 {
fn commit_lagrange(
&self,
engine: &impl MsmAccel<E::G1Affine>,
poly: &Polynomial<E::Fr, LagrangeCoeff>,
_: Blind<E::Fr>,
) -> E::G1 {
let mut scalars = Vec::with_capacity(poly.len());
scalars.extend(poly.iter());
let bases = &self.g_lagrange;
let size = scalars.len();
assert!(bases.len() >= size);
best_multiexp(&scalars, &bases[0..size])
engine.msm(&scalars, &bases[0..size])
}
/// Writes params to a buffer.
@ -346,13 +351,18 @@ where
Self::setup(k, OsRng)
}
fn commit(&self, poly: &Polynomial<E::Fr, Coeff>, _: Blind<E::Fr>) -> E::G1 {
fn commit(
&self,
engine: &impl MsmAccel<E::G1Affine>,
poly: &Polynomial<E::Fr, Coeff>,
_: Blind<E::Fr>,
) -> E::G1 {
let mut scalars = Vec::with_capacity(poly.len());
scalars.extend(poly.iter());
let bases = &self.g;
let size = scalars.len();
assert!(bases.len() >= size);
best_multiexp(&scalars, &bases[0..size])
engine.msm(&scalars, &bases[0..size])
}
fn get_g(&self) -> &[E::G1Affine] {
@ -366,6 +376,7 @@ mod test {
use crate::poly::commitment::{Blind, Params};
use crate::poly::kzg::commitment::ParamsKZG;
use halo2_middleware::ff::Field;
use halo2_middleware::zal::impls::H2cEngine;
#[test]
fn test_commit_lagrange() {
@ -376,6 +387,7 @@ mod test {
use crate::poly::EvaluationDomain;
use halo2curves::bn256::{Bn256, Fr};
let engine = H2cEngine::new();
let params = ParamsKZG::<Bn256>::new(K);
let domain = EvaluationDomain::new(1, K);
@ -389,7 +401,10 @@ mod test {
let alpha = Blind(Fr::random(OsRng));
assert_eq!(params.commit(&b, alpha), params.commit_lagrange(&a, alpha));
assert_eq!(
params.commit(&engine, &b, alpha),
params.commit_lagrange(&engine, &a, alpha)
);
}
#[test]

View File

@ -3,8 +3,8 @@ use std::fmt::Debug;
use super::commitment::ParamsKZG;
use crate::{arithmetic::parallelize, poly::commitment::MSM};
use group::{Curve, Group};
use halo2_middleware::zal::traits::MsmAccel;
use halo2curves::{
msm::best_multiexp,
pairing::{Engine, MillerLoopResult, MultiMillerLoop},
CurveAffine, CurveExt,
};
@ -71,15 +71,15 @@ where
}
}
fn check(&self) -> bool {
bool::from(self.eval().is_identity())
fn check(&self, engine: &impl MsmAccel<E::G1Affine>) -> bool {
bool::from(self.eval(engine).is_identity())
}
fn eval(&self) -> E::G1 {
fn eval(&self, engine: &impl MsmAccel<E::G1Affine>) -> E::G1 {
use group::prime::PrimeCurveAffine;
let mut bases = vec![E::G1Affine::identity(); self.scalars.len()];
E::G1::batch_normalize(&self.bases, &mut bases);
best_multiexp(&self.scalars, &bases)
engine.msm(&self.scalars, &bases)
}
fn bases(&self) -> Vec<E::G1> {
@ -185,12 +185,12 @@ where
}
/// Performs final pairing check with given verifier params and two channel linear combination
pub fn check(self) -> bool {
pub fn check(self, engine: &impl MsmAccel<E::G1Affine>) -> bool {
let s_g2_prepared = E::G2Prepared::from(self.params.s_g2);
let n_g2_prepared = E::G2Prepared::from(-self.params.g2);
let left = self.left.eval();
let right = self.right.eval();
let left = self.left.eval(engine);
let right = self.right.eval(engine);
let (term_1, term_2) = (
(&left.into(), &s_g2_prepared),

View File

@ -9,6 +9,7 @@ use crate::poly::{commitment::Blind, Polynomial};
use crate::transcript::{EncodedChallenge, TranscriptWrite};
use group::Curve;
use halo2_middleware::zal::traits::MsmAccel;
use halo2curves::pairing::Engine;
use halo2curves::CurveExt;
use rand_core::RngCore;
@ -36,7 +37,7 @@ where
}
/// Create a multi-opening proof
fn create_proof<
fn create_proof_with_engine<
'com,
Ch: EncodedChallenge<E::G1Affine>,
T: TranscriptWrite<E::G1Affine, Ch>,
@ -44,6 +45,7 @@ where
I,
>(
&self,
engine: &impl MsmAccel<E::G1Affine>,
_: R,
transcript: &mut T,
queries: I,
@ -79,7 +81,7 @@ where
};
let w = self
.params
.commit(&witness_poly, Blind::default())
.commit(engine, &witness_poly, Blind::default())
.to_affine();
transcript.write_point(w)?;

View File

@ -15,6 +15,7 @@ use crate::transcript::{EncodedChallenge, TranscriptWrite};
use crate::multicore::{IntoParallelIterator, ParallelIterator};
use group::Curve;
use halo2_middleware::ff::Field;
use halo2_middleware::zal::traits::MsmAccel;
use halo2curves::pairing::Engine;
use halo2curves::CurveExt;
use rand_core::RngCore;
@ -117,7 +118,7 @@ where
}
/// Create a multi-opening proof
fn create_proof<
fn create_proof_with_engine<
'com,
Ch: EncodedChallenge<E::G1Affine>,
T: TranscriptWrite<E::G1Affine, Ch>,
@ -125,6 +126,7 @@ where
I,
>(
&self,
engine: &impl MsmAccel<E::G1Affine>,
_: R,
transcript: &mut T,
queries: I,
@ -208,7 +210,10 @@ where
.reduce(|acc, poly| acc + &poly)
.unwrap();
let h = self.params.commit(&h_x, Blind::default()).to_affine();
let h = self
.params
.commit(engine, &h_x, Blind::default())
.to_affine();
transcript.write_point(h)?;
let u: ChallengeU<_> = transcript.squeeze_challenge_scalar();
@ -290,7 +295,10 @@ where
_marker: PhantomData,
};
let h = self.params.commit(&h_x, Blind::default()).to_affine();
let h = self
.params
.commit(engine, &h_x, Blind::default())
.to_affine();
transcript.write_point(h)?;
Ok(())

View File

@ -11,6 +11,7 @@ use crate::{
},
};
use halo2_middleware::ff::Field;
use halo2_middleware::zal::impls::H2cEngine;
use halo2curves::{
pairing::{Engine, MultiMillerLoop},
CurveAffine, CurveExt,
@ -136,7 +137,9 @@ where
}
fn finalize(self) -> bool {
self.msm_accumulator.check()
// ZAL: Verification is (supposedly) cheap, hence we don't use an accelerator engine
let default_engine = H2cEngine::new();
self.msm_accumulator.check(&default_engine)
}
}
@ -168,7 +171,9 @@ where
// Guard is updated with new msm contributions
let guard = f(self.msm)?;
let msm = guard.msm_accumulator;
if msm.check() {
// Verification is (supposedly) cheap, hence we don't use an accelerator engine
let default_engine = H2cEngine::new();
if msm.check(&default_engine) {
Ok(())
} else {
Err(Error::ConstraintSystemFailure)

View File

@ -16,6 +16,7 @@ mod test {
};
use group::Curve;
use halo2_middleware::ff::WithSmallOrderMulGroup;
use halo2_middleware::zal::{impls::H2cEngine, traits::MsmAccel};
use rand_core::OsRng;
#[test]
@ -27,6 +28,7 @@ mod test {
const K: u32 = 4;
let engine = H2cEngine::new();
let params = ParamsIPA::<EqAffine>::new(K);
let proof = create_proof::<
@ -34,7 +36,7 @@ mod test {
ProverIPA<_>,
_,
Blake2bWrite<_, _, Challenge255<_>>,
>(&params);
>(&engine, &params);
let verifier_params = params.verifier_params();
@ -64,6 +66,7 @@ mod test {
const K: u32 = 4;
let engine = H2cEngine::new();
let params = ParamsIPA::<EqAffine>::new(K);
let proof = create_proof::<
@ -71,7 +74,7 @@ mod test {
ProverIPA<_>,
_,
Keccak256Write<_, _, Challenge255<_>>,
>(&params);
>(&engine, &params);
let verifier_params = params.verifier_params();
@ -101,10 +104,12 @@ mod test {
const K: u32 = 4;
let engine = H2cEngine::new();
let params = ParamsKZG::<Bn256>::new(K);
let proof =
create_proof::<_, ProverGWC<_>, _, Blake2bWrite<_, _, Challenge255<_>>>(&params);
let proof = create_proof::<_, ProverGWC<_>, _, Blake2bWrite<_, _, Challenge255<_>>>(
&engine, &params,
);
let verifier_params = params.verifier_params();
@ -132,6 +137,7 @@ mod test {
const K: u32 = 4;
let engine = H2cEngine::new();
let params = ParamsKZG::<Bn256>::new(K);
let proof = create_proof::<
@ -139,7 +145,7 @@ mod test {
ProverSHPLONK<_>,
_,
Blake2bWrite<_, _, Challenge255<_>>,
>(&params);
>(&engine, &params);
let verifier_params = params.verifier_params();
@ -225,6 +231,7 @@ mod test {
E: EncodedChallenge<Scheme::Curve>,
T: TranscriptWriterBuffer<Vec<u8>, Scheme::Curve, E>,
>(
engine: &impl MsmAccel<Scheme::Curve>,
params: &'params Scheme::ParamsProver,
) -> Vec<u8>
where
@ -250,9 +257,9 @@ mod test {
let mut transcript = T::init(vec![]);
let blind = Blind::new(&mut OsRng);
let a = params.commit(&ax, blind).to_affine();
let b = params.commit(&bx, blind).to_affine();
let c = params.commit(&cx, blind).to_affine();
let a = params.commit(engine, &ax, blind).to_affine();
let b = params.commit(engine, &bx, blind).to_affine();
let c = params.commit(engine, &cx, blind).to_affine();
transcript.write_point(a).unwrap();
transcript.write_point(b).unwrap();

View File

@ -26,14 +26,16 @@ rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"]
[dependencies]
ff = "0.13"
halo2curves = { version = "0.6.0", default-features = false }
serde = { version = "1", optional = true, features = ["derive"] }
serde_derive = { version = "1", optional = true}
rayon = "1.8"
[dev-dependencies]
ark-std = { version = "0.3" }
proptest = "1"
group = "0.13"
halo2curves = { version = "0.6.0", default-features = false }
rand_core = { version = "0.6", default-features = false }
[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies]
getrandom = { version = "0.2", features = ["js"] }

View File

@ -5,5 +5,6 @@ pub mod multicore;
pub mod permutation;
pub mod poly;
pub mod shuffle;
pub mod zal;
pub use ff;

halo2_middleware/src/zal.rs (new file, 350 lines)
View File

@ -0,0 +1,350 @@
//! This module provides "ZK Acceleration Layer" traits
//! to abstract away the execution engine for performance-critical primitives.
//!
//! Terminology
//! -----------
//!
//! We use the naming convention Backend + "Engine" for concrete implementations of a ZalEngine,
//! for example H2cEngine for the pure halo2curves implementation.
//!
//! Alternative names considered were Executor or Driver, however:
//! - "executor" is already used in Rust (and the name is long)
//! - "driver" would be confusing, as we work at quite a low level with GPUs and FPGAs.
//!
//! Unfortunately the "Engine" name is already used in bn256 for pairings.
//! Fortunately a ZalEngine is only used in the prover (at least for now),
//! while a "pairing engine" is only used in the verifier.
//!
//! Initialization design space
//! ---------------------------
//!
//! It is recommended that ZAL backends provide:
//! - an initialization function:
//! - either "fn new() -> ZalEngine" for simple libraries
//! - or a builder pattern for complex initializations
//! - a shutdown function, or documentation of when one is not needed (for example when the backend is a global threadpool like Rayon).
//!
//! Backends might want to add as an option:
//! - The number of threads (CPU)
//! - The device(s) to run on (multi-sockets machines, multi-GPUs machines, ...)
//! - The curve (JIT-compiled backend)
//!
//! Descriptors
//! ---------------------------
//!
//! Descriptors enable providers to configure opaque details on data
//! when doing repeated computations with the same input(s).
//! For example:
//! - Pointer(s) caching to limit data movement between CPU and GPU, FPGAs
//! - Length of data
//! - input data layout:
//! - canonical or Montgomery fields, unsaturated representation, endianness
//! - jacobian or projective coordinates or maybe even Twisted Edwards for faster elliptic curve additions,
//! - FFT: canonical or bit-reversed permuted
//! - output data layout
//! - Device(s) ID
//!
//! For resources that need special cleanup like GPU memory, a custom `Drop` is required.
//!
//! Note that resources can also be stored in the engine in a hashmap
//! and an integer ID or a pointer can be opaquely given as a descriptor.
// The ZK Accel Layer API
// ---------------------------------------------------
pub mod traits {
use halo2curves::CurveAffine;
pub trait MsmAccel<C: CurveAffine> {
fn msm(&self, coeffs: &[C::Scalar], base: &[C]) -> C::Curve;
// Caching API
// -------------------------------------------------
// From here we propose an extended API
// that allows reusing coeffs and/or the base points
//
// This is inspired by the cuDNN API (Nvidia GPU) and the oneDNN API (CPU, OpenCL)
// and their usage of descriptors:
// https://docs.nvidia.com/deeplearning/cudnn/api/index.html#cudnn-ops-infer-so-opaque
//
// https://github.com/oneapi-src/oneDNN/blob/master/doc/programming_model/basic_concepts.md
//
// Descriptors are opaque pointers that hold the input in a format suitable for the accelerator engine.
// They may be:
// - Input moved on accelerator device (only once for repeated calls)
// - Endianness conversion
// - Converting from Montgomery to Canonical form
// - Input changed from Projective to Jacobian coordinates or even to a Twisted Edwards curve.
// - other form of expensive preprocessing
type CoeffsDescriptor<'c>;
type BaseDescriptor<'b>;
fn get_coeffs_descriptor<'c>(&self, coeffs: &'c [C::Scalar]) -> Self::CoeffsDescriptor<'c>;
fn get_base_descriptor<'b>(&self, base: &'b [C]) -> Self::BaseDescriptor<'b>;
fn msm_with_cached_scalars(
&self,
coeffs: &Self::CoeffsDescriptor<'_>,
base: &[C],
) -> C::Curve;
fn msm_with_cached_base(
&self,
coeffs: &[C::Scalar],
base: &Self::BaseDescriptor<'_>,
) -> C::Curve;
fn msm_with_cached_inputs(
&self,
coeffs: &Self::CoeffsDescriptor<'_>,
base: &Self::BaseDescriptor<'_>,
) -> C::Curve;
// Execute MSM according to descriptors
// Unsure of naming, msm_with_cached_inputs, msm_apply, msm_cached, msm_with_descriptors, ...
}
}
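// Illustration (a sketch, not shipped in this file): how the caching API above is meant to be
// used when many MSMs share the same base points, e.g. a fixed SRS. The helper name is ours;
// `H2cEngine` is the reference CPU backend defined in the `impls` module below.
fn msm_rounds_with_cached_base<C: halo2curves::CurveAffine>(
    bases: &[C],
    rounds: &[Vec<C::Scalar>],
) -> Vec<C::Curve> {
    use crate::zal::{impls::H2cEngine, traits::MsmAccel};
    let engine = H2cEngine::new();
    // Preprocess the bases once; an accelerated backend could upload them to device memory here.
    let base_desc = engine.get_base_descriptor(bases);
    rounds
        .iter()
        .map(|coeffs| engine.msm_with_cached_base(coeffs, &base_desc))
        .collect()
}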
// ZAL using Halo2curves as a backend
// ---------------------------------------------------
pub mod impls {
use std::marker::PhantomData;
use crate::zal::traits::MsmAccel;
use halo2curves::msm::best_multiexp;
use halo2curves::CurveAffine;
// Halo2curve Backend
// ---------------------------------------------------
#[derive(Default)]
pub struct H2cEngine;
pub struct H2cMsmCoeffsDesc<'c, C: CurveAffine> {
raw: &'c [C::Scalar],
}
pub struct H2cMsmBaseDesc<'b, C: CurveAffine> {
raw: &'b [C],
}
impl H2cEngine {
pub fn new() -> Self {
Self {}
}
}
impl<C: CurveAffine> MsmAccel<C> for H2cEngine {
fn msm(&self, coeffs: &[C::Scalar], bases: &[C]) -> C::Curve {
best_multiexp(coeffs, bases)
}
// Caching API
// -------------------------------------------------
type CoeffsDescriptor<'c> = H2cMsmCoeffsDesc<'c, C>;
type BaseDescriptor<'b> = H2cMsmBaseDesc<'b, C>;
fn get_coeffs_descriptor<'c>(&self, coeffs: &'c [C::Scalar]) -> Self::CoeffsDescriptor<'c> {
// Do expensive device/library specific preprocessing here
Self::CoeffsDescriptor { raw: coeffs }
}
fn get_base_descriptor<'b>(&self, base: &'b [C]) -> Self::BaseDescriptor<'b> {
Self::BaseDescriptor { raw: base }
}
fn msm_with_cached_scalars(
&self,
coeffs: &Self::CoeffsDescriptor<'_>,
base: &[C],
) -> C::Curve {
best_multiexp(coeffs.raw, base)
}
fn msm_with_cached_base(
&self,
coeffs: &[C::Scalar],
base: &Self::BaseDescriptor<'_>,
) -> C::Curve {
best_multiexp(coeffs, base.raw)
}
fn msm_with_cached_inputs(
&self,
coeffs: &Self::CoeffsDescriptor<'_>,
base: &Self::BaseDescriptor<'_>,
) -> C::Curve {
best_multiexp(coeffs.raw, base.raw)
}
}
// Backend-agnostic engine objects
// ---------------------------------------------------
#[derive(Debug)]
pub struct PlonkEngine<C: CurveAffine, MsmEngine: MsmAccel<C>> {
pub msm_backend: MsmEngine,
_marker: PhantomData<C>, // compiler complains about unused C otherwise
}
#[derive(Default)]
pub struct PlonkEngineConfig<C, M> {
curve: PhantomData<C>,
msm_backend: M,
}
#[derive(Default)]
pub struct NoCurve;
#[derive(Default)]
pub struct HasCurve<C: CurveAffine>(PhantomData<C>);
#[derive(Default)]
pub struct NoMsmEngine;
pub struct HasMsmEngine<C: CurveAffine, M: MsmAccel<C>>(M, PhantomData<C>);
impl PlonkEngineConfig<NoCurve, NoMsmEngine> {
pub fn new() -> PlonkEngineConfig<NoCurve, NoMsmEngine> {
Default::default()
}
pub fn set_curve<C: CurveAffine>(self) -> PlonkEngineConfig<HasCurve<C>, NoMsmEngine> {
Default::default()
}
pub fn build_default<C: CurveAffine>() -> PlonkEngine<C, H2cEngine> {
PlonkEngine {
msm_backend: H2cEngine::new(),
_marker: Default::default(),
}
}
}
impl<C: CurveAffine, M> PlonkEngineConfig<HasCurve<C>, M> {
pub fn set_msm<MsmEngine: MsmAccel<C>>(
self,
engine: MsmEngine,
) -> PlonkEngineConfig<HasCurve<C>, HasMsmEngine<C, MsmEngine>> {
// Copy all other parameters
let Self { curve, .. } = self;
// Return with modified MSM engine
PlonkEngineConfig {
curve,
msm_backend: HasMsmEngine(engine, Default::default()),
}
}
}
impl<C: CurveAffine, M: MsmAccel<C>> PlonkEngineConfig<HasCurve<C>, HasMsmEngine<C, M>> {
pub fn build(self) -> PlonkEngine<C, M> {
PlonkEngine {
msm_backend: self.msm_backend.0,
_marker: Default::default(),
}
}
}
}
// Testing
// ---------------------------------------------------
#[cfg(test)]
mod test {
use crate::zal::impls::{H2cEngine, PlonkEngineConfig};
use crate::zal::traits::MsmAccel;
use halo2curves::bn256::G1Affine;
use halo2curves::msm::best_multiexp;
use halo2curves::CurveAffine;
use ark_std::{end_timer, start_timer};
use ff::Field;
use group::{Curve, Group};
use rand_core::OsRng;
fn run_msm_zal_default<C: CurveAffine>(min_k: usize, max_k: usize) {
let points = (0..1 << max_k)
.map(|_| C::Curve::random(OsRng))
.collect::<Vec<_>>();
let mut affine_points = vec![C::identity(); 1 << max_k];
C::Curve::batch_normalize(&points[..], &mut affine_points[..]);
let points = affine_points;
let scalars = (0..1 << max_k)
.map(|_| C::Scalar::random(OsRng))
.collect::<Vec<_>>();
for k in min_k..=max_k {
let points = &points[..1 << k];
let scalars = &scalars[..1 << k];
let t0 = start_timer!(|| format!("freestanding msm k={}", k));
let e0 = best_multiexp(scalars, points);
end_timer!(t0);
let engine = PlonkEngineConfig::build_default::<G1Affine>();
let t1 = start_timer!(|| format!("H2cEngine msm k={}", k));
let e1 = engine.msm_backend.msm(scalars, points);
end_timer!(t1);
assert_eq!(e0, e1);
// Caching API
// -----------
let t2 = start_timer!(|| format!("H2cEngine msm cached base k={}", k));
let base_descriptor = engine.msm_backend.get_base_descriptor(points);
let e2 = engine
.msm_backend
.msm_with_cached_base(scalars, &base_descriptor);
end_timer!(t2);
assert_eq!(e0, e2)
}
}
fn run_msm_zal_custom<C: CurveAffine>(min_k: usize, max_k: usize) {
let points = (0..1 << max_k)
.map(|_| C::Curve::random(OsRng))
.collect::<Vec<_>>();
let mut affine_points = vec![C::identity(); 1 << max_k];
C::Curve::batch_normalize(&points[..], &mut affine_points[..]);
let points = affine_points;
let scalars = (0..1 << max_k)
.map(|_| C::Scalar::random(OsRng))
.collect::<Vec<_>>();
for k in min_k..=max_k {
let points = &points[..1 << k];
let scalars = &scalars[..1 << k];
let t0 = start_timer!(|| format!("freestanding msm k={}", k));
let e0 = best_multiexp(scalars, points);
end_timer!(t0);
let engine = PlonkEngineConfig::new()
.set_curve::<G1Affine>()
.set_msm(H2cEngine::new())
.build();
let t1 = start_timer!(|| format!("H2cEngine msm k={}", k));
let e1 = engine.msm_backend.msm(scalars, points);
end_timer!(t1);
assert_eq!(e0, e1);
// Caching API
// -----------
let t2 = start_timer!(|| format!("H2cEngine msm cached base k={}", k));
let base_descriptor = engine.msm_backend.get_base_descriptor(points);
let e2 = engine
.msm_backend
.msm_with_cached_base(scalars, &base_descriptor);
end_timer!(t2);
assert_eq!(e0, e2)
}
}
#[test]
fn test_msm_zal() {
run_msm_zal_default::<G1Affine>(3, 14);
run_msm_zal_custom::<G1Affine>(3, 14);
}
}
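
The integration path for an accelerator vendor is then: implement `MsmAccel` and hand the engine to `PlonkEngineConfig::set_msm`, following the initialization guidance in the module docs above. A hedged skeleton of such a third-party backend (everything named `MyGpu*` is hypothetical, and the MSMs simply fall back to `best_multiexp` where a real backend would dispatch to a device):

use halo2_middleware::zal::traits::MsmAccel;
use halo2curves::{msm::best_multiexp, CurveAffine};

/// Hypothetical accelerated backend; a real one would hold device handles, streams, etc.
struct MyGpuEngine;

struct MyGpuCoeffsDesc<'c, C: CurveAffine> {
    raw: &'c [C::Scalar], // a real backend would keep a device-side copy here
}
struct MyGpuBaseDesc<'b, C: CurveAffine> {
    raw: &'b [C], // likewise: bases uploaded to the device once
}

impl<C: CurveAffine> MsmAccel<C> for MyGpuEngine {
    fn msm(&self, coeffs: &[C::Scalar], base: &[C]) -> C::Curve {
        best_multiexp(coeffs, base) // stand-in for a device dispatch
    }

    type CoeffsDescriptor<'c> = MyGpuCoeffsDesc<'c, C>;
    type BaseDescriptor<'b> = MyGpuBaseDesc<'b, C>;

    fn get_coeffs_descriptor<'c>(&self, coeffs: &'c [C::Scalar]) -> Self::CoeffsDescriptor<'c> {
        MyGpuCoeffsDesc { raw: coeffs }
    }
    fn get_base_descriptor<'b>(&self, base: &'b [C]) -> Self::BaseDescriptor<'b> {
        MyGpuBaseDesc { raw: base }
    }

    fn msm_with_cached_scalars(&self, coeffs: &Self::CoeffsDescriptor<'_>, base: &[C]) -> C::Curve {
        best_multiexp(coeffs.raw, base)
    }
    fn msm_with_cached_base(&self, coeffs: &[C::Scalar], base: &Self::BaseDescriptor<'_>) -> C::Curve {
        best_multiexp(coeffs, base.raw)
    }
    fn msm_with_cached_inputs(
        &self,
        coeffs: &Self::CoeffsDescriptor<'_>,
        base: &Self::BaseDescriptor<'_>,
    ) -> C::Curve {
        best_multiexp(coeffs.raw, base.raw)
    }
}

// Wired into the prover exactly like H2cEngine in the tests above:
// PlonkEngineConfig::new().set_curve::<G1Affine>().set_msm(MyGpuEngine).build()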

View File

@ -14,7 +14,7 @@ mod verifier {
pub use keygen::{keygen_pk, keygen_vk, keygen_vk_custom};
pub use prover::create_proof;
pub use prover::{create_proof, create_proof_with_engine};
pub use verifier::verify_proof;
pub use error::Error;

View File

@ -5,9 +5,66 @@ use halo2_backend::plonk::{prover::ProverV2, ProvingKey};
use halo2_frontend::circuit::{compile_circuit_cs, WitnessCalculator};
use halo2_frontend::plonk::Circuit;
use halo2_middleware::ff::{FromUniformBytes, WithSmallOrderMulGroup};
use halo2_middleware::zal::{
impls::{PlonkEngine, PlonkEngineConfig},
traits::MsmAccel,
};
use rand_core::RngCore;
use std::collections::HashMap;
/// This creates a proof for the provided `circuit` when given the public
/// parameters `params` and the proving key [`ProvingKey`] that was
/// generated previously for the same circuit. The provided `instances`
/// are zero-padded internally.
pub fn create_proof_with_engine<
'params,
Scheme: CommitmentScheme,
P: Prover<'params, Scheme>,
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWrite<Scheme::Curve, E>,
ConcreteCircuit: Circuit<Scheme::Scalar>,
M: MsmAccel<Scheme::Curve>,
>(
engine: PlonkEngine<Scheme::Curve, M>,
params: &'params Scheme::ParamsProver,
pk: &ProvingKey<Scheme::Curve>,
circuits: &[ConcreteCircuit],
instances: &[&[&[Scheme::Scalar]]],
rng: R,
transcript: &mut T,
) -> Result<(), Error>
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
if circuits.len() != instances.len() {
return Err(Error::Backend(ErrorBack::InvalidInstances));
}
let (config, cs, _) = compile_circuit_cs::<_, ConcreteCircuit>(
pk.get_vk().compress_selectors.unwrap_or_default(),
#[cfg(feature = "circuit-params")]
circuits[0].params(),
);
let mut witness_calcs: Vec<_> = circuits
.iter()
.enumerate()
.map(|(i, circuit)| WitnessCalculator::new(params.k(), circuit, &config, &cs, instances[i]))
.collect();
let mut prover = ProverV2::<Scheme, P, _, _, _, _>::new_with_engine(
engine, params, pk, instances, rng, transcript,
)?;
let mut challenges = HashMap::new();
let phases = prover.phases().to_vec();
for phase in phases.iter() {
let mut witnesses = Vec::with_capacity(circuits.len());
for witness_calc in witness_calcs.iter_mut() {
witnesses.push(witness_calc.calc(*phase, &challenges)?);
}
challenges = prover.commit_phase(*phase, witnesses).unwrap();
}
Ok(prover.create_proof()?)
}
/// This creates a proof for the provided `circuit` when given the public
/// parameters `params` and the proving key [`ProvingKey`] that was
/// generated previously for the same circuit. The provided `instances`
@ -31,30 +88,10 @@ pub fn create_proof<
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
if circuits.len() != instances.len() {
return Err(Error::Backend(ErrorBack::InvalidInstances));
}
let (config, cs, _) = compile_circuit_cs::<_, ConcreteCircuit>(
pk.get_vk().compress_selectors.unwrap_or_default(),
#[cfg(feature = "circuit-params")]
circuits[0].params(),
);
let mut witness_calcs: Vec<_> = circuits
.iter()
.enumerate()
.map(|(i, circuit)| WitnessCalculator::new(params.k(), circuit, &config, &cs, instances[i]))
.collect();
let mut prover = ProverV2::<Scheme, P, _, _, _>::new(params, pk, instances, rng, transcript)?;
let mut challenges = HashMap::new();
let phases = prover.phases().to_vec();
for phase in phases.iter() {
let mut witnesses = Vec::with_capacity(circuits.len());
for witness_calc in witness_calcs.iter_mut() {
witnesses.push(witness_calc.calc(*phase, &challenges)?);
}
challenges = prover.commit_phase(*phase, witnesses).unwrap();
}
Ok(prover.create_proof()?)
let engine = PlonkEngineConfig::build_default();
create_proof_with_engine::<Scheme, P, _, _, _, _, _>(
engine, params, pk, circuits, instances, rng, transcript,
)
}
#[test]

View File

@ -562,9 +562,15 @@ fn test_mycircuit_full_legacy() {
#[test]
fn test_mycircuit_full_split() {
use halo2_middleware::zal::impls::{H2cEngine, PlonkEngineConfig};
#[cfg(feature = "heap-profiling")]
let _profiler = dhat::Profiler::new_heap();
let engine = PlonkEngineConfig::new()
.set_curve::<G1Affine>()
.set_msm(H2cEngine::new())
.build();
let k = K;
let circuit: MyCircuit<Fr, WIDTH_FACTOR> = MyCircuit::new(k, 42);
let (compiled_circuit, config, cs) = compile_circuit(k, &circuit, false).unwrap();
@ -591,15 +597,22 @@ fn test_mycircuit_full_split() {
let start = Instant::now();
let mut witness_calc = WitnessCalculator::new(k, &circuit, &config, &cs, instances_slice);
let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]);
let mut prover =
ProverV2Single::<KZGCommitmentScheme<Bn256>, ProverSHPLONK<'_, Bn256>, _, _, _>::new(
&params,
&pk,
instances_slice,
&mut rng,
&mut transcript,
)
.unwrap();
let mut prover = ProverV2Single::<
KZGCommitmentScheme<Bn256>,
ProverSHPLONK<'_, Bn256>,
_,
_,
_,
_,
>::new_with_engine(
engine,
&params,
&pk,
instances_slice,
&mut rng,
&mut transcript,
)
.unwrap();
let mut challenges = HashMap::new();
for phase in 0..cs.phases().count() {
println!("phase {phase}");

View File

@ -3,13 +3,17 @@
use assert_matches::assert_matches;
use ff::{FromUniformBytes, WithSmallOrderMulGroup};
use halo2_middleware::zal::{
impls::{PlonkEngine, PlonkEngineConfig},
traits::MsmAccel,
};
use halo2_proofs::arithmetic::Field;
use halo2_proofs::circuit::{Cell, Layouter, SimpleFloorPlanner, Value};
use halo2_proofs::dev::MockProver;
use halo2_proofs::plonk::{
create_proof as create_plonk_proof, keygen_pk, keygen_vk, verify_proof as verify_plonk_proof,
Advice, Assigned, Circuit, Column, ConstraintSystem, Error, ErrorFront, Fixed, ProvingKey,
TableColumn, VerifyingKey,
create_proof_with_engine as create_plonk_proof_with_engine, keygen_pk, keygen_vk,
verify_proof as verify_plonk_proof, Advice, Assigned, Circuit, Column, ConstraintSystem, Error,
ErrorFront, Fixed, ProvingKey, TableColumn, VerifyingKey,
};
use halo2_proofs::poly::commitment::{CommitmentScheme, ParamsProver, Prover, Verifier};
use halo2_proofs::poly::Rotation;
@ -467,14 +471,16 @@ fn plonk_api() {
keygen_pk(params, vk, &empty_circuit).expect("keygen_pk should not fail")
}
fn create_proof<
fn create_proof_with_engine<
'params,
Scheme: CommitmentScheme,
P: Prover<'params, Scheme>,
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWriterBuffer<Vec<u8>, Scheme::Curve, E>,
M: MsmAccel<Scheme::Curve>,
>(
engine: PlonkEngine<Scheme::Curve, M>,
rng: R,
params: &'params Scheme::ParamsProver,
pk: &ProvingKey<Scheme::Curve>,
@ -491,7 +497,8 @@ fn plonk_api() {
let mut transcript = T::init(vec![]);
create_plonk_proof::<Scheme, P, _, _, _, _>(
create_plonk_proof_with_engine::<Scheme, P, _, _, _, _, _>(
engine,
params,
pk,
&[circuit.clone(), circuit.clone()],
@ -511,6 +518,25 @@ fn plonk_api() {
transcript.finalize()
}
fn create_proof<
'params,
Scheme: CommitmentScheme,
P: Prover<'params, Scheme>,
E: EncodedChallenge<Scheme::Curve>,
R: RngCore,
T: TranscriptWriterBuffer<Vec<u8>, Scheme::Curve, E>,
>(
rng: R,
params: &'params Scheme::ParamsProver,
pk: &ProvingKey<Scheme::Curve>,
) -> Vec<u8>
where
Scheme::Scalar: Ord + WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
let engine = PlonkEngineConfig::build_default();
create_proof_with_engine::<Scheme, P, _, _, T, _>(engine, rng, params, pk)
}
fn verify_proof<
'a,
'params,