Synchronize with upstream (#199)

* refactor: add default impl for `SyncDeps` for backward compatibility

* feat: pick changes from https://github.com/zcash/halo2/pull/728 and changes of flag `test-dev-graph`

* feat: pick changes from https://github.com/zcash/halo2/pull/622

* feat: pick changes about mod `circuit` and mod `dev`

* feat: pick rest changes of `halo2_proofs`

* fix: when `--no-default-features`

* ci: sync from upstream, and deduplicate jobs when
pushing to `main`, and remove always failing job `codecov`.

* fix: make `commit_zk` runnable when `--no-default-features`
This commit is contained in:
Han 2023-09-06 14:59:44 +08:00 committed by GitHub
parent e6cf4bf14e
commit 7a21656171
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
78 changed files with 2015 additions and 1743 deletions

View File

@ -1,14 +1,25 @@
name: CI checks
on: [pull_request, push]
on:
merge_group:
pull_request:
push:
branches:
- main
jobs:
test:
name: Test on ${{ matrix.os }}
name: Test on ${{ matrix.os }} with ${{ matrix.feature_set }} features
runs-on: ${{ matrix.os }}
strategy:
matrix:
feature_set: [basic, all]
os: [ubuntu-latest, windows-latest, macOS-latest]
include:
- feature_set: basic
features: batch,dev-graph,gadget-traces
- feature_set: all
features: batch,dev-graph,gadget-traces,multicore,test-dev-graph,thread-safe-region,sanity-checks,circuit-params
steps:
- uses: actions/checkout@v3
@ -19,7 +30,67 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: test
args: --verbose --release --all --all-features
args: --verbose --release --workspace --no-default-features --features "${{ matrix.features }}"
build:
name: Build target ${{ matrix.target }}
runs-on: ubuntu-latest
strategy:
matrix:
target:
- wasm32-unknown-unknown
- wasm32-wasi
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
- name: Add target
run: rustup target add ${{ matrix.target }}
- name: cargo build
uses: actions-rs/cargo@v1
with:
command: build
args: --no-default-features --features batch,dev-graph,gadget-traces --target ${{ matrix.target }}
bitrot:
name: Bitrot check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
# Build benchmarks to prevent bitrot
- name: Build benchmarks
uses: actions-rs/cargo@v1
with:
command: build
args: --benches --examples --all-features
doc-links:
name: Intra-doc links
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
- name: cargo fetch
uses: actions-rs/cargo@v1
with:
command: fetch
# Ensure intra-documentation links all resolve correctly
# Requires #![deny(intra_doc_link_resolution_failure)] in crates.
- name: Check intra-doc links
uses: actions-rs/cargo@v1
with:
command: doc
args: --all --document-private-items
fmt:
name: Rustfmt
@ -35,3 +106,4 @@ jobs:
with:
command: fmt
args: --all -- --check

View File

@ -1,106 +0,0 @@
name: CI checks main
on:
push:
branches:
- main
jobs:
test:
name: Test on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
- name: Run tests
uses: actions-rs/cargo@v1
with:
command: test
args: --verbose --release --all --all-features
bitrot:
name: Bitrot check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
# Build benchmarks to prevent bitrot
- name: Build benchmarks
uses: actions-rs/cargo@v1
with:
command: build
args: --benches --examples --all-features
codecov:
name: Code coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
# Use stable for this to ensure that cargo-tarpaulin can be built.
- uses: actions-rs/toolchain@v1
with:
override: false
- name: Install cargo-tarpaulin
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-tarpaulin
- name: Generate coverage report
uses: actions-rs/cargo@v1
with:
command: tarpaulin
args: --all-features --timeout 600 --out Xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3.1.0
doc-links:
name: Intra-doc links
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
- name: cargo fetch
uses: actions-rs/cargo@v1
with:
command: fetch
# Ensure intra-documentation links all resolve correctly
# Requires #![deny(intra_doc_link_resolution_failure)] in crates.
- name: Check intra-doc links
uses: actions-rs/cargo@v1
with:
command: doc
args: --all --document-private-items
build:
name: Build target ${{ matrix.target }}
runs-on: ubuntu-latest
strategy:
matrix:
target:
- wasm32-unknown-unknown
- wasm32-wasi
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
override: false
- name: Add target
run: rustup target add ${{ matrix.target }}
- name: cargo build
uses: actions-rs/cargo@v1
with:
command: build
args: --features dev-graph,gadget-traces,unstable --target ${{ matrix.target }}

View File

@ -4,7 +4,7 @@
## Minimum Supported Rust Version
Requires Rust **1.56.1** or higher.
Requires Rust **1.65.0** or higher.
Minimum supported Rust version can be changed in the future, but it will be done with a
minor version bump.
@ -14,6 +14,10 @@ minor version bump.
`halo2` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel computation.
The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads.
You can disable `rayon` by disabling the `"multicore"` feature.
Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature.
This will significantly degrade performance.
## License
Licensed under either of

View File

@ -19,7 +19,7 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"]
[dependencies]
halo2_proofs = { version = "0.2", path = "../halo2_proofs" }
halo2_proofs = { version = "0.2", path = "../halo2_proofs", default-features = false }
[lib]
bench = false

View File

@ -26,7 +26,7 @@ arrayvec = "0.7.0"
bitvec = "1"
ff = { version = "0.13", features = ["bits"] }
group = "0.13"
halo2_proofs = { version = "0.2", path = "../halo2_proofs" }
halo2_proofs = { version = "0.2", path = "../halo2_proofs", default-features = false }
lazy_static = "1"
halo2curves = { version = "0.1.0" }
proptest = { version = "1.0.0", optional = true }
@ -35,7 +35,7 @@ subtle = "2.3"
uint = "0.9.2" # MSRV 1.56.1
# Developer tooling dependencies
plotters = { version = "0.3.0", optional = true }
plotters = { version = "0.3.0", default-features = false, optional = true }
[dev-dependencies]
criterion = "0.3"
@ -48,7 +48,13 @@ pprof = { version = "0.8", features = ["criterion", "flamegraph"] } # MSRV 1.56
bench = false
[features]
dev-graph = ["halo2_proofs/dev-graph", "plotters"]
test-dev-graph = [
"halo2_proofs/dev-graph",
"plotters",
"plotters/bitmap_backend",
"plotters/bitmap_encoder",
"plotters/ttf",
]
circuit-params = ["halo2_proofs/circuit-params"]
test-dependencies = ["proptest"]
unstable = []

View File

@ -8,7 +8,7 @@ use halo2curves::pasta::{pallas, EqAffine};
use rand::rngs::OsRng;
use std::{
fs::File,
fs::{create_dir_all, File},
io::{prelude::*, BufReader},
path::Path,
};
@ -88,6 +88,9 @@ fn bench(name: &str, k: u32, c: &mut Criterion) {
}
}
// Create parent directory for assets
create_dir_all("./benches/sha256_assets").expect("Failed to create sha256_assets directory");
// Initialize the polynomial commitment parameters
let params_path = Path::new("./benches/sha256_assets/sha256_params");
if File::open(params_path).is_err() {
@ -134,7 +137,7 @@ fn bench(name: &str, k: u32, c: &mut Criterion) {
&params,
&pk,
&[circuit],
&[],
&[&[]],
OsRng,
&mut transcript,
)
@ -159,7 +162,7 @@ fn bench(name: &str, k: u32, c: &mut Criterion) {
&params,
pk.get_vk(),
strategy,
&[],
&[&[]],
&mut transcript,
)
.unwrap();

View File

@ -903,7 +903,7 @@ pub(crate) mod tests {
assert_eq!(prover.verify(), Ok(()))
}
#[cfg(feature = "dev-graph")]
#[cfg(feature = "test-dev-graph")]
#[test]
fn print_ecc_chip() {
use plotters::prelude::*;

View File

@ -48,8 +48,8 @@ fn compute_window_table<C: CurveAffine>(base: C, num_windows: usize) -> Vec<[C;
(0..H)
.map(|k| {
// scalar = (k+2)*(8^w)
let scalar = C::Scalar::from(k as u64 + 2)
* C::Scalar::from(H as u64).pow([w as u64, 0, 0, 0]);
let scalar =
C::Scalar::from(k as u64 + 2) * C::Scalar::from(H as u64).pow([w as u64]);
(base * scalar).to_affine()
})
.collect::<ArrayVec<C, H>>()
@ -62,14 +62,14 @@ fn compute_window_table<C: CurveAffine>(base: C, num_windows: usize) -> Vec<[C;
// For the last window, we compute [k * (2^3)^w - sum]B, where sum is defined
// as sum = \sum_{j = 0}^{`num_windows - 2`} 2^{3j+1}
let sum = (0..(num_windows - 1)).fold(C::Scalar::ZERO, |acc, j| {
acc + C::Scalar::from(2).pow([FIXED_BASE_WINDOW_SIZE as u64 * j as u64 + 1, 0, 0, 0])
acc + C::Scalar::from(2).pow([FIXED_BASE_WINDOW_SIZE as u64 * j as u64 + 1])
});
window_table.push(
(0..H)
.map(|k| {
// scalar = k * (2^3)^w - sum, where w = `num_windows - 1`
let scalar = C::Scalar::from(k as u64)
* C::Scalar::from(H as u64).pow([(num_windows - 1) as u64, 0, 0, 0])
* C::Scalar::from(H as u64).pow([(num_windows - 1) as u64])
- sum;
(base * scalar).to_affine()
})
@ -197,7 +197,7 @@ pub fn test_lagrange_coeffs<C: CurveAffine>(base: C, num_windows: usize) {
// Compute the actual x-coordinate of the multiple [(k+2)*(8^w)]B.
let point = base
* C::Scalar::from(bits as u64 + 2)
* C::Scalar::from(H as u64).pow([idx as u64, 0, 0, 0]);
* C::Scalar::from(H as u64).pow([idx as u64]);
let x = *point.to_affine().coordinates().unwrap().x();
// Check that the interpolated x-coordinate matches the actual one.
@ -214,10 +214,10 @@ pub fn test_lagrange_coeffs<C: CurveAffine>(base: C, num_windows: usize) {
// Compute the actual x-coordinate of the multiple [k * (8^84) - offset]B,
// where offset = \sum_{j = 0}^{83} 2^{3j+1}
let offset = (0..(num_windows - 1)).fold(C::Scalar::ZERO, |acc, w| {
acc + C::Scalar::from(2).pow([FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0])
acc + C::Scalar::from(2).pow([FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1])
});
let scalar = C::Scalar::from(bits as u64)
* C::Scalar::from(H as u64).pow([(num_windows - 1) as u64, 0, 0, 0])
* C::Scalar::from(H as u64).pow([(num_windows - 1) as u64])
- offset;
let point = base * scalar;
let x = *point.to_affine().coordinates().unwrap().x();

View File

@ -372,7 +372,7 @@ impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> {
base: &F,
) -> Result<NonIdentityEccPoint, Error> {
// `scalar = [(k_w + 2) ⋅ 8^w]
let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow([w as u64, 0, 0, 0]));
let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow([w as u64]));
self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base)
}
@ -389,12 +389,12 @@ impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> {
// offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1}
let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| {
acc + (*TWO_SCALAR).pow([FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0])
acc + (*TWO_SCALAR).pow([FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1])
});
// `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`.
let scalar = scalar.windows_field()[scalar.windows_field().len() - 1]
.map(|k| k * (*H_SCALAR).pow([(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc);
.map(|k| k * (*H_SCALAR).pow([(NUM_WINDOWS - 1) as u64]) - offset_acc);
self.process_window::<_, NUM_WINDOWS>(
region,

View File

@ -869,7 +869,7 @@ mod tests {
}
}
#[cfg(feature = "dev-graph")]
#[cfg(feature = "test-dev-graph")]
#[test]
fn print_poseidon_chip() {
use plotters::prelude::*;

View File

@ -450,7 +450,7 @@ trait Table16Assignment {
}
#[cfg(test)]
#[cfg(feature = "dev-graph")]
#[cfg(feature = "test-dev-graph")]
mod tests {
use super::super::{Sha256, BLOCK_SIZE};
use super::{message_schedule::msg_schedule_test_input, Table16Chip, Table16Config};

View File

@ -739,7 +739,7 @@ pub(crate) mod tests {
assert_eq!(prover.verify(), Ok(()))
}
#[cfg(feature = "dev-graph")]
#[cfg(feature = "test-dev-graph")]
#[test]
fn print_sinsemilla_chip() {
use plotters::prelude::*;

View File

@ -382,7 +382,7 @@ pub mod tests {
assert_eq!(prover.verify(), Ok(()))
}
#[cfg(feature = "dev-graph")]
#[cfg(feature = "test-dev-graph")]
#[test]
fn print_merkle_chip() {
use plotters::prelude::*;

View File

@ -49,7 +49,6 @@ harness = false
[dependencies]
backtrace = { version = "0.3", optional = true }
rayon = "1.5.1"
ff = "0.13"
group = "0.13"
halo2curves = { version = "0.1.0" }
@ -58,11 +57,15 @@ tracing = "0.1"
blake2b_simd = "1"
sha3 = "0.9.1"
rand_chacha = "0.3"
maybe-rayon = { version = "0.1.0", default-features = false }
# Developer tooling dependencies
plotters = { version = "0.3.0", optional = true }
plotters = { version = "0.3.0", default-features = false, optional = true }
tabbycat = { version = "0.1", features = ["attributes"], optional = true }
# Legacy circuit compatibility
halo2_legacy_pdqsort = { version = "0.1.0", optional = true }
[dev-dependencies]
assert_matches = "1.5"
criterion = "0.3"
@ -74,8 +77,15 @@ rand_core = { version = "0.6", default-features = false, features = ["getrandom"
getrandom = { version = "0.2", features = ["js"] }
[features]
default = ["batch"]
default = ["batch", "multicore"]
multicore = ["maybe-rayon/threads"]
dev-graph = ["plotters", "tabbycat"]
test-dev-graph = [
"dev-graph",
"plotters/bitmap_backend",
"plotters/bitmap_encoder",
"plotters/ttf",
]
gadget-traces = ["backtrace"]
thread-safe-region = []
sanity-checks = []
@ -87,6 +97,4 @@ bench = false
[[example]]
name = "circuit-layout"
required-features = ["dev-graph"]
required-features = ["test-dev-graph"]

View File

@ -4,7 +4,7 @@
## Minimum Supported Rust Version
Requires Rust **1.56.1** or higher.
Requires Rust **1.65.0** or higher.
Minimum supported Rust version can be changed in the future, but it will be done with a
minor version bump.
@ -15,6 +15,10 @@ minor version bump.
computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of
threads.
You can disable `rayon` by disabling the `"multicore"` feature.
Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature.
This will significantly degrade performance.
## License
Licensed under either of

View File

@ -2,12 +2,20 @@ extern crate criterion;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use group::ff::Field;
use halo2_proofs::*;
use halo2_proofs::arithmetic::parallelize;
use halo2curves::pasta::pallas::Scalar;
use rand_chacha::rand_core::RngCore;
use rand_chacha::ChaCha20Rng;
use rand_core::SeedableRng;
use rayon::{current_num_threads, prelude::*};
use std::{collections::HashMap, iter};
#[cfg(feature = "multicore")]
use maybe_rayon::current_num_threads;
#[cfg(not(feature = "multicore"))]
fn current_num_threads() -> usize {
1
}
fn rand_poly_serial(mut rng: ChaCha20Rng, domain: usize) -> Vec<Scalar> {
// Sample a random polynomial of degree n - 1
@ -21,25 +29,33 @@ fn rand_poly_serial(mut rng: ChaCha20Rng, domain: usize) -> Vec<Scalar> {
fn rand_poly_par(mut rng: ChaCha20Rng, domain: usize) -> Vec<Scalar> {
// Sample a random polynomial of degree n - 1
let n_threads = current_num_threads();
let n = 1usize << domain;
let n_chunks = n_threads + usize::from(n % n_threads != 0);
let mut rand_vec = vec![Scalar::zero(); n];
let n = 1usize << domain as usize;
let mut random_poly = vec![Scalar::ZERO; n];
let mut thread_seeds: Vec<ChaCha20Rng> = (0..n_chunks)
.map(|_| {
let num_threads = current_num_threads();
let chunk_size = n / num_threads;
let thread_seeds = (0..)
.step_by(chunk_size + 1)
.take(n % num_threads)
.chain(
(chunk_size != 0)
.then(|| ((n % num_threads) * (chunk_size + 1)..).step_by(chunk_size))
.into_iter()
.flatten(),
)
.take(num_threads)
.zip(iter::repeat_with(|| {
let mut seed = [0u8; 32];
rng.fill_bytes(&mut seed);
ChaCha20Rng::from_seed(seed)
})
.collect();
}))
.collect::<HashMap<_, _>>();
thread_seeds
.par_iter_mut()
.zip_eq(rand_vec.par_chunks_mut(n / n_threads))
.for_each(|(mut rng, chunk)| chunk.iter_mut().for_each(|v| *v = Scalar::random(&mut rng)));
rand_vec
parallelize(&mut random_poly, |chunk, offset| {
let mut rng = thread_seeds[&offset].clone();
chunk.iter_mut().for_each(|v| *v = Scalar::random(&mut rng));
});
random_poly
}
fn bench_commit(c: &mut Criterion) {

View File

@ -180,7 +180,7 @@ impl<F: Field> NumericInstructions<F> for FieldChip<F> {
#[cfg(feature = "thread-safe-region")]
{
use rayon::prelude::{
use maybe_rayon::prelude::{
IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator,
};
layouter.assign_region(

View File

@ -51,7 +51,7 @@ fn multiexp_serial<C: CurveAffine>(coeffs: &[C::Scalar], bases: &[C], acc: &mut
let mut tmp = u64::from_le_bytes(v);
tmp >>= skip_bits - (skip_bytes * 8);
tmp = tmp % (1 << c);
tmp %= 1 << c;
tmp as usize
}
@ -110,7 +110,7 @@ fn multiexp_serial<C: CurveAffine>(coeffs: &[C::Scalar], bases: &[C], acc: &mut
let mut running_sum = C::Curve::identity();
for exp in buckets.into_iter().rev() {
running_sum = exp.add(running_sum);
*acc = *acc + &running_sum;
*acc += &running_sum;
}
}
}
@ -262,7 +262,7 @@ pub fn recursive_butterfly_arithmetic<Scalar: Field, G: FftGroup<Scalar>>(
a[1] -= &t;
} else {
let (left, right) = a.split_at_mut(n / 2);
rayon::join(
multicore::join(
|| recursive_butterfly_arithmetic(left, n / 2, twiddle_chunk * 2, twiddles),
|| recursive_butterfly_arithmetic(right, n / 2, twiddle_chunk * 2, twiddles),
);

View File

@ -1,6 +1,6 @@
//! Traits and structs for implementing circuit components.
use std::{convert::TryInto, fmt, marker::PhantomData};
use std::{fmt, marker::PhantomData};
use ff::Field;
@ -13,9 +13,11 @@ pub use value::Value;
pub mod floor_planner;
pub use floor_planner::single_pass::SimpleFloorPlanner;
pub use floor_planner::single_pass::SimpleTableLayouter;
pub mod layouter;
mod table_layouter;
pub use table_layouter::{SimpleTableLayouter, TableLayouter};
/// A chip implements a set of instructions that can be used by gadgets.
///
@ -314,6 +316,19 @@ impl<'r, F: Field> Region<'r, F> {
})
}
/// Returns the value of the instance column's cell at absolute location `row`.
///
/// This method is only provided for convenience; it does not create any constraints.
/// Callers still need to use [`Self::assign_advice_from_instance`] to constrain the
/// instance values in their circuit.
pub fn instance_value(
&mut self,
instance: Column<Instance>,
row: usize,
) -> Result<Value<F>, Error> {
self.region.instance_value(instance, row)
}
/// Assign a fixed value.
///
/// Even though `to` has `FnMut` bounds, it is guaranteed to be called at most once.
@ -369,11 +384,11 @@ impl<'r, F: Field> Region<'r, F> {
/// A lookup table in the circuit.
#[derive(Debug)]
pub struct Table<'r, F: Field> {
table: &'r mut dyn layouter::TableLayouter<F>,
table: &'r mut dyn TableLayouter<F>,
}
impl<'r, F: Field> From<&'r mut dyn layouter::TableLayouter<F>> for Table<'r, F> {
fn from(table: &'r mut dyn layouter::TableLayouter<F>) -> Self {
impl<'r, F: Field> From<&'r mut dyn TableLayouter<F>> for Table<'r, F> {
fn from(table: &'r mut dyn TableLayouter<F>) -> Self {
Table { table }
}
}

View File

@ -8,6 +8,7 @@ use ff::Field;
use crate::{
circuit::{
layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter},
table_layouter::{compute_table_lengths, SimpleTableLayouter},
Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value,
},
plonk::{
@ -167,24 +168,7 @@ impl<'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> Layouter<F>
// Check that all table columns have the same length `first_unused`,
// and all cells up to that length are assigned.
let first_unused = {
match default_and_assigned
.values()
.map(|(_, assigned)| {
if assigned.iter().all(|b| *b) {
Some(assigned.len())
} else {
None
}
})
.reduce(|acc, item| match (acc, item) {
(Some(a), Some(b)) if a == b => Some(a),
_ => None,
}) {
Some(Some(len)) => len,
_ => return Err(Error::Synthesis), // TODO better error
}
};
let first_unused = compute_table_lengths(&default_and_assigned)?;
// Record these columns so that we can prevent them from being used again.
for column in default_and_assigned.keys() {
@ -265,11 +249,6 @@ impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> SingleChipLayouterRegion<'r, 'a,
}
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> SyncDeps
for SingleChipLayouterRegion<'r, 'a, F, CS>
{
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> RegionLayouter<F>
for SingleChipLayouterRegion<'r, 'a, F, CS>
{
@ -351,6 +330,14 @@ impl<'r, 'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> RegionLayouter<F>
Ok((cell, value))
}
fn instance_value(
&mut self,
instance: Column<Instance>,
row: usize,
) -> Result<Value<F>, Error> {
self.layouter.cs.query_instance(instance, row)
}
fn assign_fixed<'v>(
&'v mut self,
annotation: &'v (dyn Fn() -> String + 'v),
@ -389,88 +376,6 @@ impl<'r, 'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> RegionLayouter<F>
}
}
/// The default value to fill a table column with.
///
/// - The outer `Option` tracks whether the value in row 0 of the table column has been
/// assigned yet. This will always be `Some` once a valid table has been completely
/// assigned.
/// - The inner `Value` tracks whether the underlying `Assignment` is evaluating
/// witnesses or not.
type DefaultTableValue<F> = Option<Value<Assigned<F>>>;
/// A table layouter that can be used to assign values to a table.
pub struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment<F> + 'a> {
cs: &'a mut CS,
used_columns: &'r [TableColumn],
/// maps from a fixed column to a pair (default value, vector saying which rows are assigned)
pub default_and_assigned: HashMap<TableColumn, (DefaultTableValue<F>, Vec<bool>)>,
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SimpleTableLayouter")
.field("used_columns", &self.used_columns)
.field("default_and_assigned", &self.default_and_assigned)
.finish()
}
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> SimpleTableLayouter<'r, 'a, F, CS> {
/// Returns a new SimpleTableLayouter
pub fn new(cs: &'a mut CS, used_columns: &'r [TableColumn]) -> Self {
SimpleTableLayouter {
cs,
used_columns,
default_and_assigned: HashMap::default(),
}
}
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> TableLayouter<F>
for SimpleTableLayouter<'r, 'a, F, CS>
{
fn assign_cell<'v>(
&'v mut self,
annotation: &'v (dyn Fn() -> String + 'v),
column: TableColumn,
offset: usize,
to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
) -> Result<(), Error> {
if self.used_columns.contains(&column) {
return Err(Error::Synthesis); // TODO better error
}
let entry = self.default_and_assigned.entry(column).or_default();
let mut value = Value::unknown();
self.cs.assign_fixed(
annotation,
column.inner(),
offset, // tables are always assigned starting at row 0
|| {
let res = to();
value = res;
res
},
)?;
match (entry.0.is_none(), offset) {
// Use the value at offset 0 as the default value for this table column.
(true, 0) => entry.0 = Some(value),
// Since there is already an existing default value for this table column,
// the caller should not be attempting to assign another value at offset 0.
(false, 0) => return Err(Error::Synthesis), // TODO better error
_ => (),
}
if entry.1.len() <= offset {
entry.1.resize(offset + 1, false);
}
entry.1[offset] = true;
Ok(())
}
}
#[cfg(test)]
mod tests {
use halo2curves::pasta::vesta;

View File

@ -4,8 +4,8 @@ use ff::Field;
use crate::{
circuit::{
floor_planner::single_pass::SimpleTableLayouter,
layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter},
table_layouter::{compute_table_lengths, SimpleTableLayouter},
Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value,
},
plonk::{
@ -313,24 +313,7 @@ impl<'p, 'a, F: Field, CS: Assignment<F> + SyncDeps> AssignmentPass<'p, 'a, F, C
// Check that all table columns have the same length `first_unused`,
// and all cells up to that length are assigned.
let first_unused = {
match default_and_assigned
.values()
.map(|(_, assigned)| {
if assigned.iter().all(|b| *b) {
Some(assigned.len())
} else {
None
}
})
.reduce(|acc, item| match (acc, item) {
(Some(a), Some(b)) if a == b => Some(a),
_ => None,
}) {
Some(Some(len)) => len,
_ => return Err(Error::Synthesis), // TODO better error
}
};
let first_unused = compute_table_lengths(&default_and_assigned)?;
// Record these columns so that we can prevent them from being used again.
for column in default_and_assigned.keys() {
@ -384,8 +367,6 @@ impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> V1Region<'r, 'a, F, CS> {
}
}
impl<'r, 'a, F: Field, CS: Assignment<F> + SyncDeps> SyncDeps for V1Region<'r, 'a, F, CS> {}
impl<'r, 'a, F: Field, CS: Assignment<F> + SyncDeps> RegionLayouter<F> for V1Region<'r, 'a, F, CS> {
fn enable_selector<'v>(
&'v mut self,
@ -457,6 +438,14 @@ impl<'r, 'a, F: Field, CS: Assignment<F> + SyncDeps> RegionLayouter<F> for V1Reg
Ok((cell, value))
}
fn instance_value(
&mut self,
instance: Column<Instance>,
row: usize,
) -> Result<Value<F>, Error> {
self.plan.cs.query_instance(instance, row)
}
fn assign_fixed<'v>(
&'v mut self,
annotation: &'v (dyn Fn() -> String + 'v),

View File

@ -199,7 +199,7 @@ pub fn slot_in_biggest_advice_first(
region_shapes: Vec<RegionShape>,
) -> (Vec<RegionStart>, CircuitAllocations) {
let mut sorted_regions: Vec<_> = region_shapes.into_iter().collect();
sorted_regions.sort_unstable_by_key(|shape| {
let sort_key = |shape: &RegionShape| {
// Count the number of advice columns
let advice_cols = shape
.columns()
@ -211,7 +211,24 @@ pub fn slot_in_biggest_advice_first(
.count();
// Sort by advice area (since this has the most contention).
advice_cols * shape.row_count()
});
};
// This used to incorrectly use `sort_unstable_by_key` with non-unique keys, which gave
// output that differed between 32-bit and 64-bit platforms, and potentially between Rust
// versions.
// We now use `sort_by_cached_key` with non-unique keys, and rely on `region_shapes`
// being sorted by region index (which we also rely on below to return `RegionStart`s
// in the correct order).
#[cfg(not(feature = "floor-planner-v1-legacy-pdqsort"))]
sorted_regions.sort_by_cached_key(sort_key);
// To preserve compatibility, when the "floor-planner-v1-legacy-pdqsort" feature is enabled,
// we use a copy of the pdqsort implementation from the Rust 1.56.1 standard library, fixed
// to its behaviour on 64-bit platforms.
// https://github.com/rust-lang/rust/blob/1.56.1/library/core/src/slice/mod.rs#L2365-L2402
#[cfg(feature = "floor-planner-v1-legacy-pdqsort")]
halo2_legacy_pdqsort::sort::quicksort(&mut sorted_regions, |a, b| sort_key(a).lt(&sort_key(b)));
sorted_regions.reverse();
// Lay out the sorted regions.

View File

@ -6,17 +6,24 @@ use std::fmt;
use ff::Field;
pub use super::table_layouter::TableLayouter;
use super::{Cell, RegionIndex, Value};
use crate::plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Selector, TableColumn};
use crate::plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Selector};
/// Intermediate trait requirements for [`RegionLayouter`] when thread-safe regions are enabled.
#[cfg(feature = "thread-safe-region")]
pub trait SyncDeps: Send + Sync {}
#[cfg(feature = "thread-safe-region")]
impl<T: Send + Sync> SyncDeps for T {}
/// Intermediate trait requirements for [`RegionLayouter`].
#[cfg(not(feature = "thread-safe-region"))]
pub trait SyncDeps {}
#[cfg(not(feature = "thread-safe-region"))]
impl<T> SyncDeps for T {}
/// Helper trait for implementing a custom [`Layouter`].
///
/// This trait is used for implementing region assignments:
@ -92,7 +99,8 @@ pub trait RegionLayouter<F: Field>: fmt::Debug + SyncDeps {
/// Assign the value of the instance column's cell at absolute location
/// `row` to the column `advice` at `offset` within this region.
///
/// Returns the advice cell, and its value if known.
/// Returns the advice cell that has been equality-constrained to the
/// instance cell, and its value if known.
fn assign_advice_from_instance<'v>(
&mut self,
annotation: &'v (dyn Fn() -> String + 'v),
@ -102,7 +110,11 @@ pub trait RegionLayouter<F: Field>: fmt::Debug + SyncDeps {
offset: usize,
) -> Result<(Cell, Value<F>), Error>;
/// Assign a fixed value
/// Returns the value of the instance column's cell at absolute location `row`.
fn instance_value(&mut self, instance: Column<Instance>, row: usize)
-> Result<Value<F>, Error>;
/// Assigns a fixed value
fn assign_fixed<'v>(
&'v mut self,
annotation: &'v (dyn Fn() -> String + 'v),
@ -122,24 +134,6 @@ pub trait RegionLayouter<F: Field>: fmt::Debug + SyncDeps {
fn constrain_equal(&mut self, left: Cell, right: Cell) -> Result<(), Error>;
}
/// Helper trait for implementing a custom [`Layouter`].
///
/// This trait is used for implementing table assignments.
///
/// [`Layouter`]: super::Layouter
pub trait TableLayouter<F: Field>: fmt::Debug {
/// Assigns a fixed value to a table cell.
///
/// Returns an error if the table cell has already been assigned to.
fn assign_cell<'v>(
&'v mut self,
annotation: &'v (dyn Fn() -> String + 'v),
column: TableColumn,
offset: usize,
to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
) -> Result<(), Error>;
}
/// The shape of a region. For a region at a certain index, we track
/// the set of columns it uses as well as the number of rows it uses.
#[derive(Clone, Debug)]
@ -149,8 +143,6 @@ pub struct RegionShape {
pub(super) row_count: usize,
}
impl SyncDeps for RegionShape {}
/// The virtual column involved in a region. This includes concrete columns,
/// as well as selectors that are not concrete columns at this stage.
#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)]
@ -278,6 +270,14 @@ impl<F: Field> RegionLayouter<F> for RegionShape {
))
}
fn instance_value(
&mut self,
_instance: Column<Instance>,
_row: usize,
) -> Result<Value<F>, Error> {
Ok(Value::unknown())
}
fn assign_fixed<'v>(
&'v mut self,
_: &'v (dyn Fn() -> String + 'v),

View File

@ -0,0 +1,413 @@
//! Implementations of common table layouters.
use std::{
collections::HashMap,
fmt::{self, Debug},
};
use ff::Field;
use crate::plonk::{Assigned, Assignment, Error, TableColumn, TableError};
use super::Value;
/// Helper trait for implementing a custom [`Layouter`].
///
/// This trait is used for implementing table assignments.
///
/// [`Layouter`]: super::Layouter
pub trait TableLayouter<F: Field>: std::fmt::Debug {
/// Assigns a fixed value to a table cell.
///
/// Returns an error if the table cell has already been assigned to.
fn assign_cell<'v>(
&'v mut self,
annotation: &'v (dyn Fn() -> String + 'v),
column: TableColumn,
offset: usize,
to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
) -> Result<(), Error>;
}
/// The default value to fill a table column with.
///
/// - The outer `Option` tracks whether the value in row 0 of the table column has been
/// assigned yet. This will always be `Some` once a valid table has been completely
/// assigned.
/// - The inner `Value` tracks whether the underlying `Assignment` is evaluating
/// witnesses or not.
type DefaultTableValue<F> = Option<Value<Assigned<F>>>;
/// A table layouter that can be used to assign values to a table.
pub struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment<F> + 'a> {
cs: &'a mut CS,
used_columns: &'r [TableColumn],
/// maps from a fixed column to a pair (default value, vector saying which rows are assigned)
pub default_and_assigned: HashMap<TableColumn, (DefaultTableValue<F>, Vec<bool>)>,
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SimpleTableLayouter")
.field("used_columns", &self.used_columns)
.field("default_and_assigned", &self.default_and_assigned)
.finish()
}
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> SimpleTableLayouter<'r, 'a, F, CS> {
    /// Returns a new SimpleTableLayouter over `cs` that refuses to assign to
    /// any column listed in `used_columns`.
    pub fn new(cs: &'a mut CS, used_columns: &'r [TableColumn]) -> Self {
        Self {
            cs,
            used_columns,
            default_and_assigned: HashMap::new(),
        }
    }
}
impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> TableLayouter<F>
    for SimpleTableLayouter<'r, 'a, F, CS>
{
    fn assign_cell<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: TableColumn,
        offset: usize,
        to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
    ) -> Result<(), Error> {
        // A column consumed by an earlier table cannot be reused.
        if self.used_columns.contains(&column) {
            return Err(Error::TableError(TableError::UsedColumn(column)));
        }

        let entry = self.default_and_assigned.entry(column).or_default();

        // Capture the witnessed value so it can be recorded as the column's
        // default when this is the row-0 assignment.
        let mut witnessed = Value::unknown();
        self.cs.assign_fixed(
            annotation,
            column.inner(),
            offset, // tables are always assigned starting at row 0
            || {
                let assigned = to();
                witnessed = assigned;
                assigned
            },
        )?;

        if offset == 0 {
            match entry.0 {
                // Use the value at offset 0 as the default value for this table column.
                None => entry.0 = Some(witnessed),
                // A second write at offset 0 would clobber the existing default,
                // which the caller should never attempt.
                Some(_) => {
                    return Err(Error::TableError(TableError::OverwriteDefault(
                        column,
                        format!("{:?}", entry.0.unwrap()),
                        format!("{:?}", witnessed),
                    )))
                }
            }
        }

        // Mark this row as assigned, growing the bitmap as needed.
        if entry.1.len() <= offset {
            entry.1.resize(offset + 1, false);
        }
        entry.1[offset] = true;

        Ok(())
    }
}
pub(crate) fn compute_table_lengths<F: Debug>(
default_and_assigned: &HashMap<TableColumn, (DefaultTableValue<F>, Vec<bool>)>,
) -> Result<usize, Error> {
let column_lengths: Result<Vec<_>, Error> = default_and_assigned
.iter()
.map(|(col, (default_value, assigned))| {
if default_value.is_none() || assigned.is_empty() {
return Err(Error::TableError(TableError::ColumnNotAssigned(*col)));
}
if assigned.iter().all(|b| *b) {
// All values in the column have been assigned
Ok((col, assigned.len()))
} else {
Err(Error::TableError(TableError::ColumnNotAssigned(*col)))
}
})
.collect();
let column_lengths = column_lengths?;
column_lengths
.into_iter()
.try_fold((None, 0), |acc, (col, col_len)| {
if acc.1 == 0 || acc.1 == col_len {
Ok((Some(*col), col_len))
} else {
let mut cols = [(*col, col_len), (acc.0.unwrap(), acc.1)];
cols.sort();
Err(Error::TableError(TableError::UnevenColumnLengths(
cols[0], cols[1],
)))
}
})
.map(|col_len| col_len.1)
}
#[cfg(test)]
mod tests {
    use halo2curves::pasta::Fp;

    use crate::{
        circuit::{Layouter, SimpleFloorPlanner},
        dev::MockProver,
        plonk::{Circuit, ConstraintSystem},
        poly::Rotation,
    };

    use super::*;

    // Assigning only at offset 1 must fail: row 0 supplies the column's
    // default value, so the column is reported as not fully assigned.
    #[test]
    fn table_no_default() {
        const K: u32 = 4;

        #[derive(Clone)]
        struct FaultyCircuitConfig {
            table: TableColumn,
        }

        struct FaultyCircuit;

        impl Circuit<Fp> for FaultyCircuit {
            type Config = FaultyCircuitConfig;
            type FloorPlanner = SimpleFloorPlanner;
            #[cfg(feature = "circuit-params")]
            type Params = ();

            fn without_witnesses(&self) -> Self {
                Self
            }

            fn configure(meta: &mut ConstraintSystem<Fp>) -> Self::Config {
                let a = meta.advice_column();
                let table = meta.lookup_table_column();

                meta.lookup("", |cells| {
                    let a = cells.query_advice(a, Rotation::cur());
                    vec![(a, table)]
                });

                Self::Config { table }
            }

            fn synthesize(
                &self,
                config: Self::Config,
                mut layouter: impl Layouter<Fp>,
            ) -> Result<(), Error> {
                layouter.assign_table(
                    || "duplicate assignment",
                    |mut table| {
                        // Offset 1 only — offset 0 (the default) is never assigned.
                        table.assign_cell(
                            || "default",
                            config.table,
                            1,
                            || Value::known(Fp::zero()),
                        )
                    },
                )
            }
        }

        let prover = MockProver::run(K, &FaultyCircuit, vec![]);
        assert_eq!(
            format!("{}", prover.unwrap_err()),
            "TableColumn { inner: Column { index: 0, column_type: Fixed } } not fully assigned. Help: assign a value at offset 0."
        );
    }

    // Writing offset 0 twice must fail with an overwrite-default error, even
    // when both writes carry the same value.
    #[test]
    fn table_overwrite_default() {
        const K: u32 = 4;

        #[derive(Clone)]
        struct FaultyCircuitConfig {
            table: TableColumn,
        }

        struct FaultyCircuit;

        impl Circuit<Fp> for FaultyCircuit {
            type Config = FaultyCircuitConfig;
            type FloorPlanner = SimpleFloorPlanner;
            #[cfg(feature = "circuit-params")]
            type Params = ();

            fn without_witnesses(&self) -> Self {
                Self
            }

            fn configure(meta: &mut ConstraintSystem<Fp>) -> Self::Config {
                let a = meta.advice_column();
                let table = meta.lookup_table_column();

                meta.lookup("", |cells| {
                    let a = cells.query_advice(a, Rotation::cur());
                    vec![(a, table)]
                });

                Self::Config { table }
            }

            fn synthesize(
                &self,
                config: Self::Config,
                mut layouter: impl Layouter<Fp>,
            ) -> Result<(), Error> {
                layouter.assign_table(
                    || "duplicate assignment",
                    |mut table| {
                        table.assign_cell(
                            || "default",
                            config.table,
                            0,
                            || Value::known(Fp::zero()),
                        )?;
                        // Second write to offset 0 — must be rejected.
                        table.assign_cell(
                            || "duplicate",
                            config.table,
                            0,
                            || Value::known(Fp::zero()),
                        )
                    },
                )
            }
        }

        let prover = MockProver::run(K, &FaultyCircuit, vec![]);
        assert_eq!(
            format!("{}", prover.unwrap_err()),
            "Attempted to overwrite default value Value { inner: Some(Trivial(0x0000000000000000000000000000000000000000000000000000000000000000)) } with Value { inner: Some(Trivial(0x0000000000000000000000000000000000000000000000000000000000000000)) } in TableColumn { inner: Column { index: 0, column_type: Fixed } }"
        );
    }

    // A table column may only belong to one table: the second `assign_table`
    // call on the same column must fail with a used-column error.
    #[test]
    fn table_reuse_column() {
        const K: u32 = 4;

        #[derive(Clone)]
        struct FaultyCircuitConfig {
            table: TableColumn,
        }

        struct FaultyCircuit;

        impl Circuit<Fp> for FaultyCircuit {
            type Config = FaultyCircuitConfig;
            type FloorPlanner = SimpleFloorPlanner;
            #[cfg(feature = "circuit-params")]
            type Params = ();

            fn without_witnesses(&self) -> Self {
                Self
            }

            fn configure(meta: &mut ConstraintSystem<Fp>) -> Self::Config {
                let a = meta.advice_column();
                let table = meta.lookup_table_column();

                meta.lookup("", |cells| {
                    let a = cells.query_advice(a, Rotation::cur());
                    vec![(a, table)]
                });

                Self::Config { table }
            }

            fn synthesize(
                &self,
                config: Self::Config,
                mut layouter: impl Layouter<Fp>,
            ) -> Result<(), Error> {
                layouter.assign_table(
                    || "first assignment",
                    |mut table| {
                        table.assign_cell(
                            || "default",
                            config.table,
                            0,
                            || Value::known(Fp::zero()),
                        )
                    },
                )?;

                // Reuses `config.table`, which the first table already consumed.
                layouter.assign_table(
                    || "reuse",
                    |mut table| {
                        table.assign_cell(|| "reuse", config.table, 1, || Value::known(Fp::zero()))
                    },
                )
            }
        }

        let prover = MockProver::run(K, &FaultyCircuit, vec![]);
        assert_eq!(
            format!("{}", prover.unwrap_err()),
            "TableColumn { inner: Column { index: 0, column_type: Fixed } } has already been used"
        );
    }

    // All columns of a table must have the same length; a 2-row column next to
    // a 1-row column must fail with an uneven-column-lengths error.
    #[test]
    fn table_uneven_columns() {
        const K: u32 = 4;

        #[derive(Clone)]
        struct FaultyCircuitConfig {
            table: (TableColumn, TableColumn),
        }

        struct FaultyCircuit;

        impl Circuit<Fp> for FaultyCircuit {
            type Config = FaultyCircuitConfig;
            type FloorPlanner = SimpleFloorPlanner;
            #[cfg(feature = "circuit-params")]
            type Params = ();

            fn without_witnesses(&self) -> Self {
                Self
            }

            fn configure(meta: &mut ConstraintSystem<Fp>) -> Self::Config {
                let a = meta.advice_column();
                let table = (meta.lookup_table_column(), meta.lookup_table_column());
                meta.lookup("", |cells| {
                    let a = cells.query_advice(a, Rotation::cur());

                    vec![(a.clone(), table.0), (a, table.1)]
                });

                Self::Config { table }
            }

            fn synthesize(
                &self,
                config: Self::Config,
                mut layouter: impl Layouter<Fp>,
            ) -> Result<(), Error> {
                layouter.assign_table(
                    || "table with uneven columns",
                    |mut table| {
                        // Column 0 gets two rows; column 1 gets only one.
                        table.assign_cell(|| "", config.table.0, 0, || Value::known(Fp::zero()))?;
                        table.assign_cell(|| "", config.table.0, 1, || Value::known(Fp::zero()))?;

                        table.assign_cell(|| "", config.table.1, 0, || Value::known(Fp::zero()))
                    },
                )
            }
        }

        let prover = MockProver::run(K, &FaultyCircuit, vec![]);
        assert_eq!(
            format!("{}", prover.unwrap_err()),
            "TableColumn { inner: Column { index: 0, column_type: Fixed } } has length 2 while TableColumn { inner: Column { index: 1, column_type: Fixed } } has length 1"
        );
    }
}

View File

@ -63,6 +63,11 @@ impl<V> Value<V> {
}
}
/// ONLY FOR INTERNAL CRATE USAGE; DO NOT EXPOSE!
pub(crate) fn into_option(self) -> Option<V> {
self.inner
}
/// Enforces an assertion on the contained value, if known.
///
/// The assertion is ignored if `self` is [`Value::unknown()`]. Do not try to enforce

View File

@ -1,36 +1,29 @@
//! Tools for developing circuits.
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt;
use std::iter;
use std::ops::{Add, Mul, Neg, Range};
use std::time::{Duration, Instant};
use blake2b_simd::blake2b;
use ff::Field;
use ff::FromUniformBytes;
use group::Group;
use crate::circuit::layouter::SyncDeps;
use crate::plonk::permutation::keygen::Assembly;
use crate::{
circuit,
plonk::{
permutation,
sealed::{self, SealedPhase},
Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ColumnType,
ConstraintSystem, Error, Expression, FirstPhase, Fixed, FloorPlanner, Instance, Phase,
Selector, VirtualCell,
Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error,
Expression, FirstPhase, Fixed, FloorPlanner, Instance, Phase, Selector,
},
poly::Rotation,
};
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,
},
slice::ParallelSliceMut,
#[cfg(feature = "multicore")]
use crate::multicore::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,
ParallelSliceMut,
};
pub mod metadata;
@ -46,6 +39,9 @@ pub use cost::CircuitCost;
mod gates;
pub use gates::CircuitGates;
mod tfp;
pub use tfp::TracingFloorPlanner;
#[cfg(feature = "dev-graph")]
mod graph;
@ -306,7 +302,7 @@ pub struct MockProver<F: Field> {
// The advice cells in the circuit, arranged as [column][row].
advice: Vec<Vec<CellValue<F>>>,
// The instance cells in the circuit, arranged as [column][row].
instance: Vec<Vec<F>>,
instance: Vec<Vec<InstanceValue<F>>>,
selectors: Vec<Vec<bool>>,
@ -320,6 +316,21 @@ pub struct MockProver<F: Field> {
current_phase: sealed::Phase,
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum InstanceValue<F: Field> {
Assigned(F),
Padding,
}
impl<F: Field> InstanceValue<F> {
fn value(&self) -> F {
match self {
InstanceValue::Assigned(v) => *v,
InstanceValue::Padding => F::ZERO,
}
}
}
impl<F: Field> MockProver<F> {
fn in_phase<P: Phase>(&self, phase: P) -> bool {
self.current_phase == phase.to_sealed()
@ -420,7 +431,7 @@ impl<F: Field> Assignment<F> for MockProver<F> {
.instance
.get(column.index())
.and_then(|column| column.get(row))
.map(|v| circuit::Value::known(*v))
.map(|v| circuit::Value::known(v.value()))
.expect("bound failure"))
}
@ -618,7 +629,7 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
let instance = instance
.into_iter()
.map(|mut instance| {
.map(|instance| {
assert!(
instance.len() <= n - (cs.blinding_factors() + 1),
"instance.len={}, n={}, cs.blinding_factors={}",
@ -627,8 +638,12 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
cs.blinding_factors()
);
instance.resize(n, F::ZERO);
instance
let mut instance_values = vec![InstanceValue::Padding; n];
for (idx, value) in instance.into_iter().enumerate() {
instance_values[idx] = InstanceValue::Assigned(value);
}
instance_values
})
.collect::<Vec<_>>();
@ -770,17 +785,42 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
// Determine where this cell should have been assigned.
let cell_row = ((gate_row + n + cell.rotation.0) % n) as usize;
// Check that it was assigned!
if r.cells.get(&(cell.column, cell_row)).is_some() {
None
} else {
Some(VerifyFailure::CellNotAssigned {
gate: (gate_index, gate.name()).into(),
region: (r_i, r.name.clone(), r.annotations.clone()).into(),
gate_offset: *selector_row,
column: cell.column,
offset: cell_row as isize - r.rows.unwrap().0 as isize,
})
match cell.column.column_type() {
Any::Instance => {
// Handle instance cells, which are not in the region.
let instance_value =
&self.instance[cell.column.index()][cell_row];
match instance_value {
InstanceValue::Assigned(_) => None,
_ => Some(VerifyFailure::InstanceCellNotAssigned {
gate: (gate_index, gate.name()).into(),
region: (r_i, r.name.clone()).into(),
gate_offset: *selector_row,
column: cell.column.try_into().unwrap(),
row: cell_row,
}),
}
}
_ => {
// Check that it was assigned!
if r.cells.contains_key(&(cell.column, cell_row)) {
None
} else {
Some(VerifyFailure::CellNotAssigned {
gate: (gate_index, gate.name()).into(),
region: (
r_i,
r.name.clone(),
r.annotations.clone(),
)
.into(),
gate_offset: *selector_row,
column: cell.column,
offset: cell_row as isize
- r.rows.unwrap().0 as isize,
})
}
}
}
})
})
@ -882,7 +922,8 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
let rotation = query.1 .0;
Value::Real(
self.instance[column_index]
[(row as i32 + n + rotation) as usize % n as usize],
[(row as i32 + n + rotation) as usize % n as usize]
.value(),
)
},
&|challenge| Value::Real(self.challenges[challenge.index()]),
@ -1067,7 +1108,10 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
.map(|c: &Column<Any>| match c.column_type() {
Any::Advice(_) => self.advice[c.index()][row],
Any::Fixed => self.fixed[c.index()][row],
Any::Instance => CellValue::Assigned(self.instance[c.index()][row]),
Any::Instance => {
let cell: &InstanceValue<F> = &self.instance[c.index()][row];
CellValue::Assigned(cell.value())
}
})
.unwrap()
};
@ -1127,6 +1171,7 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
/// Returns `Ok(())` if this `MockProver` is satisfied, or a list of errors indicating
/// the reasons that the circuit is not satisfied.
/// Constraints and lookup are checked at `usable_rows`, parallelly.
#[cfg(feature = "multicore")]
pub fn verify_par(&self) -> Result<(), Vec<VerifyFailure>> {
self.verify_at_rows_par(self.usable_rows.clone(), self.usable_rows.clone())
}
@ -1134,6 +1179,7 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
/// Returns `Ok(())` if this `MockProver` is satisfied, or a list of errors indicating
/// the reasons that the circuit is not satisfied.
/// Constraints are only checked at `gate_row_ids`, and lookup inputs are only checked at `lookup_input_row_ids`, parallelly.
#[cfg(feature = "multicore")]
pub fn verify_at_rows_par<I: Clone + Iterator<Item = usize>>(
&self,
gate_row_ids: I,
@ -1186,23 +1232,44 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
let cell_row =
((gate_row + n + cell.rotation.0) % n) as usize;
// Check that it was assigned!
if r.cells.contains_key(&(cell.column, cell_row)) {
None
} else {
Some(VerifyFailure::CellNotAssigned {
gate: (gate_index, gate.name()).into(),
region: (
r_i,
r.name.clone(),
r.annotations.clone(),
)
.into(),
gate_offset: *selector_row,
column: cell.column,
offset: cell_row as isize
- r.rows.unwrap().0 as isize,
})
match cell.column.column_type() {
Any::Instance => {
// Handle instance cells, which are not in the region.
let instance_value =
&self.instance[cell.column.index()][cell_row];
match instance_value {
InstanceValue::Assigned(_) => None,
_ => Some(
VerifyFailure::InstanceCellNotAssigned {
gate: (gate_index, gate.name()).into(),
region: (r_i, r.name.clone()).into(),
gate_offset: *selector_row,
column: cell.column.try_into().unwrap(),
row: cell_row,
},
),
}
}
_ => {
// Check that it was assigned!
if r.cells.contains_key(&(cell.column, cell_row)) {
None
} else {
Some(VerifyFailure::CellNotAssigned {
gate: (gate_index, gate.name()).into(),
region: (
r_i,
r.name.clone(),
r.annotations.clone(),
)
.into(),
gate_offset: *selector_row,
column: cell.column,
offset: cell_row as isize
- r.rows.unwrap().0 as isize,
})
}
}
}
})
.collect::<Vec<_>>()
@ -1308,7 +1375,8 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
&|query| {
Value::Real(
self.instance[query.column_index]
[(row as i32 + n + query.rotation.0) as usize % n as usize],
[(row as i32 + n + query.rotation.0) as usize % n as usize]
.value(),
)
},
&|challenge| Value::Real(self.challenges[challenge.index()]),
@ -1489,7 +1557,10 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
.map(|c: &Column<Any>| match c.column_type() {
Any::Advice(_) => self.advice[c.index()][row],
Any::Fixed => self.fixed[c.index()][row],
Any::Instance => CellValue::Assigned(self.instance[c.index()][row]),
Any::Instance => {
let cell: &InstanceValue<F> = &self.instance[c.index()][row];
CellValue::Assigned(cell.value())
}
})
.unwrap()
};
@ -1576,6 +1647,7 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
/// ```ignore
/// assert_eq!(prover.verify_par(), Ok(()));
/// ```
#[cfg(feature = "multicore")]
pub fn assert_satisfied_par(&self) {
if let Err(errs) = self.verify_par() {
for err in errs {
@ -1597,6 +1669,7 @@ impl<F: FromUniformBytes<64> + Ord> MockProver<F> {
/// ```ignore
/// assert_eq!(prover.verify_at_rows_par(), Ok(()));
/// ```
#[cfg(feature = "multicore")]
pub fn assert_satisfied_at_rows_par<I: Clone + Iterator<Item = usize>>(
&self,
gate_row_ids: I,
@ -2212,5 +2285,3 @@ mod tests {
)
}
}
impl<F: Field> SyncDeps for MockProver<F> {}

View File

@ -1,6 +1,7 @@
//! Developer tools for investigating the cost of a circuit.
use std::{
cmp,
collections::{HashMap, HashSet},
iter,
marker::PhantomData,
@ -11,7 +12,7 @@ use ff::{Field, PrimeField};
use group::prime::PrimeGroup;
use crate::{
circuit::{layouter::SyncDeps, Value},
circuit::{layouter::RegionColumn, Value},
plonk::{
Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error,
Fixed, FloorPlanner, Instance, Selector,
@ -20,10 +21,11 @@ use crate::{
};
/// Measures a circuit to determine its costs, and explain what contributes to them.
#[allow(dead_code)]
#[derive(Debug)]
pub struct CircuitCost<G: PrimeGroup, ConcreteCircuit: Circuit<G::Scalar>> {
/// Power-of-2 bound on the number of rows in the circuit.
k: usize,
k: u32,
/// Maximum degree of the circuit.
max_deg: usize,
/// Number of advice columns.
@ -38,27 +40,141 @@ pub struct CircuitCost<G: PrimeGroup, ConcreteCircuit: Circuit<G::Scalar>> {
permutation_cols: usize,
/// Number of distinct sets of points in the multiopening argument.
point_sets: usize,
/// Maximum rows used over all columns
max_rows: usize,
/// Maximum rows used over all advice columns
max_advice_rows: usize,
/// Maximum rows used over all fixed columns
max_fixed_rows: usize,
num_fixed_columns: usize,
num_advice_columns: usize,
num_instance_columns: usize,
num_total_columns: usize,
_marker: PhantomData<(G, ConcreteCircuit)>,
}
struct Assembly {
selectors: Vec<Vec<bool>>,
/// Region implementation used by Layout
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) struct LayoutRegion {
/// The name of the region. Not required to be unique.
pub(crate) name: String,
/// The columns used by this region.
pub(crate) columns: HashSet<RegionColumn>,
/// The row that this region starts on, if known.
pub(crate) offset: Option<usize>,
/// The number of rows that this region takes up.
pub(crate) rows: usize,
/// The cells assigned in this region.
pub(crate) cells: Vec<(RegionColumn, usize)>,
}
impl SyncDeps for Assembly {}
/// Cost and graphing layouter
#[derive(Default, Debug)]
pub(crate) struct Layout {
/// k = 1 << n
pub(crate) k: u32,
/// Regions of the layout
pub(crate) regions: Vec<LayoutRegion>,
current_region: Option<usize>,
/// Total row count
pub(crate) total_rows: usize,
/// Total advice rows
pub(crate) total_advice_rows: usize,
/// Total fixed rows
pub(crate) total_fixed_rows: usize,
/// Any cells assigned outside of a region.
pub(crate) loose_cells: Vec<(RegionColumn, usize)>,
/// Pairs of cells between which we have equality constraints.
pub(crate) equality: Vec<(Column<Any>, usize, Column<Any>, usize)>,
/// Selector assignments used for optimization pass
pub(crate) selectors: Vec<Vec<bool>>,
}
impl<F: Field> Assignment<F> for Assembly {
fn enter_region<NR, N>(&mut self, _: N)
impl Layout {
/// Creates a empty layout
pub fn new(k: u32, n: usize, num_selectors: usize) -> Self {
Layout {
k,
regions: vec![],
current_region: None,
total_rows: 0,
total_advice_rows: 0,
total_fixed_rows: 0,
/// Any cells assigned outside of a region.
loose_cells: vec![],
/// Pairs of cells between which we have equality constraints.
equality: vec![],
/// Selector assignments used for optimization pass
selectors: vec![vec![false; n]; num_selectors],
}
}
/// Update layout metadata
pub fn update(&mut self, column: RegionColumn, row: usize) {
self.total_rows = cmp::max(self.total_rows, row + 1);
if let RegionColumn::Column(col) = column {
match col.column_type() {
Any::Advice(_) => {
self.total_advice_rows = cmp::max(self.total_advice_rows, row + 1)
}
Any::Fixed => self.total_fixed_rows = cmp::max(self.total_fixed_rows, row + 1),
_ => {}
}
}
if let Some(region) = self.current_region {
let region = &mut self.regions[region];
region.columns.insert(column);
// The region offset is the earliest row assigned to.
let mut offset = region.offset.unwrap_or(row);
if row < offset {
// The first row assigned was not at offset 0 within the region.
region.rows += offset - row;
offset = row;
}
// The number of rows in this region is the gap between the earliest and
// latest rows assigned.
region.rows = cmp::max(region.rows, row - offset + 1);
region.offset = Some(offset);
region.cells.push((column, row));
} else {
self.loose_cells.push((column, row));
}
}
}
impl<F: Field> Assignment<F> for Layout {
fn enter_region<NR, N>(&mut self, name_fn: N)
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about regions in this context.
assert!(self.current_region.is_none());
self.current_region = Some(self.regions.len());
self.regions.push(LayoutRegion {
name: name_fn().into(),
columns: HashSet::default(),
offset: None,
rows: 0,
cells: vec![],
})
}
fn annotate_column<A, AR>(&mut self, _: A, _: Column<Any>)
where
A: FnOnce() -> AR,
AR: Into<String>,
{
}
fn exit_region(&mut self) {
// Do nothing; we don't care about regions in this context.
assert!(self.current_region.is_some());
self.current_region = None;
}
fn enable_selector<A, AR>(&mut self, _: A, selector: &Selector, row: usize) -> Result<(), Error>
@ -66,8 +182,13 @@ impl<F: Field> Assignment<F> for Assembly {
A: FnOnce() -> AR,
AR: Into<String>,
{
self.selectors[selector.0][row] = true;
if let Some(cell) = self.selectors[selector.0].get_mut(row) {
*cell = true;
} else {
return Err(Error::not_enough_rows_available(self.k));
}
self.update((*selector).into(), row);
Ok(())
}
@ -78,8 +199,8 @@ impl<F: Field> Assignment<F> for Assembly {
fn assign_advice<V, VR, A, AR>(
&mut self,
_: A,
_: Column<Advice>,
_: usize,
column: Column<Advice>,
row: usize,
_: V,
) -> Result<(), Error>
where
@ -88,14 +209,15 @@ impl<F: Field> Assignment<F> for Assembly {
A: FnOnce() -> AR,
AR: Into<String>,
{
self.update(Column::<Any>::from(column).into(), row);
Ok(())
}
fn assign_fixed<V, VR, A, AR>(
&mut self,
_: A,
_: Column<Fixed>,
_: usize,
column: Column<Fixed>,
row: usize,
_: V,
) -> Result<(), Error>
where
@ -104,10 +226,18 @@ impl<F: Field> Assignment<F> for Assembly {
A: FnOnce() -> AR,
AR: Into<String>,
{
self.update(Column::<Any>::from(column).into(), row);
Ok(())
}
fn copy(&mut self, _: Column<Any>, _: usize, _: Column<Any>, _: usize) -> Result<(), Error> {
fn copy(
&mut self,
l_col: Column<Any>,
l_row: usize,
r_col: Column<Any>,
r_row: usize,
) -> Result<(), crate::plonk::Error> {
self.equality.push((l_col, l_row, r_col, r_row));
Ok(())
}
@ -124,14 +254,6 @@ impl<F: Field> Assignment<F> for Assembly {
Value::unknown()
}
fn annotate_column<A, AR>(&mut self, _annotation: A, _column: Column<Any>)
where
A: FnOnce() -> AR,
AR: Into<String>,
{
// Do nothing
}
fn push_namespace<NR, N>(&mut self, _: N)
where
NR: Into<String>,
@ -149,24 +271,19 @@ impl<G: PrimeGroup, ConcreteCircuit: Circuit<G::Scalar>> CircuitCost<G, Concrete
/// Measures a circuit with parameter constant `k`.
///
/// Panics if `k` is not large enough for the circuit.
pub fn measure(k: usize, circuit: &ConcreteCircuit) -> Self {
pub fn measure(k: u32, circuit: &ConcreteCircuit) -> Self {
// Collect the layout details.
let mut cs = ConstraintSystem::default();
#[cfg(feature = "circuit-params")]
let config = ConcreteCircuit::configure_with_params(&mut cs, circuit.params());
#[cfg(not(feature = "circuit-params"))]
let config = ConcreteCircuit::configure(&mut cs);
let mut assembly = Assembly {
selectors: vec![vec![false; 1 << k]; cs.num_selectors],
};
let mut layout = Layout::new(k, 1 << k, cs.num_selectors);
ConcreteCircuit::FloorPlanner::synthesize(
&mut assembly,
&mut layout,
circuit,
config,
cs.constants.clone(),
)
.unwrap();
let (cs, _) = cs.compress_selectors(assembly.selectors);
let (cs, _) = cs.compress_selectors(layout.selectors);
assert!((1 << k) >= cs.minimum_rows());
@ -222,6 +339,15 @@ impl<G: PrimeGroup, ConcreteCircuit: Circuit<G::Scalar>> CircuitCost<G, Concrete
permutation_cols,
point_sets: point_sets.len(),
_marker: PhantomData::default(),
max_rows: layout.total_rows,
max_advice_rows: layout.total_advice_rows,
max_fixed_rows: layout.total_fixed_rows,
num_advice_columns: cs.num_advice_columns,
num_fixed_columns: cs.num_fixed_columns,
num_instance_columns: cs.num_instance_columns,
num_total_columns: cs.num_instance_columns
+ cs.num_advice_columns
+ cs.num_fixed_columns,
}
}
@ -249,8 +375,11 @@ impl<G: PrimeGroup, ConcreteCircuit: Circuit<G::Scalar>> CircuitCost<G, Concrete
// Global permutation argument:
// - chunks commitments per instance
// - 2*chunks + (chunks - 1) evals per instance
equality: ProofContribution::new(chunks, 3 * chunks - 1),
// - 2 * chunks + (chunks - 1) evals per instance
equality: ProofContribution::new(
chunks,
if chunks == 0 { chunks } else { 3 * chunks - 1 },
),
_marker: PhantomData::default(),
}
@ -293,7 +422,7 @@ impl<G: PrimeGroup, ConcreteCircuit: Circuit<G::Scalar>> CircuitCost<G, Concrete
// - inner product argument (2 * k round commitments)
// - a
// - xi
polycomm: ProofContribution::new(1 + 2 * self.k, 2),
polycomm: ProofContribution::new((1 + 2 * self.k).try_into().unwrap(), 2),
_marker: PhantomData::default(),
}
@ -393,3 +522,40 @@ impl<G: PrimeGroup> From<ProofSize<G>> for usize {
+ proof.polycomm.len(point, scalar)
}
}
#[cfg(test)]
mod tests {
use halo2curves::pasta::{Eq, Fp};
use crate::circuit::SimpleFloorPlanner;
use super::*;
#[test]
fn circuit_cost_without_permutation() {
const K: u32 = 4;
struct MyCircuit;
impl Circuit<Fp> for MyCircuit {
type Config = ();
type FloorPlanner = SimpleFloorPlanner;
#[cfg(feature = "circuit-params")]
type Params = ();
fn without_witnesses(&self) -> Self {
Self
}
fn configure(_meta: &mut ConstraintSystem<Fp>) -> Self::Config {}
fn synthesize(
&self,
_config: Self::Config,
_layouter: impl crate::circuit::Layouter<Fp>,
) -> Result<(), Error> {
Ok(())
}
}
CircuitCost::<Eq, MyCircuit>::measure(K, &MyCircuit).proof_size(1);
}
}

View File

@ -14,7 +14,6 @@ use crate::dev::metadata::Constraint;
use crate::{
dev::{Instance, Value},
plonk::{Any, Column, ConstraintSystem, Expression, Gate},
poly::Rotation,
};
mod emitter;
@ -101,16 +100,17 @@ impl FailureLocation {
.iter()
.enumerate()
.find(|(_, r)| {
if r.rows.is_none() {
return false;
if let Some((start, end)) = r.rows {
// We match the region if any input columns overlap, rather than all of
// them, because matching complex selector columns is hard. As long as
// regions are rectangles, and failures occur due to assignments entirely
// within single regions, "any" will be equivalent to "all". If these
// assumptions change, we'll start getting bug reports from users :)
(start..=end).contains(&failure_row) && !failure_columns.is_disjoint(&r.columns)
} else {
// Zero-area region
false
}
let (start, end) = r.rows.unwrap();
// We match the region if any input columns overlap, rather than all of
// them, because matching complex selector columns is hard. As long as
// regions are rectangles, and failures occur due to assignments entirely
// within single regions, "any" will be equivalent to "all". If these
// assumptions change, we'll start getting bug reports from users :)
(start..=end).contains(&failure_row) && !failure_columns.is_disjoint(&r.columns)
})
.map(|(r_i, r)| FailureLocation::InRegion {
region: (r_i, r.name.clone(), r.annotations.clone()).into(),
@ -139,6 +139,20 @@ pub enum VerifyFailure {
/// offset 0, but the gate uses `Rotation::prev()`).
offset: isize,
},
/// An instance cell used in an active gate was not assigned to.
InstanceCellNotAssigned {
/// The index of the active gate.
gate: metadata::Gate,
/// The region in which this gate was activated.
region: metadata::Region,
/// The offset (relative to the start of the region) at which the active gate
/// queries this cell.
gate_offset: usize,
/// The column in which this cell should be assigned.
column: Column<Instance>,
/// The absolute row at which this cell should be assigned.
row: usize,
},
/// A constraint was not satisfied for a particular row.
ConstraintNotSatisfied {
/// The polynomial constraint that is not satisfied.
@ -225,6 +239,19 @@ impl fmt::Display for VerifyFailure {
region, gate, gate_offset, column, offset, region.get_column_annotation((*column).into())
)
}
Self::InstanceCellNotAssigned {
gate,
region,
gate_offset,
column,
row,
} => {
write!(
f,
"{} uses {} at offset {}, which requires cell in instance column {:?} at row {} to be assigned.",
region, gate, gate_offset, column, row
)
}
Self::ConstraintNotSatisfied {
constraint,
location,

View File

@ -7,10 +7,7 @@ use ff::PrimeField;
use crate::{
dev::util,
plonk::{
sealed::{self, SealedPhase},
Circuit, ConstraintSystem, FirstPhase,
},
plonk::{sealed::SealedPhase, Circuit, ConstraintSystem, FirstPhase},
};
#[derive(Debug)]

View File

@ -80,8 +80,6 @@ struct Graph {
current_namespace: Vec<usize>,
}
impl crate::dev::SyncDeps for Graph {}
impl<F: Field> Assignment<F> for Graph {
fn enter_region<NR, N>(&mut self, _: N)
where

View File

@ -3,16 +3,13 @@ use plotters::{
coord::Shift,
prelude::{DrawingArea, DrawingAreaErrorKind, DrawingBackend},
};
use std::cmp;
use std::collections::HashSet;
use std::ops::Range;
use crate::{
circuit::{layouter::RegionColumn, Value},
plonk::{
Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error,
Fixed, FloorPlanner, Instance, Selector,
},
circuit::layouter::RegionColumn,
dev::cost::Layout,
plonk::{Any, Circuit, Column, ConstraintSystem, FloorPlanner},
};
/// Graphical renderer for circuit layouts.
@ -324,198 +321,3 @@ impl CircuitLayout {
Ok(())
}
}
#[derive(Debug)]
struct Region {
/// The name of the region. Not required to be unique.
name: String,
/// The columns used by this region.
columns: HashSet<RegionColumn>,
/// The row that this region starts on, if known.
offset: Option<usize>,
/// The number of rows that this region takes up.
rows: usize,
/// The cells assigned in this region. We store this as a `Vec` so that if any cells
/// are double-assigned, they will be visibly darker.
cells: Vec<(RegionColumn, usize)>,
}
#[derive(Default)]
struct Layout {
k: u32,
regions: Vec<Region>,
current_region: Option<usize>,
total_rows: usize,
/// Any cells assigned outside of a region. We store this as a `Vec` so that if any
/// cells are double-assigned, they will be visibly darker.
loose_cells: Vec<(RegionColumn, usize)>,
/// Pairs of cells between which we have equality constraints.
equality: Vec<(Column<Any>, usize, Column<Any>, usize)>,
/// Selector assignments used for optimization pass
selectors: Vec<Vec<bool>>,
}
impl crate::dev::SyncDeps for Layout {}
impl Layout {
fn new(k: u32, n: usize, num_selectors: usize) -> Self {
Layout {
k,
regions: vec![],
current_region: None,
total_rows: 0,
/// Any cells assigned outside of a region. We store this as a `Vec` so that if any
/// cells are double-assigned, they will be visibly darker.
loose_cells: vec![],
/// Pairs of cells between which we have equality constraints.
equality: vec![],
/// Selector assignments used for optimization pass
selectors: vec![vec![false; n]; num_selectors],
}
}
fn update(&mut self, column: RegionColumn, row: usize) {
self.total_rows = cmp::max(self.total_rows, row + 1);
if let Some(region) = self.current_region {
let region = &mut self.regions[region];
region.columns.insert(column);
// The region offset is the earliest row assigned to.
let mut offset = region.offset.unwrap_or(row);
if row < offset {
// The first row assigned was not at offset 0 within the region.
region.rows += offset - row;
offset = row;
}
// The number of rows in this region is the gap between the earliest and
// latest rows assigned.
region.rows = cmp::max(region.rows, row - offset + 1);
region.offset = Some(offset);
region.cells.push((column, row));
} else {
self.loose_cells.push((column, row));
}
}
}
// Layout only tracks cell *positions* — all assigned values are ignored, and
// instance queries return `Value::unknown()`.
impl<F: Field> Assignment<F> for Layout {
    // Opens a new region; regions must not be nested.
    fn enter_region<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        assert!(self.current_region.is_none());
        self.current_region = Some(self.regions.len());
        self.regions.push(Region {
            name: name_fn().into(),
            columns: HashSet::default(),
            offset: None,
            rows: 0,
            cells: vec![],
        })
    }

    fn exit_region(&mut self) {
        assert!(self.current_region.is_some());
        self.current_region = None;
    }

    // Records the selector activation and the cell it occupies; errors if `row`
    // is beyond the selector column's capacity.
    fn enable_selector<A, AR>(&mut self, _: A, selector: &Selector, row: usize) -> Result<(), Error>
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        if let Some(cell) = self.selectors[selector.0].get_mut(row) {
            *cell = true;
        } else {
            return Err(Error::not_enough_rows_available(self.k));
        }

        self.update((*selector).into(), row);

        Ok(())
    }

    // Instance values are irrelevant to layout.
    fn query_instance(&self, _: Column<Instance>, _: usize) -> Result<Value<F>, Error> {
        Ok(Value::unknown())
    }

    // Only the cell's position is recorded; the value closure is never called.
    fn assign_advice<V, VR, A, AR>(
        &mut self,
        _: A,
        column: Column<Advice>,
        row: usize,
        _: V,
    ) -> Result<(), Error>
    where
        V: FnOnce() -> Value<VR>,
        VR: Into<Assigned<F>>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.update(Column::<Any>::from(column).into(), row);
        Ok(())
    }

    // Only the cell's position is recorded; the value closure is never called.
    fn assign_fixed<V, VR, A, AR>(
        &mut self,
        _: A,
        column: Column<Fixed>,
        row: usize,
        _: V,
    ) -> Result<(), Error>
    where
        V: FnOnce() -> Value<VR>,
        VR: Into<Assigned<F>>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.update(Column::<Any>::from(column).into(), row);
        Ok(())
    }

    // Records the equality constraint for later rendering.
    fn copy(
        &mut self,
        l_col: Column<Any>,
        l_row: usize,
        r_col: Column<Any>,
        r_row: usize,
    ) -> Result<(), crate::plonk::Error> {
        self.equality.push((l_col, l_row, r_col, r_row));
        Ok(())
    }

    // Blanket fills do not contribute individual cells to the layout.
    fn fill_from_row(
        &mut self,
        _: Column<Fixed>,
        _: usize,
        _: Value<Assigned<F>>,
    ) -> Result<(), Error> {
        Ok(())
    }

    fn get_challenge(&self, _: Challenge) -> Value<F> {
        Value::unknown()
    }

    fn annotate_column<A, AR>(&mut self, _annotation: A, _column: Column<Any>)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // Do nothing
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self, _: Option<String>) {
        // Do nothing; we don't care about namespaces in this context.
    }
}

508
halo2_proofs/src/dev/tfp.rs Normal file
View File

@ -0,0 +1,508 @@
use std::{fmt, marker::PhantomData};
use ff::Field;
use tracing::{debug, debug_span, span::EnteredSpan};
use crate::{
circuit::{
layouter::{RegionLayouter, SyncDeps},
AssignedCell, Cell, Layouter, Region, Table, Value,
},
plonk::{
Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, ConstraintSystem, Error,
Fixed, FloorPlanner, Instance, Selector,
},
};
/// A helper type that augments a [`FloorPlanner`] with [`tracing`] spans and events.
///
/// `TracingFloorPlanner` can be used to instrument your circuit and determine exactly
/// what is happening during a particular run of keygen or proving. This can be useful for
/// identifying unexpected non-determinism or changes to a circuit.
///
/// # No stability guarantees
///
/// The `tracing` output is intended for use during circuit development. It should not be
/// considered production-stable, and the precise format or data exposed may change at any
/// time.
///
/// # Examples
///
/// ```
/// use ff::Field;
/// use halo2_proofs::{
/// circuit::{floor_planner, Layouter, Value},
/// dev::TracingFloorPlanner,
/// plonk::{Circuit, ConstraintSystem, Error},
/// };
///
/// # struct MyCircuit<F: Field> {
/// # some_witness: Value<F>,
/// # };
/// # #[derive(Clone)]
/// # struct MyConfig;
/// impl<F: Field> Circuit<F> for MyCircuit<F> {
/// // Wrap `TracingFloorPlanner` around your existing floor planner of choice.
/// //type FloorPlanner = floor_planner::V1;
/// type FloorPlanner = TracingFloorPlanner<floor_planner::V1>;
///
/// // The rest of your `Circuit` implementation is unchanged.
/// type Config = MyConfig;
///
/// #[cfg(feature = "circuit-params")]
/// type Params = ();
///
/// fn without_witnesses(&self) -> Self {
/// Self { some_witness: Value::unknown() }
/// }
///
/// fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
/// // ..
/// # todo!()
/// }
///
/// fn synthesize(&self, config: Self::Config, layouter: impl Layouter<F>) -> Result<(), Error> {
/// // ..
/// # todo!()
/// }
/// }
///
/// #[test]
/// fn some_circuit_test() {
/// // At the start of your test, enable tracing.
/// tracing_subscriber::fmt()
/// .with_max_level(tracing::Level::DEBUG)
/// .with_ansi(false)
/// .without_time()
/// .init();
///
/// // Now when the rest of the test runs, you will get `tracing` output for every
/// // operation that the circuit performs under the hood!
/// }
/// ```
#[derive(Debug)]
pub struct TracingFloorPlanner<P: FloorPlanner> {
    /// Zero-sized marker tying this planner to the wrapped planner type `P`.
    _phantom: PhantomData<P>,
}
impl<P: FloorPlanner> FloorPlanner for TracingFloorPlanner<P> {
    /// Delegates synthesis to the wrapped planner `P`, first wrapping the
    /// assignment backend and the circuit in their tracing equivalents so every
    /// operation is logged.
    fn synthesize<F: Field, CS: Assignment<F> + SyncDeps, C: Circuit<F>>(
        cs: &mut CS,
        circuit: &C,
        config: C::Config,
        constants: Vec<Column<Fixed>>,
    ) -> Result<(), Error> {
        let mut traced_cs = TracingAssignment::new(cs);
        let traced_circuit = TracingCircuit::borrowed(circuit);
        P::synthesize(&mut traced_cs, &traced_circuit, config, constants)
    }
}
/// A helper type that augments a [`Circuit`] with [`tracing`] spans and events.
enum TracingCircuit<'c, F: Field, C: Circuit<F>> {
    /// A borrowed circuit (how the floor planner wraps the user's circuit).
    Borrowed(&'c C, PhantomData<F>),
    /// An owned circuit (produced by `without_witnesses`).
    Owned(C, PhantomData<F>),
}
impl<'c, F: Field, C: Circuit<F>> TracingCircuit<'c, F, C> {
    /// Returns a shared reference to the wrapped circuit, however it is held.
    fn inner_ref(&self) -> &C {
        match self {
            Self::Borrowed(c, _) => c,
            Self::Owned(c, _) => c,
        }
    }

    /// Wraps a borrowed circuit.
    fn borrowed(circuit: &'c C) -> Self {
        Self::Borrowed(circuit, PhantomData)
    }

    /// Wraps an owned circuit.
    fn owned(circuit: C) -> Self {
        Self::Owned(circuit, PhantomData)
    }
}
impl<'c, F: Field, C: Circuit<F>> Circuit<F> for TracingCircuit<'c, F, C> {
    type Config = C::Config;
    type FloorPlanner = C::FloorPlanner;
    // NOTE(review): this discards the inner circuit's params type (`()` rather
    // than `C::Params`) — confirm tracing is not meant to support circuit-params.
    #[cfg(feature = "circuit-params")]
    type Params = ();

    fn without_witnesses(&self) -> Self {
        Self::owned(self.inner_ref().without_witnesses())
    }

    // Wraps the inner circuit's configuration in a `configure` span.
    fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
        let _span = debug_span!("configure").entered();
        C::configure(meta)
    }

    // Wraps the inner circuit's synthesis in a `synthesize` span, and hands it a
    // tracing layouter so every layout operation is logged.
    fn synthesize(&self, config: Self::Config, layouter: impl Layouter<F>) -> Result<(), Error> {
        let _span = debug_span!("synthesize").entered();
        self.inner_ref()
            .synthesize(config, TracingLayouter::new(layouter))
    }
}
/// A helper type that augments a [`Layouter`] with [`tracing`] spans and events.
struct TracingLayouter<F: Field, L: Layouter<F>> {
    /// The wrapped layouter that does the real work.
    layouter: L,
    /// Spans opened by `push_namespace`, closed again in `pop_namespace`.
    namespace_spans: Vec<EnteredSpan>,
    _phantom: PhantomData<F>,
}
impl<F: Field, L: Layouter<F>> TracingLayouter<F, L> {
fn new(layouter: L) -> Self {
Self {
layouter,
namespace_spans: vec![],
_phantom: PhantomData,
}
}
}
impl<F: Field, L: Layouter<F>> Layouter<F> for TracingLayouter<F, L> {
    type Root = Self;

    /// Enters a `region` span, then delegates, handing the assignment closure a
    /// [`TracingRegion`] so individual cell assignments are logged.
    fn assign_region<A, AR, N, NR>(&mut self, name: N, mut assignment: A) -> Result<AR, Error>
    where
        A: FnMut(Region<'_, F>) -> Result<AR, Error>,
        N: Fn() -> NR,
        NR: Into<String>,
    {
        let _span = debug_span!("region", name = name().into()).entered();
        self.layouter.assign_region(name, |region| {
            let mut region = TracingRegion(region);
            let region: &mut dyn RegionLayouter<F> = &mut region;
            assignment(region.into())
        })
    }

    /// Enters a `table` span, then delegates; table assignments themselves are
    /// not individually traced.
    fn assign_table<A, N, NR>(&mut self, name: N, assignment: A) -> Result<(), Error>
    where
        A: FnMut(Table<'_, F>) -> Result<(), Error>,
        N: Fn() -> NR,
        NR: Into<String>,
    {
        let _span = debug_span!("table", name = name().into()).entered();
        self.layouter.assign_table(name, assignment)
    }

    fn constrain_instance(
        &mut self,
        cell: Cell,
        column: Column<Instance>,
        row: usize,
    ) -> Result<(), Error> {
        self.layouter.constrain_instance(cell, column, row)
    }

    /// Forwards challenge queries to the wrapped layouter.
    ///
    /// FIX: previously this returned `Value::unknown()`, so any circuit using
    /// challenges saw them as unknown under tracing and its witness generation
    /// diverged from an untraced run. A tracing wrapper must be transparent.
    fn get_challenge(&self, challenge: Challenge) -> Value<F> {
        self.layouter.get_challenge(challenge)
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }

    /// Opens an `ns` span (closed in the matching `pop_namespace`) and delegates.
    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        let name = name_fn().into();
        self.namespace_spans.push(debug_span!("ns", name).entered());
        self.layouter.push_namespace(|| name);
    }

    /// Delegates, then closes the span opened by the matching `push_namespace`.
    fn pop_namespace(&mut self, gadget_name: Option<String>) {
        self.layouter.pop_namespace(gadget_name);
        self.namespace_spans.pop();
    }
}
/// Logs the assigned value at debug level (when it is known), then returns the
/// cell it was assigned to.
fn debug_value_and_return_cell<F: Field, V: fmt::Debug>(assigned: AssignedCell<V, F>) -> Cell {
    match assigned.value().into_option() {
        Some(v) => debug!(target: "assigned", value = ?v),
        None => {}
    }
    assigned.cell()
}
/// A helper type that augments a [`Region`] with [`tracing`] spans and events.
///
/// Newtype wrapper: each `RegionLayouter` call is logged before being delegated
/// to the inner region.
#[derive(Debug)]
struct TracingRegion<'r, F: Field>(Region<'r, F>);
// Every method enters a span describing the operation (and its position), emits
// a debug event, delegates to the wrapped region, and logs assigned values.
impl<'r, F: Field> RegionLayouter<F> for TracingRegion<'r, F> {
    fn enable_selector<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        selector: &Selector,
        offset: usize,
    ) -> Result<(), Error> {
        let _guard = debug_span!("enable_selector", name = annotation(), offset = offset).entered();
        debug!(target: "layouter", "Entered");
        self.0.enable_selector(annotation, selector, offset)
    }

    // Column annotations are not traced and not forwarded.
    fn name_column<'v>(
        &'v mut self,
        _: &'v (dyn std::ops::Fn() -> std::string::String + 'v),
        _: Column<Any>,
    ) {
    }

    fn assign_advice<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Advice>,
        offset: usize,
        to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
    ) -> Result<Cell, Error> {
        let _guard =
            debug_span!("assign_advice", name = annotation(), column = ?column, offset = offset)
                .entered();
        debug!(target: "layouter", "Entered");
        self.0
            .assign_advice(annotation, column, offset, to)
            .map(debug_value_and_return_cell)
    }

    fn assign_advice_from_constant<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Advice>,
        offset: usize,
        constant: Assigned<F>,
    ) -> Result<Cell, Error> {
        let _guard = debug_span!("assign_advice_from_constant",
            name = annotation(),
            column = ?column,
            offset = offset,
            constant = ?constant,
        )
        .entered();
        debug!(target: "layouter", "Entered");
        self.0
            .assign_advice_from_constant(annotation, column, offset, constant)
            .map(debug_value_and_return_cell)
    }

    fn assign_advice_from_instance<'v>(
        &mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        instance: Column<Instance>,
        row: usize,
        advice: Column<Advice>,
        offset: usize,
    ) -> Result<(Cell, Value<F>), Error> {
        let _guard = debug_span!("assign_advice_from_instance",
            name = annotation(),
            instance = ?instance,
            row = row,
            advice = ?advice,
            offset = offset,
        )
        .entered();
        debug!(target: "layouter", "Entered");
        self.0
            .assign_advice_from_instance(annotation, instance, row, advice, offset)
            .map(|value| {
                // Returns both the cell and the value, so log inline rather than
                // via `debug_value_and_return_cell`.
                if let Some(v) = value.value().into_option() {
                    debug!(target: "assigned", value = ?v);
                }

                (value.cell(), value.value().cloned())
            })
    }

    // Pure query; not traced.
    fn instance_value(
        &mut self,
        instance: Column<Instance>,
        row: usize,
    ) -> Result<Value<F>, Error> {
        self.0.instance_value(instance, row)
    }

    fn assign_fixed<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Fixed>,
        offset: usize,
        to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
    ) -> Result<Cell, Error> {
        let _guard =
            debug_span!("assign_fixed", name = annotation(), column = ?column, offset = offset)
                .entered();
        debug!(target: "layouter", "Entered");
        self.0
            .assign_fixed(annotation, column, offset, to)
            .map(debug_value_and_return_cell)
    }

    fn constrain_constant(&mut self, cell: Cell, constant: Assigned<F>) -> Result<(), Error> {
        debug!(target: "constrain_constant", cell = ?cell, constant = ?constant);
        self.0.constrain_constant(cell, constant)
    }

    fn constrain_equal(&mut self, left: Cell, right: Cell) -> Result<(), Error> {
        debug!(target: "constrain_equal", left = ?left, right = ?right);
        self.0.constrain_equal(left, right)
    }
}
/// A helper type that augments an [`Assignment`] with [`tracing`] spans and events.
struct TracingAssignment<'cs, F: Field, CS: Assignment<F>> {
    /// The wrapped assignment backend.
    cs: &'cs mut CS,
    /// Whether we are currently inside a region; selects the logging target
    /// (`position` inside a region, the operation name outside).
    in_region: bool,
    _phantom: PhantomData<F>,
}
impl<'cs, F: Field, CS: Assignment<F>> TracingAssignment<'cs, F, CS> {
fn new(cs: &'cs mut CS) -> Self {
Self {
cs,
in_region: false,
_phantom: PhantomData,
}
}
}
// Logs each assignment operation — using the `position` target inside a region
// (the enclosing region span already names the operation) and a per-operation
// target outside — then delegates to the wrapped backend.
impl<'cs, F: Field, CS: Assignment<F>> Assignment<F> for TracingAssignment<'cs, F, CS> {
    fn enter_region<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.in_region = true;
        self.cs.enter_region(name_fn);
    }

    // Column annotations are not traced and not forwarded.
    fn annotate_column<A, AR>(&mut self, _: A, _: Column<Any>)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
    }

    fn exit_region(&mut self) {
        self.cs.exit_region();
        self.in_region = false;
    }

    fn enable_selector<A, AR>(
        &mut self,
        annotation: A,
        selector: &Selector,
        row: usize,
    ) -> Result<(), Error>
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        let annotation = annotation().into();
        if self.in_region {
            debug!(target: "position", row = row);
        } else {
            debug!(target: "enable_selector", name = annotation, row = row);
        }
        self.cs.enable_selector(|| annotation, selector, row)
    }

    fn query_instance(&self, column: Column<Instance>, row: usize) -> Result<Value<F>, Error> {
        let _guard = debug_span!("positioned").entered();
        debug!(target: "query_instance", column = ?column, row = row);
        self.cs.query_instance(column, row)
    }

    fn assign_advice<V, VR, A, AR>(
        &mut self,
        annotation: A,
        column: Column<Advice>,
        row: usize,
        to: V,
    ) -> Result<(), Error>
    where
        V: FnOnce() -> Value<VR>,
        VR: Into<Assigned<F>>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        let annotation = annotation().into();
        if self.in_region {
            debug!(target: "position", row = row);
        } else {
            debug!(target: "assign_advice", name = annotation, column = ?column, row = row);
        }
        self.cs.assign_advice(|| annotation, column, row, to)
    }

    fn assign_fixed<V, VR, A, AR>(
        &mut self,
        annotation: A,
        column: Column<Fixed>,
        row: usize,
        to: V,
    ) -> Result<(), Error>
    where
        V: FnOnce() -> Value<VR>,
        VR: Into<Assigned<F>>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        let annotation = annotation().into();
        if self.in_region {
            debug!(target: "position", row = row);
        } else {
            debug!(target: "assign_fixed", name = annotation, column = ?column, row = row);
        }
        self.cs.assign_fixed(|| annotation, column, row, to)
    }

    fn copy(
        &mut self,
        left_column: Column<Any>,
        left_row: usize,
        right_column: Column<Any>,
        right_row: usize,
    ) -> Result<(), Error> {
        let _guard = debug_span!("positioned").entered();
        debug!(
            target: "copy",
            left_column = ?left_column,
            left_row = left_row,
            right_column = ?right_column,
            right_row = right_row,
        );
        self.cs.copy(left_column, left_row, right_column, right_row)
    }

    fn fill_from_row(
        &mut self,
        column: Column<Fixed>,
        row: usize,
        to: Value<Assigned<F>>,
    ) -> Result<(), Error> {
        let _guard = debug_span!("positioned").entered();
        debug!(target: "fill_from_row", column = ?column, row = row);
        self.cs.fill_from_row(column, row, to)
    }

    /// Forwards challenge queries to the wrapped backend.
    ///
    /// FIX: previously this returned `Value::unknown()`, so witness generation
    /// through the tracing wrapper saw every challenge as unknown and diverged
    /// from an untraced run. A tracing wrapper must be transparent.
    fn get_challenge(&self, challenge: Challenge) -> Value<F> {
        self.cs.get_challenge(challenge)
    }

    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // We enter namespace spans in TracingLayouter.
        self.cs.push_namespace(name_fn)
    }

    fn pop_namespace(&mut self, gadget_name: Option<String>) {
        self.cs.pop_namespace(gadget_name);
        // We exit namespace spans in TracingLayouter.
    }
}

View File

@ -1,7 +1,7 @@
use group::ff::Field;
use std::collections::BTreeMap;
use super::{metadata, CellValue, Value};
use super::{metadata, CellValue, InstanceValue, Value};
use crate::{
plonk::{
Advice, AdviceQuery, Any, Column, ColumnType, Expression, FixedQuery, Gate, InstanceQuery,
@ -88,12 +88,13 @@ pub(super) fn load_instance<'a, F: Field, T: ColumnType, Q: Into<AnyQuery> + Cop
n: i32,
row: i32,
queries: &'a [(Column<T>, Rotation)],
cells: &'a [Vec<F>],
cells: &'a [Vec<InstanceValue<F>>],
) -> impl Fn(Q) -> Value<F> + 'a {
move |query| {
let (column, at) = &queries[query.into().index.unwrap()];
let resolved_row = (row + at.0) % n;
Value::Real(cells[column.index()][resolved_row as usize])
let cell = &cells[column.index()][resolved_row as usize];
Value::Real(cell.value())
}
}

View File

@ -1,6 +1,6 @@
use crate::poly::Polynomial;
use ff::PrimeField;
use halo2curves::{pairing::Engine, serde::SerdeObject, CurveAffine};
use halo2curves::{serde::SerdeObject, CurveAffine};
use std::io;
/// This enum specifies how various types are serialized and deserialized.

View File

@ -1,29 +1,12 @@
//! # halo2_proofs
#![cfg_attr(docsrs, feature(doc_cfg))]
// Build without warnings on stable 1.51 and later.
#![allow(unknown_lints)]
// Disable old lint warnings until our MSRV is at least 1.51.
#![allow(renamed_and_removed_lints)]
// Use the old lint name to build without warnings until our MSRV is at least 1.51.
#![allow(clippy::unknown_clippy_lints)]
// The actual lints we want to disable.
#![allow(
clippy::op_ref,
clippy::assign_op_pattern,
clippy::too_many_arguments,
clippy::suspicious_arithmetic_impl,
clippy::many_single_char_names,
clippy::same_item_push,
clippy::upper_case_acronyms
)]
#![deny(broken_intra_doc_links)]
#![allow(clippy::op_ref, clippy::many_single_char_names)]
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
// Remove this once we update pasta_curves
#![allow(unused_imports)]
#![allow(clippy::derive_partial_eq_without_eq)]
pub mod arithmetic;
pub mod circuit;

View File

@ -1,5 +1,70 @@
//! An interface for dealing with the kinds of parallel computations involved in
//! `halo2`. It's currently just a (very!) thin wrapper around [`rayon`] but may
//! be extended in the future to allow for various parallelism strategies.
#[cfg(all(
feature = "multicore",
target_arch = "wasm32",
not(target_feature = "atomics")
))]
compile_error!(
"The multicore feature flag is not supported on wasm32 architectures without atomics"
);
pub use rayon::{current_num_threads, scope, Scope};
pub use maybe_rayon::{
iter::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator},
join, scope, Scope,
};
#[cfg(feature = "multicore")]
pub use maybe_rayon::{
current_num_threads,
iter::{IndexedParallelIterator, IntoParallelRefIterator},
slice::ParallelSliceMut,
};
#[cfg(not(feature = "multicore"))]
/// Single-threaded fallback: without the `multicore` feature exactly one
/// thread is available.
pub fn current_num_threads() -> usize {
    1
}
/// Fallible fold-and-reduce that works over either a rayon parallel iterator
/// (with the `multicore` feature) or a plain `Iterator` (without it).
pub trait TryFoldAndReduce<T, E> {
    /// Implements `iter.try_fold().try_reduce()` for `rayon::iter::ParallelIterator`,
    /// falling back on `Iterator::try_fold` when the `multicore` feature flag is
    /// disabled.
    /// The `try_fold_and_reduce` function can only be called on an iterator with
    /// `Result<T, E>` item type because the `fold_op` must meet the trait
    /// bounds of both `try_fold` and `try_reduce` from rayon.
    fn try_fold_and_reduce(
        self,
        identity: impl Fn() -> T + Send + Sync,
        fold_op: impl Fn(T, Result<T, E>) -> Result<T, E> + Send + Sync,
    ) -> Result<T, E>;
}
#[cfg(feature = "multicore")]
impl<T, E, I> TryFoldAndReduce<T, E> for I
where
    T: Send + Sync,
    E: Send + Sync,
    I: maybe_rayon::iter::ParallelIterator<Item = Result<T, E>>,
{
    // Parallel path: fold within each rayon job, then reduce the per-job
    // partial results, re-wrapping each in `Ok` so `fold_op` can be reused.
    fn try_fold_and_reduce(
        self,
        identity: impl Fn() -> T + Send + Sync,
        fold_op: impl Fn(T, Result<T, E>) -> Result<T, E> + Send + Sync,
    ) -> Result<T, E> {
        self.try_fold(&identity, &fold_op)
            .try_reduce(&identity, |a, b| fold_op(a, Ok(b)))
    }
}
#[cfg(not(feature = "multicore"))]
impl<T, E, I> TryFoldAndReduce<T, E> for I
where
    I: std::iter::Iterator<Item = Result<T, E>>,
{
    // Sequential fallback: a single `try_fold` over the iterator. The
    // `Send + Sync` bounds are kept so the signature matches the parallel path.
    fn try_fold_and_reduce(
        mut self,
        identity: impl Fn() -> T + Send + Sync,
        fold_op: impl Fn(T, Result<T, E>) -> Result<T, E> + Send + Sync,
    ) -> Result<T, E> {
        self.try_fold(identity(), fold_op)
    }
}

View File

@ -14,8 +14,8 @@ use crate::helpers::{
SerdePrimeField,
};
use crate::poly::{
commitment::Params, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff,
PinnedEvaluationDomain, Polynomial,
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, PinnedEvaluationDomain,
Polynomial,
};
use crate::transcript::{ChallengeScalar, EncodedChallenge, Transcript};
use crate::SerdeFormat;

View File

@ -446,7 +446,6 @@ mod tests {
mod proptests {
use std::{
cmp,
convert::TryFrom,
ops::{Add, Mul, Neg, Sub},
};

View File

@ -9,9 +9,8 @@ use core::cmp::max;
use core::ops::{Add, Mul};
use ff::Field;
use sealed::SealedPhase;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::fmt::Debug;
use std::{
convert::TryFrom,
ops::{Neg, Sub},
@ -564,7 +563,7 @@ impl InstanceQuery {
/// they cannot simultaneously be used as general fixed columns.
///
/// [`Layouter::assign_table`]: crate::circuit::Layouter::assign_table
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct TableColumn {
/// The fixed column that this table column is stored in.
///
@ -873,6 +872,7 @@ impl<F: Field> Expression<F> {
/// Evaluate the polynomial using the provided closures to perform the
/// operations.
#[allow(clippy::too_many_arguments)]
pub fn evaluate<T>(
&self,
constant: &impl Fn(F) -> T,
@ -982,6 +982,7 @@ impl<F: Field> Expression<F> {
/// Evaluate the polynomial lazily using the provided closures to perform the
/// operations.
#[allow(clippy::too_many_arguments)]
pub fn evaluate_lazy<T: PartialEq>(
&self,
constant: &impl Fn(F) -> T,

View File

@ -1,8 +1,8 @@
use std::cmp;
use std::error;
use std::fmt;
use std::io;
use super::TableColumn;
use super::{Any, Column};
/// This is an error that could occur during proving or circuit synthesis.
@ -37,6 +37,8 @@ pub enum Error {
/// The instance sets up a copy constraint involving a column that has not been
/// included in the permutation.
ColumnNotInPermutation(Column<Any>),
/// An error relating to a lookup table.
TableError(TableError),
}
impl From<io::Error> for Error {
@ -79,6 +81,7 @@ impl fmt::Display for Error {
"Column {:?} must be included in the permutation. Help: try applying `meta.enable_equalty` on the column",
column
),
Error::TableError(error) => write!(f, "{}", error)
}
}
}
@ -91,3 +94,45 @@ impl error::Error for Error {
}
}
}
/// This is an error that could occur during table synthesis.
#[derive(Debug)]
pub enum TableError {
    /// A `TableColumn` has not been assigned.
    ColumnNotAssigned(TableColumn),
    /// A Table has columns of uneven lengths.
    UnevenColumnLengths((TableColumn, usize), (TableColumn, usize)),
    /// Attempt to assign to a `TableColumn` that has already been used.
    UsedColumn(TableColumn),
    /// Attempt to overwrite a default value (stored as `(column, default, new)`).
    OverwriteDefault(TableColumn, String, String),
}
// Human-readable messages; these surface to users via `Error::TableError`.
impl fmt::Display for TableError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            TableError::ColumnNotAssigned(col) => {
                write!(
                    f,
                    "{:?} not fully assigned. Help: assign a value at offset 0.",
                    col
                )
            }
            TableError::UnevenColumnLengths((col, col_len), (table, table_len)) => write!(
                f,
                "{:?} has length {} while {:?} has length {}",
                col, col_len, table, table_len
            ),
            TableError::UsedColumn(col) => {
                write!(f, "{:?} has already been used", col)
            }
            TableError::OverwriteDefault(col, default, val) => {
                write!(
                    f,
                    "Attempted to overwrite default value {} with {} in {:?}",
                    default, val, col
                )
            }
        }
    }
}

View File

@ -1,30 +1,11 @@
use crate::multicore;
use crate::plonk::lookup::prover::Committed;
use crate::plonk::permutation::Argument;
use crate::plonk::{lookup, permutation, AdviceQuery, Any, FixedQuery, InstanceQuery, ProvingKey};
use crate::plonk::{lookup, permutation, Any, ProvingKey};
use crate::poly::Basis;
use crate::{
arithmetic::{eval_polynomial, parallelize, CurveAffine},
poly::{
commitment::Params, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff,
Polynomial, ProverQuery, Rotation,
},
transcript::{EncodedChallenge, TranscriptWrite},
};
use group::prime::PrimeCurve;
use group::{
ff::{BatchInvert, Field, PrimeField, WithSmallOrderMulGroup},
Curve,
};
use std::any::TypeId;
use std::convert::TryInto;
use std::num::ParseIntError;
use std::slice;
use std::{
collections::BTreeMap,
iter,
ops::{Index, Mul, MulAssign},
arithmetic::{parallelize, CurveAffine},
poly::{Coeff, ExtendedLagrangeCoeff, Polynomial, Rotation},
};
use group::ff::{Field, PrimeField, WithSmallOrderMulGroup};
use super::{shuffle, ConstraintSystem, Expression};
@ -68,6 +49,7 @@ impl Default for ValueSource {
impl ValueSource {
/// Get the value for this source
#[allow(clippy::too_many_arguments)]
pub fn get<F: Field, B: Basis>(
&self,
rotations: &[usize],
@ -128,6 +110,7 @@ pub enum Calculation {
impl Calculation {
/// Get the resulting value of this calculation
#[allow(clippy::too_many_arguments)]
pub fn evaluate<F: Field, B: Basis>(
&self,
rotations: &[usize],
@ -312,6 +295,7 @@ impl<C: CurveAffine> Evaluator<C> {
}
/// Evaluate h poly
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn evaluate_h(
&self,
pk: &ProvingKey<C>,
@ -796,6 +780,7 @@ impl<C: CurveAffine> GraphEvaluator<C> {
}
}
#[allow(clippy::too_many_arguments)]
pub fn evaluate<B: Basis>(
&self,
data: &mut EvaluationData<C>,

View File

@ -11,15 +11,14 @@ use super::{
Selector,
},
evaluation::Evaluator,
permutation, Assigned, Challenge, Error, Expression, LagrangeCoeff, Polynomial, ProvingKey,
VerifyingKey,
permutation, Assigned, Challenge, Error, LagrangeCoeff, Polynomial, ProvingKey, VerifyingKey,
};
use crate::{
arithmetic::{parallelize, CurveAffine},
circuit::{layouter::SyncDeps, Value},
circuit::Value,
poly::{
batch_invert_assigned,
commitment::{Blind, Params, MSM},
commitment::{Blind, Params},
EvaluationDomain,
},
};
@ -61,8 +60,6 @@ struct Assembly<F: Field> {
_marker: std::marker::PhantomData<F>,
}
impl<F: Field> SyncDeps for Assembly<F> {}
impl<F: Field> Assignment<F> for Assembly<F> {
fn enter_region<NR, N>(&mut self, _: N)
where

View File

@ -8,8 +8,7 @@ use crate::{
arithmetic::{eval_polynomial, parallelize, CurveAffine},
poly::{
commitment::{Blind, Params},
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery,
Rotation,
Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation,
},
transcript::{EncodedChallenge, TranscriptWrite},
};
@ -19,7 +18,6 @@ use group::{
Curve,
};
use rand_core::RngCore;
use std::{any::TypeId, convert::TryInto, num::ParseIntError, ops::Index};
use std::{
collections::BTreeMap,
iter,
@ -62,6 +60,7 @@ impl<F: WithSmallOrderMulGroup<3>> Argument<F> {
/// - constructs Permuted<C> struct using permuted_input_value = A', and
/// permuted_table_expression = S'.
/// The Permuted<C> struct is used to update the Lookup, and is then returned.
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn commit_permuted<
'a,
'params: 'a,

View File

@ -90,6 +90,7 @@ impl<C: CurveAffine> Committed<C> {
}
impl<C: CurveAffine> Evaluated<C> {
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn expressions<'a>(
&'a self,
l_0: C::Scalar,
@ -141,7 +142,7 @@ impl<C: CurveAffine> Evaluated<C> {
std::iter::empty()
.chain(
// l_0(X) * (1 - z'(X)) = 0
// l_0(X) * (1 - z(X)) = 0
Some(l_0 * &(C::Scalar::ONE - &self.product_eval)),
)
.chain(

View File

@ -10,7 +10,6 @@ use crate::{
poly::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial},
SerdeFormat,
};
use ff::PrimeField;
pub(crate) mod keygen;
pub(crate) mod prover;

View File

@ -1,22 +1,22 @@
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use ff::{Field, PrimeField};
use group::Curve;
use rayon::prelude::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator,
IntoParallelRefMutIterator, ParallelIterator, ParallelSliceMut,
};
use super::{Argument, ProvingKey, VerifyingKey};
use crate::{
arithmetic::{parallelize, CurveAffine},
plonk::{Any, Column, Error},
poly::{
commitment::{Blind, CommitmentScheme, Params},
commitment::{Blind, Params},
EvaluationDomain,
},
};
#[cfg(feature = "multicore")]
use crate::multicore::{IndexedParallelIterator, ParallelIterator};
#[cfg(feature = "thread-safe-region")]
use std::collections::{BTreeSet, HashMap};
#[cfg(not(feature = "thread-safe-region"))]
/// Struct that accumulates all the necessary data in order to construct the permutation argument.
#[derive(Clone, Debug, PartialEq, Eq)]
@ -133,12 +133,21 @@ impl Assembly {
&self.columns
}
#[cfg(feature = "multicore")]
/// Returns mappings of the copies.
pub fn mapping(
&self,
) -> impl Iterator<Item = impl IndexedParallelIterator<Item = (usize, usize)> + '_> {
use crate::multicore::IntoParallelRefIterator;
self.mapping.iter().map(|c| c.par_iter().copied())
}
#[cfg(not(feature = "multicore"))]
/// Returns mappings of the copies.
pub fn mapping(&self) -> impl Iterator<Item = impl Iterator<Item = (usize, usize)> + '_> {
self.mapping.iter().map(|c| c.iter().copied())
}
}
#[cfg(feature = "thread-safe-region")]
@ -239,6 +248,8 @@ impl Assembly {
/// Builds the ordered mapping of the cycles.
/// This will only get executed once.
pub fn build_ordered_mapping(&mut self) {
use crate::multicore::IntoParallelRefMutIterator;
// will only get called once
if self.ordered_cycles.is_empty() && !self.cycles.is_empty() {
self.ordered_cycles = self
@ -304,16 +315,25 @@ impl Assembly {
&self.columns
}
#[cfg(feature = "multicore")]
/// Returns mappings of the copies.
pub fn mapping(
&self,
) -> impl Iterator<Item = impl IndexedParallelIterator<Item = (usize, usize)> + '_> {
use crate::multicore::IntoParallelIterator;
(0..self.num_cols).map(move |i| {
(0..self.col_len)
.into_par_iter()
.map(move |j| self.mapping_at_idx(i, j))
})
}
#[cfg(not(feature = "multicore"))]
/// Returns mappings of the copies.
pub fn mapping(&self) -> impl Iterator<Item = impl Iterator<Item = (usize, usize)> + '_> {
(0..self.num_cols).map(move |i| (0..self.col_len).map(move |j| self.mapping_at_idx(i, j)))
}
}
pub(crate) fn build_pk<'params, C: CurveAffine, P: Params<'params, C>>(

View File

@ -12,7 +12,6 @@ use crate::{
arithmetic::{eval_polynomial, parallelize, CurveAffine},
plonk::{self, Error},
poly::{
self,
commitment::{Blind, Params},
Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery, Rotation,
},
@ -43,6 +42,7 @@ pub(crate) struct Evaluated<C: CurveAffine> {
}
impl Argument {
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn commit<
'params,
C: CurveAffine,

View File

@ -99,6 +99,7 @@ impl<C: CurveAffine> Committed<C> {
}
impl<C: CurveAffine> Evaluated<C> {
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn expressions<'a>(
&'a self,
vk: &'a plonk::VerifyingKey<C>,

View File

@ -1,32 +1,27 @@
use ff::{Field, FromUniformBytes, PrimeField, WithSmallOrderMulGroup};
use ff::{Field, FromUniformBytes, WithSmallOrderMulGroup};
use group::Curve;
use halo2curves::CurveExt;
use rand_core::RngCore;
use std::collections::BTreeSet;
use std::env::var;
use std::ops::RangeTo;
use std::sync::atomic::AtomicUsize;
use std::time::Instant;
use std::{collections::HashMap, iter, mem, sync::atomic::Ordering};
use std::{collections::HashMap, iter};
use super::{
circuit::{
sealed::{self, SealedPhase},
Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, FirstPhase, Fixed,
FloorPlanner, Instance, Selector,
sealed::{self},
Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, Fixed, FloorPlanner,
Instance, Selector,
},
lookup, permutation, shuffle, vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta,
ChallengeX, ChallengeY, Error, Expression, ProvingKey,
ChallengeX, ChallengeY, Error, ProvingKey,
};
use crate::circuit::layouter::SyncDeps;
use crate::{
arithmetic::{eval_polynomial, CurveAffine},
circuit::Value,
plonk::Assigned,
poly::{
self,
commitment::{Blind, CommitmentScheme, Params, Prover},
Basis, Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery,
Basis, Coeff, LagrangeCoeff, Polynomial, ProverQuery,
},
};
use crate::{
@ -58,6 +53,10 @@ pub fn create_proof<
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
{
if circuits.len() != instances.len() {
return Err(Error::InvalidInstances);
}
for instance in instances.iter() {
if instance.len() != pk.vk.cs.num_instance_columns {
return Err(Error::InvalidInstances);
@ -154,8 +153,6 @@ where
_marker: std::marker::PhantomData<F>,
}
impl<'a, F: Field> SyncDeps for WitnessCollection<'a, F> {}
impl<'a, F: Field> Assignment<F> for WitnessCollection<'a, F> {
fn enter_region<NR, N>(&mut self, _: N)
where
@ -554,7 +551,7 @@ where
let vanishing = vanishing.construct(params, domain, h_poly, &mut rng, transcript)?;
let x: ChallengeX<_> = transcript.squeeze_challenge_scalar();
let xn = x.pow([params.n(), 0, 0, 0]);
let xn = x.pow([params.n()]);
if P::QUERY_INSTANCE {
// Compute and hash instance evals for each circuit instance
@ -700,3 +697,69 @@ where
.create_proof(rng, transcript, instances)
.map_err(|_| Error::ConstraintSystemFailure)
}
#[test]
fn test_create_proof() {
use crate::{
circuit::SimpleFloorPlanner,
plonk::{keygen_pk, keygen_vk},
poly::kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
multiopen::ProverSHPLONK,
},
transcript::{Blake2bWrite, Challenge255, TranscriptWriterBuffer},
};
use halo2curves::bn256::Bn256;
use rand_core::OsRng;
#[derive(Clone, Copy)]
struct MyCircuit;
impl<F: Field> Circuit<F> for MyCircuit {
type Config = ();
type FloorPlanner = SimpleFloorPlanner;
#[cfg(feature = "circuit-params")]
type Params = ();
fn without_witnesses(&self) -> Self {
*self
}
fn configure(_meta: &mut ConstraintSystem<F>) -> Self::Config {}
fn synthesize(
&self,
_config: Self::Config,
_layouter: impl crate::circuit::Layouter<F>,
) -> Result<(), Error> {
Ok(())
}
}
let params: ParamsKZG<Bn256> = ParamsKZG::setup(3, OsRng);
let vk = keygen_vk(&params, &MyCircuit).expect("keygen_vk should not fail");
let pk = keygen_pk(&params, vk, &MyCircuit).expect("keygen_pk should not fail");
let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]);
// Create proof with wrong number of instances
let proof = create_proof::<KZGCommitmentScheme<_>, ProverSHPLONK<_>, _, _, _, _>(
&params,
&pk,
&[MyCircuit, MyCircuit],
&[],
OsRng,
&mut transcript,
);
assert!(matches!(proof.unwrap_err(), Error::InvalidInstances));
// Create proof with correct number of instances
create_proof::<KZGCommitmentScheme<_>, ProverSHPLONK<_>, _, _, _, _>(
&params,
&pk,
&[MyCircuit, MyCircuit],
&[&[], &[]],
OsRng,
&mut transcript,
)
.expect("proof generation should not fail");
}

View File

@ -1,6 +1,5 @@
use super::super::{
circuit::Expression, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error,
ProvingKey,
circuit::Expression, ChallengeGamma, ChallengeTheta, ChallengeX, Error, ProvingKey,
};
use super::Argument;
use crate::plonk::evaluation::evaluate;
@ -8,20 +7,14 @@ use crate::{
arithmetic::{eval_polynomial, parallelize, CurveAffine},
poly::{
commitment::{Blind, Params},
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, ProverQuery,
Rotation,
Coeff, EvaluationDomain, LagrangeCoeff, Polynomial, ProverQuery, Rotation,
},
transcript::{EncodedChallenge, TranscriptWrite},
};
use ff::WithSmallOrderMulGroup;
use group::{
ff::{BatchInvert, Field},
Curve,
};
use group::{ff::BatchInvert, Curve};
use rand_core::RngCore;
use std::{any::TypeId, convert::TryInto, num::ParseIntError, ops::Index};
use std::{
collections::BTreeMap,
iter,
ops::{Mul, MulAssign},
};
@ -47,6 +40,7 @@ impl<F: WithSmallOrderMulGroup<3>> Argument<F> {
/// [S_0, S_1, ..., S_{m-1}], this method
/// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1}
/// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... + \theta S_{m-2} + S_{m-1},
#[allow(clippy::too_many_arguments)]
fn compress<'a, 'params: 'a, C, P: Params<'params, C>>(
&self,
pk: &ProvingKey<C>,
@ -99,6 +93,7 @@ impl<F: WithSmallOrderMulGroup<3>> Argument<F> {
/// constructs the grand product polynomial over the shuffle.
/// The grand product polynomial is used to populate the Product<C> struct.
/// The Product<C> struct is added to the Shuffle and finally returned by the method.
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn commit_product<
'a,
'params: 'a,

View File

@ -52,6 +52,7 @@ impl<C: CurveAffine> Committed<C> {
}
impl<C: CurveAffine> Evaluated<C> {
#[allow(clippy::too_many_arguments)]
pub(in crate::plonk) fn expressions<'a>(
&'a self,
l_0: C::Scalar,

View File

@ -1,17 +1,16 @@
use std::iter;
use std::{collections::HashMap, iter};
use ff::{Field, PrimeField};
use ff::Field;
use group::Curve;
use rand_chacha::ChaCha20Rng;
use rand_core::{RngCore, SeedableRng};
use rayon::{current_num_threads, prelude::*};
use super::Argument;
use crate::{
arithmetic::{eval_polynomial, CurveAffine},
plonk::{ChallengeX, ChallengeY, Error},
arithmetic::{eval_polynomial, parallelize, CurveAffine},
multicore::current_num_threads,
plonk::{ChallengeX, Error},
poly::{
self,
commitment::{Blind, ParamsProver},
Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial, ProverQuery,
},
@ -50,26 +49,33 @@ impl<C: CurveAffine> Argument<C> {
) -> Result<Committed<C>, Error> {
// Sample a random polynomial of degree n - 1
let n = 1usize << domain.k() as usize;
let chunk_size = (n as f64 / current_num_threads() as f64).ceil() as usize;
let num_chunks = (n as f64 / chunk_size as f64).ceil() as usize;
let mut rand_vec = vec![C::Scalar::ZERO; n];
let mut thread_seeds: Vec<ChaCha20Rng> = (0..num_chunks)
.map(|_| {
let num_threads = current_num_threads();
let chunk_size = n / num_threads;
let thread_seeds = (0..)
.step_by(chunk_size + 1)
.take(n % num_threads)
.chain(
(chunk_size != 0)
.then(|| ((n % num_threads) * (chunk_size + 1)..).step_by(chunk_size))
.into_iter()
.flatten(),
)
.take(num_threads)
.zip(iter::repeat_with(|| {
let mut seed = [0u8; 32];
rng.fill_bytes(&mut seed);
ChaCha20Rng::from_seed(seed)
})
.collect();
}))
.collect::<HashMap<_, _>>();
thread_seeds
.par_iter_mut()
.zip_eq(rand_vec.par_chunks_mut(chunk_size))
.for_each(|(mut rng, chunk)| {
chunk
.iter_mut()
.for_each(|v| *v = C::Scalar::random(&mut rng))
});
parallelize(&mut rand_vec, |chunk, offset| {
let mut rng = thread_seeds[&offset].clone();
chunk
.iter_mut()
.for_each(|v| *v = C::Scalar::random(&mut rng));
});
let random_poly: Polynomial<C::Scalar, Coeff> = domain.coeff_from_vec(rand_vec);

View File

@ -1,28 +1,25 @@
use ff::{Field, FromUniformBytes, WithSmallOrderMulGroup};
use group::Curve;
use rand_core::RngCore;
use std::iter;
use super::{
vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error,
VerifyingKey,
};
use crate::arithmetic::{compute_inner_product, CurveAffine};
use crate::arithmetic::compute_inner_product;
use crate::poly::commitment::{CommitmentScheme, Verifier};
use crate::poly::VerificationStrategy;
use crate::poly::{
commitment::{Blind, Params, MSM},
Guard, VerifierQuery,
commitment::{Blind, Params},
VerifierQuery,
};
use crate::transcript::{read_n_points, read_n_scalars, EncodedChallenge, TranscriptRead};
use crate::transcript::{read_n_scalars, EncodedChallenge, TranscriptRead};
#[cfg(feature = "batch")]
mod batch;
#[cfg(feature = "batch")]
pub use batch::BatchVerifier;
use crate::poly::commitment::ParamsVerifier;
/// Returns a boolean indicating whether or not the proof is valid
pub fn verify_proof<
'params,
@ -188,7 +185,7 @@ where
})
.collect::<Result<Vec<_>, _>>()?
} else {
let xn = x.pow([params.n(), 0, 0, 0]);
let xn = x.pow([params.n()]);
let (min_rotation, max_rotation) =
vk.cs
.instance_queries
@ -267,7 +264,7 @@ where
// commitments open to the correct values.
let vanishing = {
// x^n
let xn = x.pow([params.n(), 0, 0, 0]);
let xn = x.pow([params.n()]);
let blinding_factors = vk.cs.blinding_factors();
let l_evals = vk

View File

@ -1,14 +1,11 @@
use std::{io, marker::PhantomData};
use ff::FromUniformBytes;
use group::ff::Field;
use halo2curves::CurveAffine;
use rand_core::{OsRng, RngCore};
use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
use rand_core::OsRng;
use super::{verify_proof, VerificationStrategy};
use crate::{
multicore,
multicore::{IntoParallelIterator, TryFoldAndReduce},
plonk::{Error, VerifyingKey},
poly::{
commitment::{Params, MSM},
@ -22,6 +19,9 @@ use crate::{
transcript::{Blake2bRead, TranscriptReadBuffer},
};
#[cfg(feature = "multicore")]
use crate::multicore::{IndexedParallelIterator, ParallelIterator};
/// A proof verification strategy that returns the proof's MSM.
///
/// `BatchVerifier` handles the accumulation of the MSMs for the batched proofs.
@ -123,11 +123,10 @@ where
e
})
})
.try_fold(
.try_fold_and_reduce(
|| params.empty_msm(),
|msm, res| res.map(|proof_msm| accumulate_msm(msm, proof_msm)),
)
.try_reduce(|| params.empty_msm(), |a, b| Ok(accumulate_msm(a, b)));
|acc, res| res.map(|proof_msm| accumulate_msm(acc, proof_msm)),
);
match final_msm {
Ok(msm) => msm.check(),

View File

@ -7,7 +7,6 @@ use crate::helpers::SerdePrimeField;
use crate::plonk::Assigned;
use crate::SerdeFormat;
use ff::PrimeField;
use group::ff::{BatchInvert, Field};
use std::fmt::Debug;
use std::io;
@ -304,7 +303,7 @@ impl<'a, F: Field, B: Basis> Sub<F> for &'a Polynomial<F, B> {
/// Describes the relative rotation of a vector. Negative numbers represent
/// reverse (leftmost) rotations and positive numbers represent forward (rightmost)
/// rotations. Zero represents no rotation.
#[derive(Copy, Clone, Debug, PartialEq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Rotation(pub i32);
impl Rotation {

View File

@ -6,12 +6,11 @@ use super::{
use crate::poly::Error;
use crate::transcript::{EncodedChallenge, TranscriptRead, TranscriptWrite};
use ff::Field;
use group::{Curve, Group};
use halo2curves::{CurveAffine, CurveExt};
use halo2curves::CurveAffine;
use rand_core::RngCore;
use std::{
fmt::Debug,
io::{self, Read, Write},
io::{self},
ops::{Add, AddAssign, Mul, MulAssign},
};

View File

@ -8,10 +8,7 @@ use crate::{
use super::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation};
use ff::WithSmallOrderMulGroup;
use group::{
ff::{BatchInvert, Field, PrimeField},
Group,
};
use group::ff::{BatchInvert, Field};
use std::marker::PhantomData;
@ -54,6 +51,9 @@ impl<F: WithSmallOrderMulGroup<3>> EvaluationDomain<F> {
extended_k += 1;
}
// ensure extended_k <= S
assert!(extended_k <= F::S);
let mut extended_omega = F::ROOT_OF_UNITY;
// Get extended_omega, the 2^{extended_k}'th root of unity
@ -537,7 +537,7 @@ fn test_l_i() {
let mut l = vec![];
let mut points = vec![];
for i in 0..8 {
points.push(domain.omega.pow([i, 0, 0, 0]));
points.push(domain.omega.pow([i]));
}
for i in 0..8 {
let mut l_i = vec![Scalar::zero(); 8];
@ -547,7 +547,7 @@ fn test_l_i() {
}
let x = Scalar::random(OsRng);
let xn = x.pow([8, 0, 0, 0]);
let xn = x.pow([8]);
let evaluations = domain.l_i_range(x, xn, -7..=7);
for i in 0..8 {

View File

@ -1,699 +0,0 @@
use std::{
cmp,
collections::{HashMap, HashSet},
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
ops::{Add, Mul, MulAssign, Neg, Sub},
sync::Arc,
};
use group::ff::Field;
use halo2curves::Field;
use super::{
Basis, Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial, Rotation,
};
use crate::{arithmetic::parallelize, multicore};
/// Returns `(chunk_size, num_chunks)` suitable for processing the given polynomial length
/// in the current parallelization environment.
fn get_chunk_params(poly_len: usize) -> (usize, usize) {
// Check the level of parallelization we have available.
let num_threads = multicore::current_num_threads();
// We scale the number of chunks by a constant factor, to ensure that if not all
// threads are available, we can achieve more uniform throughput and don't end up
// waiting on a couple of threads to process the last chunks.
let num_chunks = num_threads * 4;
// Calculate the ideal chunk size for the desired throughput. We use ceiling
// division to ensure the minimum chunk size is 1.
// chunk_size = ceil(poly_len / num_chunks)
let chunk_size = (poly_len + num_chunks - 1) / num_chunks;
// Now re-calculate num_chunks from the actual chunk size.
// num_chunks = ceil(poly_len / chunk_size)
let num_chunks = (poly_len + chunk_size - 1) / chunk_size;
(chunk_size, num_chunks)
}
/// A reference to a polynomial registered with an [`Evaluator`].
#[derive(Clone, Copy)]
pub(crate) struct AstLeaf<E, B: Basis> {
index: usize,
rotation: Rotation,
_evaluator: PhantomData<(E, B)>,
}
impl<E, B: Basis> fmt::Debug for AstLeaf<E, B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AstLeaf")
.field("index", &self.index)
.field("rotation", &self.rotation)
.finish()
}
}
impl<E, B: Basis> PartialEq for AstLeaf<E, B> {
fn eq(&self, rhs: &Self) -> bool {
// We compare rotations by offset, which doesn't account for equivalent rotations.
self.index.eq(&rhs.index) && self.rotation.0.eq(&rhs.rotation.0)
}
}
impl<E, B: Basis> Eq for AstLeaf<E, B> {}
impl<E, B: Basis> Hash for AstLeaf<E, B> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.index.hash(state);
self.rotation.0.hash(state);
}
}
impl<E, B: Basis> AstLeaf<E, B> {
/// Produces a new `AstLeaf` node corresponding to the underlying polynomial at a
/// _new_ rotation. Existing rotations applied to this leaf node are ignored and the
/// returned polynomial is not rotated _relative_ to the previous structure.
pub(crate) fn with_rotation(&self, rotation: Rotation) -> Self {
AstLeaf {
index: self.index,
rotation,
_evaluator: PhantomData::default(),
}
}
}
/// An evaluation context for polynomial operations.
///
/// This context enables us to de-duplicate queries of circuit columns (and the rotations
/// they might require), by storing a list of all the underlying polynomials involved in
/// any query (which are almost certainly column polynomials). We use the context like so:
///
/// - We register each underlying polynomial with the evaluator, which returns a reference
/// to it as a [`AstLeaf`].
/// - The references are then used to build up a [`Ast`] that represents the overall
/// operations to be applied to the polynomials.
/// - Finally, we call [`Evaluator::evaluate`] passing in the [`Ast`].
pub(crate) struct Evaluator<E, F: Field, B: Basis> {
polys: Vec<Polynomial<F, B>>,
_context: E,
}
/// Constructs a new `Evaluator`.
///
/// The `context` parameter is used to provide type safety for evaluators. It ensures that
/// an evaluator will only be used to evaluate [`Ast`]s containing [`AstLeaf`]s obtained
/// from itself. It should be set to the empty closure `|| {}`, because anonymous closures
/// all have unique types.
pub(crate) fn new_evaluator<E: Fn() + Clone, F: Field, B: Basis>(context: E) -> Evaluator<E, F, B> {
Evaluator {
polys: vec![],
_context: context,
}
}
impl<E, F: Field, B: Basis> Evaluator<E, F, B> {
/// Registers the given polynomial for use in this evaluation context.
///
/// This API treats each registered polynomial as unique, even if the same polynomial
/// is added multiple times.
pub(crate) fn register_poly(&mut self, poly: Polynomial<F, B>) -> AstLeaf<E, B> {
let index = self.polys.len();
self.polys.push(poly);
AstLeaf {
index,
rotation: Rotation::cur(),
_evaluator: PhantomData::default(),
}
}
/// Evaluates the given polynomial operation against this context.
pub(crate) fn evaluate(
&self,
ast: &Ast<E, F, B>,
domain: &EvaluationDomain<F>,
) -> Polynomial<F, B>
where
E: Copy + Send + Sync,
F: Field,
B: BasisOps,
{
// Traverse `ast` to collect the used leaves.
fn collect_rotations<E: Copy, F: Field, B: Basis>(
ast: &Ast<E, F, B>,
) -> HashSet<AstLeaf<E, B>> {
match ast {
Ast::Poly(leaf) => vec![*leaf].into_iter().collect(),
Ast::Add(a, b) | Ast::Mul(AstMul(a, b)) => {
let lhs = collect_rotations(a);
let rhs = collect_rotations(b);
lhs.union(&rhs).cloned().collect()
}
Ast::Scale(a, _) => collect_rotations(a),
Ast::DistributePowers(terms, _) => terms
.iter()
.flat_map(|term| collect_rotations(term).into_iter())
.collect(),
Ast::LinearTerm(_) | Ast::ConstantTerm(_) => HashSet::default(),
}
}
let leaves = collect_rotations(ast);
// Produce the rotated polynomials.
let rotated: HashMap<_, _> = leaves
.iter()
.cloned()
.map(|leaf| {
(
leaf,
B::rotate(domain, &self.polys[leaf.index], leaf.rotation),
)
})
.collect();
// We're working in a single basis, so all polynomials are the same length.
let poly_len = self.polys.first().unwrap().len();
let (chunk_size, num_chunks) = get_chunk_params(poly_len);
// Split each rotated polynomial into chunks.
let chunks: Vec<HashMap<_, _>> = (0..num_chunks)
.map(|i| {
rotated
.iter()
.map(|(leaf, poly)| {
(
*leaf,
poly.chunks(chunk_size)
.nth(i)
.expect("num_chunks was calculated correctly"),
)
})
.collect()
})
.collect();
struct AstContext<'a, E, F: Field, B: Basis> {
domain: &'a EvaluationDomain<F>,
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
leaves: &'a HashMap<AstLeaf<E, B>, &'a [F]>,
}
fn recurse<E, F: Field, B: BasisOps>(
ast: &Ast<E, F, B>,
ctx: &AstContext<'_, E, F, B>,
) -> Vec<F> {
match ast {
Ast::Poly(leaf) => ctx.leaves.get(leaf).expect("We prepared this").to_vec(),
Ast::Add(a, b) => {
let mut lhs = recurse(a, ctx);
let rhs = recurse(b, ctx);
for (lhs, rhs) in lhs.iter_mut().zip(rhs.iter()) {
*lhs += *rhs;
}
lhs
}
Ast::Mul(AstMul(a, b)) => {
let mut lhs = recurse(a, ctx);
let rhs = recurse(b, ctx);
for (lhs, rhs) in lhs.iter_mut().zip(rhs.iter()) {
*lhs *= *rhs;
}
lhs
}
Ast::Scale(a, scalar) => {
let mut lhs = recurse(a, ctx);
for lhs in lhs.iter_mut() {
*lhs *= scalar;
}
lhs
}
Ast::DistributePowers(terms, base) => terms.iter().fold(
B::constant_term(ctx.poly_len, ctx.chunk_size, ctx.chunk_index, F::ZERO),
|mut acc, term| {
let term = recurse(term, ctx);
for (acc, term) in acc.iter_mut().zip(term) {
*acc *= base;
*acc += term;
}
acc
},
),
Ast::LinearTerm(scalar) => B::linear_term(
ctx.domain,
ctx.poly_len,
ctx.chunk_size,
ctx.chunk_index,
*scalar,
),
Ast::ConstantTerm(scalar) => {
B::constant_term(ctx.poly_len, ctx.chunk_size, ctx.chunk_index, *scalar)
}
}
}
// Apply `ast` to each chunk in parallel, writing the result into an output
// polynomial.
let mut result = B::empty_poly(domain);
multicore::scope(|scope| {
for (chunk_index, (out, leaves)) in
result.chunks_mut(chunk_size).zip(chunks.iter()).enumerate()
{
scope.spawn(move |_| {
let ctx = AstContext {
domain,
poly_len,
chunk_size,
chunk_index,
leaves,
};
out.copy_from_slice(&recurse(ast, &ctx));
});
}
});
result
}
}
/// Struct representing the [`Ast::Mul`] case.
///
/// This struct exists to make the internals of this case private so that we don't
/// accidentally construct this case directly, because it can only be implemented for the
/// [`ExtendedLagrangeCoeff`] basis.
#[derive(Clone)]
pub(crate) struct AstMul<E, F: Field, B: Basis>(Arc<Ast<E, F, B>>, Arc<Ast<E, F, B>>);
impl<E, F: Field, B: Basis> fmt::Debug for AstMul<E, F, B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("AstMul")
.field(&self.0)
.field(&self.1)
.finish()
}
}
/// A polynomial operation backed by an [`Evaluator`].
#[derive(Clone)]
pub(crate) enum Ast<E, F: Field, B: Basis> {
Poly(AstLeaf<E, B>),
Add(Arc<Ast<E, F, B>>, Arc<Ast<E, F, B>>),
Mul(AstMul<E, F, B>),
Scale(Arc<Ast<E, F, B>>, F),
/// Represents a linear combination of a vector of nodes and the powers of a
/// field element, where the nodes are ordered from highest to lowest degree
/// terms.
DistributePowers(Arc<Vec<Ast<E, F, B>>>, F),
/// The degree-1 term of a polynomial.
///
/// The field element is the coefficient of the term in the standard basis, not the
/// coefficient basis.
LinearTerm(F),
/// The degree-0 term of a polynomial.
///
/// The field element is the same in both the standard and evaluation bases.
ConstantTerm(F),
}
impl<E, F: Field, B: Basis> Ast<E, F, B> {
pub fn distribute_powers<I: IntoIterator<Item = Self>>(i: I, base: F) -> Self {
Ast::DistributePowers(Arc::new(i.into_iter().collect()), base)
}
}
impl<E, F: Field, B: Basis> fmt::Debug for Ast<E, F, B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Poly(leaf) => f.debug_tuple("Poly").field(leaf).finish(),
Self::Add(lhs, rhs) => f.debug_tuple("Add").field(lhs).field(rhs).finish(),
Self::Mul(x) => f.debug_tuple("Mul").field(x).finish(),
Self::Scale(base, scalar) => f.debug_tuple("Scale").field(base).field(scalar).finish(),
Self::DistributePowers(terms, base) => f
.debug_tuple("DistributePowers")
.field(terms)
.field(base)
.finish(),
Self::LinearTerm(x) => f.debug_tuple("LinearTerm").field(x).finish(),
Self::ConstantTerm(x) => f.debug_tuple("ConstantTerm").field(x).finish(),
}
}
}
impl<E, F: Field, B: Basis> From<AstLeaf<E, B>> for Ast<E, F, B> {
fn from(leaf: AstLeaf<E, B>) -> Self {
Ast::Poly(leaf)
}
}
impl<E, F: Field, B: Basis> Ast<E, F, B> {
pub(crate) fn one() -> Self {
Self::ConstantTerm(F::ONE)
}
}
impl<E, F: Field, B: Basis> Neg for Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn neg(self) -> Self::Output {
Ast::Scale(Arc::new(self), -F::ONE)
}
}
impl<E: Clone, F: Field, B: Basis> Neg for &Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn neg(self) -> Self::Output {
-(self.clone())
}
}
impl<E, F: Field, B: Basis> Add for Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn add(self, other: Self) -> Self::Output {
Ast::Add(Arc::new(self), Arc::new(other))
}
}
impl<'a, E: Clone, F: Field, B: Basis> Add<&'a Ast<E, F, B>> for &'a Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn add(self, other: &'a Ast<E, F, B>) -> Self::Output {
self.clone() + other.clone()
}
}
impl<E, F: Field, B: Basis> Add<AstLeaf<E, B>> for Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn add(self, other: AstLeaf<E, B>) -> Self::Output {
Ast::Add(Arc::new(self), Arc::new(other.into()))
}
}
impl<E, F: Field, B: Basis> Sub for Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn sub(self, other: Self) -> Self::Output {
self + (-other)
}
}
impl<'a, E: Clone, F: Field, B: Basis> Sub<&'a Ast<E, F, B>> for &'a Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn sub(self, other: &'a Ast<E, F, B>) -> Self::Output {
self + &(-other)
}
}
impl<E, F: Field, B: Basis> Sub<AstLeaf<E, B>> for Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn sub(self, other: AstLeaf<E, B>) -> Self::Output {
self + (-Ast::from(other))
}
}
impl<E, F: Field> Mul for Ast<E, F, LagrangeCoeff> {
type Output = Ast<E, F, LagrangeCoeff>;
fn mul(self, other: Self) -> Self::Output {
Ast::Mul(AstMul(Arc::new(self), Arc::new(other)))
}
}
impl<'a, E: Clone, F: Field> Mul<&'a Ast<E, F, LagrangeCoeff>> for &'a Ast<E, F, LagrangeCoeff> {
type Output = Ast<E, F, LagrangeCoeff>;
fn mul(self, other: &'a Ast<E, F, LagrangeCoeff>) -> Self::Output {
self.clone() * other.clone()
}
}
impl<E, F: Field> Mul<AstLeaf<E, LagrangeCoeff>> for Ast<E, F, LagrangeCoeff> {
type Output = Ast<E, F, LagrangeCoeff>;
fn mul(self, other: AstLeaf<E, LagrangeCoeff>) -> Self::Output {
Ast::Mul(AstMul(Arc::new(self), Arc::new(other.into())))
}
}
impl<E, F: Field> Mul for Ast<E, F, ExtendedLagrangeCoeff> {
type Output = Ast<E, F, ExtendedLagrangeCoeff>;
fn mul(self, other: Self) -> Self::Output {
Ast::Mul(AstMul(Arc::new(self), Arc::new(other)))
}
}
impl<'a, E: Clone, F: Field> Mul<&'a Ast<E, F, ExtendedLagrangeCoeff>>
for &'a Ast<E, F, ExtendedLagrangeCoeff>
{
type Output = Ast<E, F, ExtendedLagrangeCoeff>;
fn mul(self, other: &'a Ast<E, F, ExtendedLagrangeCoeff>) -> Self::Output {
self.clone() * other.clone()
}
}
impl<E, F: Field> Mul<AstLeaf<E, ExtendedLagrangeCoeff>> for Ast<E, F, ExtendedLagrangeCoeff> {
type Output = Ast<E, F, ExtendedLagrangeCoeff>;
fn mul(self, other: AstLeaf<E, ExtendedLagrangeCoeff>) -> Self::Output {
Ast::Mul(AstMul(Arc::new(self), Arc::new(other.into())))
}
}
impl<E, F: Field, B: Basis> Mul<F> for Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn mul(self, other: F) -> Self::Output {
Ast::Scale(Arc::new(self), other)
}
}
impl<E: Clone, F: Field, B: Basis> Mul<F> for &Ast<E, F, B> {
type Output = Ast<E, F, B>;
fn mul(self, other: F) -> Self::Output {
Ast::Scale(Arc::new(self.clone()), other)
}
}
impl<E: Clone, F: Field> MulAssign for Ast<E, F, ExtendedLagrangeCoeff> {
fn mul_assign(&mut self, rhs: Self) {
*self = self.clone().mul(rhs)
}
}
/// Operations which can be performed over a given basis.
pub(crate) trait BasisOps: Basis {
fn empty_poly<F: Field>(domain: &EvaluationDomain<F>) -> Polynomial<F, Self>;
fn constant_term<F: Field>(
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F>;
fn linear_term<F: Field>(
domain: &EvaluationDomain<F>,
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F>;
fn rotate<F: Field>(
domain: &EvaluationDomain<F>,
poly: &Polynomial<F, Self>,
rotation: Rotation,
) -> Polynomial<F, Self>;
}
impl BasisOps for Coeff {
fn empty_poly<F: Field>(domain: &EvaluationDomain<F>) -> Polynomial<F, Self> {
domain.empty_coeff()
}
fn constant_term<F: Field>(
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F> {
let mut chunk = vec![F::ZERO; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)];
if chunk_index == 0 {
chunk[0] = scalar;
}
chunk
}
fn linear_term<F: Field>(
_: &EvaluationDomain<F>,
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F> {
let mut chunk = vec![F::ZERO; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)];
// If the chunk size is 1 (e.g. if we have a small k and many threads), then the
// linear coefficient is the second chunk. Otherwise, the chunk size is greater
// than one, and the linear coefficient is the second element of the first chunk.
// Note that we check against the original chunk size, not the potentially-short
// actual size of the current chunk, because we want to know whether the size of
// the previous chunk was 1.
if chunk_size == 1 && chunk_index == 1 {
chunk[0] = scalar;
} else if chunk_index == 0 {
chunk[1] = scalar;
}
chunk
}
fn rotate<F: Field>(
_: &EvaluationDomain<F>,
_: &Polynomial<F, Self>,
_: Rotation,
) -> Polynomial<F, Self> {
panic!("Can't rotate polynomials in the standard basis")
}
}
impl BasisOps for LagrangeCoeff {
fn empty_poly<F: Field>(domain: &EvaluationDomain<F>) -> Polynomial<F, Self> {
domain.empty_lagrange()
}
fn constant_term<F: Field>(
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F> {
vec![scalar; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)]
}
fn linear_term<F: Field>(
domain: &EvaluationDomain<F>,
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F> {
// Take every power of omega within the chunk, and multiply by scalar.
let omega = domain.get_omega();
let start = chunk_size * chunk_index;
(0..cmp::min(chunk_size, poly_len - start))
.scan(omega.pow_vartime(&[start as u64]) * scalar, |acc, _| {
let ret = *acc;
*acc *= omega;
Some(ret)
})
.collect()
}
fn rotate<F: Field>(
_: &EvaluationDomain<F>,
poly: &Polynomial<F, Self>,
rotation: Rotation,
) -> Polynomial<F, Self> {
poly.rotate(rotation)
}
}
impl BasisOps for ExtendedLagrangeCoeff {
fn empty_poly<F: Field>(domain: &EvaluationDomain<F>) -> Polynomial<F, Self> {
domain.empty_extended()
}
fn constant_term<F: Field>(
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F> {
vec![scalar; cmp::min(chunk_size, poly_len - chunk_size * chunk_index)]
}
fn linear_term<F: Field>(
domain: &EvaluationDomain<F>,
poly_len: usize,
chunk_size: usize,
chunk_index: usize,
scalar: F,
) -> Vec<F> {
// Take every power of the extended omega within the chunk, and multiply by scalar.
let omega = domain.get_extended_omega();
let start = chunk_size * chunk_index;
(0..cmp::min(chunk_size, poly_len - start))
.scan(
omega.pow_vartime(&[start as u64]) * F::ZETA * scalar,
|acc, _| {
let ret = *acc;
*acc *= omega;
Some(ret)
},
)
.collect()
}
fn rotate<F: Field>(
domain: &EvaluationDomain<F>,
poly: &Polynomial<F, Self>,
rotation: Rotation,
) -> Polynomial<F, Self> {
domain.rotate_extended(poly, rotation)
}
}
#[cfg(test)]
mod tests {
use std::iter;
use halo2curves::pasta::pallas;
use super::{get_chunk_params, new_evaluator, Ast, BasisOps, Evaluator};
use crate::{
multicore,
poly::{Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff},
};
#[test]
fn short_chunk_regression_test() {
// Pick the smallest polynomial length that is guaranteed to produce a short chunk
// on this machine.
let k = match (1..16)
.map(|k| (k, get_chunk_params(1 << k)))
.find(|(k, (chunk_size, num_chunks))| (1 << k) < chunk_size * num_chunks)
.map(|(k, _)| k)
{
Some(k) => k,
None => {
// We are on a machine with a power-of-two number of threads, and cannot
// trigger the bug.
eprintln!(
"can't find a polynomial length for short_chunk_regression_test; skipping"
);
return;
}
};
eprintln!("Testing short-chunk regression with k = {}", k);
fn test_case<E: Copy + Send + Sync, B: BasisOps>(
k: u32,
mut evaluator: Evaluator<E, pallas::Base, B>,
) {
// Instantiate the evaluator with a trivial polynomial.
let domain = EvaluationDomain::new(1, k);
evaluator.register_poly(B::empty_poly(&domain));
// With the bug present, these will panic.
let _ = evaluator.evaluate(&Ast::ConstantTerm(pallas::Base::zero()), &domain);
let _ = evaluator.evaluate(&Ast::LinearTerm(pallas::Base::zero()), &domain);
}
test_case(k, new_evaluator::<_, _, Coeff>(|| {}));
test_case(k, new_evaluator::<_, _, LagrangeCoeff>(|| {}));
test_case(k, new_evaluator::<_, _, ExtendedLagrangeCoeff>(|| {}));
}
}

View File

@ -3,18 +3,14 @@
//!
//! [halo]: https://eprint.iacr.org/2019/1021
use crate::arithmetic::{
best_fft, best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt,
};
use crate::arithmetic::{best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt};
use crate::helpers::CurveRead;
use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier, MSM};
use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier};
use crate::poly::ipa::msm::MSMIPA;
use crate::poly::{Coeff, LagrangeCoeff, Polynomial};
use ff::{Field, PrimeField};
use group::{prime::PrimeCurveAffine, Curve, Group};
use group::{Curve, Group};
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, Mul, MulAssign};
mod prover;
mod verifier;
@ -233,21 +229,13 @@ impl<'params, C: CurveAffine> ParamsProver<'params, C> for ParamsIPA<C> {
#[cfg(test)]
mod test {
use crate::arithmetic::{best_fft, best_multiexp, parallelize, CurveAffine, CurveExt};
use crate::helpers::CurveRead;
use crate::poly::commitment::ParamsProver;
use crate::poly::commitment::{Blind, CommitmentScheme, Params, MSM};
use crate::poly::commitment::{Blind, Params, MSM};
use crate::poly::ipa::commitment::{create_proof, verify_proof, ParamsIPA};
use crate::poly::ipa::msm::MSMIPA;
use crate::poly::{Coeff, LagrangeCoeff, Polynomial};
use ff::{Field, PrimeField};
use group::{prime::PrimeCurveAffine, Curve, Group};
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, Mul, MulAssign};
use std::io;
use ff::Field;
use group::Curve;
#[test]
fn test_commit_lagrange_epaffine() {

View File

@ -1,7 +1,7 @@
use ff::Field;
use rand_core::RngCore;
use super::{Params, ParamsIPA};
use super::ParamsIPA;
use crate::arithmetic::{
best_multiexp, compute_inner_product, eval_polynomial, parallelize, CurveAffine,
};
@ -11,7 +11,7 @@ use crate::poly::{commitment::Blind, Coeff, Polynomial};
use crate::transcript::{EncodedChallenge, TranscriptWrite};
use group::Curve;
use std::io::{self, Write};
use std::io::{self};
/// Create a polynomial commitment opening proof for the polynomial defined
/// by the coefficients `px`, the blinding factor `blind` used for the
@ -51,7 +51,7 @@ pub fn create_proof<
// Evaluate the random polynomial at x_3
let s_at_x3 = eval_polynomial(&s_poly[..], x_3);
// Subtract constant coefficient to get a random polynomial with a root at x_3
s_poly[0] = s_poly[0] - &s_at_x3;
s_poly[0] -= &s_at_x3;
// And sample a random blind
let s_poly_blind = Blind(C::Scalar::random(&mut rng));
@ -72,7 +72,7 @@ pub fn create_proof<
// zero.
let mut p_prime_poly = s_poly * xi + p_poly;
let v = eval_polynomial(&p_prime_poly, x_3);
p_prime_poly[0] = p_prime_poly[0] - &v;
p_prime_poly[0] -= &v;
let p_prime_blind = s_poly_blind * Blind(xi) + p_blind;
// This accumulates the synthetic blinding factor `f` starting

View File

@ -1,18 +1,9 @@
use std::io::Read;
use group::{
ff::{BatchInvert, Field},
Curve,
};
use group::ff::{BatchInvert, Field};
use super::ParamsIPA;
use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsVerifierIPA};
use crate::{arithmetic::CurveAffine, poly::ipa::strategy::GuardIPA};
use crate::{
arithmetic::{best_multiexp, CurveAffine},
poly::ipa::strategy::GuardIPA,
};
use crate::{
poly::{commitment::MSM, ipa::msm::MSMIPA, strategy::Guard, Error},
poly::{commitment::MSM, ipa::msm::MSMIPA, Error},
transcript::{EncodedChallenge, TranscriptRead},
};
@ -75,6 +66,9 @@ pub fn verify_proof<'params, C: CurveAffine, E: EncodedChallenge<C>, T: Transcri
// P' + \sum([u_j^{-1}] L_j) + \sum([u_j] R_j)
// + [-c] G'_0 + [-cbz] U + [-f] W
// = 0
//
// Note that the guard returned from this function does not include
// the [-c]G'_0 term.
let c = transcript.read_scalar().map_err(|_| Error::SamplingError)?;
let neg_c = -c;

View File

@ -1,9 +1,5 @@
use super::commitment::{IPACommitmentScheme, ParamsIPA};
use crate::arithmetic::{best_multiexp, parallelize, CurveAffine};
use crate::poly::{
commitment::{CommitmentScheme, Params, MSM},
ipa::commitment::ParamsVerifierIPA,
};
use crate::arithmetic::{best_multiexp, CurveAffine};
use crate::poly::{commitment::MSM, ipa::commitment::ParamsVerifierIPA};
use ff::Field;
use group::Group;
use std::collections::BTreeMap;
@ -222,13 +218,10 @@ impl<'a, C: CurveAffine> MSMIPA<'a, C> {
#[cfg(test)]
mod tests {
use super::ParamsIPA;
use crate::poly::commitment::ParamsProver;
use crate::poly::{
commitment::{Params, MSM},
ipa::msm::MSMIPA,
commitment::{ParamsProver, MSM},
ipa::{commitment::ParamsIPA, msm::MSMIPA},
};
use group::Curve;
use halo2curves::{
pasta::{Ep, EpAffine, Fp, Fq},
CurveAffine,

View File

@ -4,7 +4,7 @@
//! [halo]: https://eprint.iacr.org/2019/1021
use super::*;
use crate::{arithmetic::CurveAffine, poly::query::Query, transcript::ChallengeScalar};
use crate::{poly::query::Query, transcript::ChallengeScalar};
use ff::Field;
use std::collections::{BTreeMap, BTreeSet};

View File

@ -1,9 +1,7 @@
use super::{
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query,
};
use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4};
use crate::arithmetic::{eval_polynomial, kate_division, CurveAffine};
use crate::poly::commitment::ParamsProver;
use crate::poly::commitment::{Blind, Params, Prover};
use crate::poly::commitment::{Blind, Prover};
use crate::poly::ipa::commitment::{self, IPACommitmentScheme, ParamsIPA};
use crate::poly::query::ProverQuery;
use crate::poly::{Coeff, Polynomial};

View File

@ -1,20 +1,14 @@
use std::fmt::Debug;
use std::io::Read;
use std::marker::PhantomData;
use ff::Field;
use rand_core::RngCore;
use super::{
construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4, Query,
};
use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4};
use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine};
use crate::poly::commitment::{Params, Verifier, MSM};
use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA, ParamsVerifierIPA};
use crate::poly::ipa::msm::MSMIPA;
use crate::poly::ipa::strategy::GuardIPA;
use crate::poly::query::{CommitmentReference, VerifierQuery};
use crate::poly::strategy::VerificationStrategy;
use crate::poly::Error;
use crate::transcript::{EncodedChallenge, TranscriptRead};
@ -57,7 +51,9 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme<C>>
// Compress the commitments and expected evaluations at x together.
// using the challenge x_1
let mut q_commitments: Vec<_> = vec![self.params.empty_msm(); point_sets.len()];
let mut q_commitments: Vec<_> = vec![
(self.params.empty_msm(), C::Scalar::ONE); // (accumulator, next x_1 power).
point_sets.len()];
// A vec of vecs of evals. The outer vec corresponds to the point set,
// while the inner vec corresponds to the points in a particular set.
@ -65,28 +61,32 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme<C>>
for point_set in point_sets.iter() {
q_eval_sets.push(vec![C::Scalar::ZERO; point_set.len()]);
}
{
let mut accumulate = |set_idx: usize,
new_commitment: CommitmentReference<C, MSMIPA<'params, C>>,
evals: Vec<C::Scalar>| {
q_commitments[set_idx].scale(*x_1);
let (q_commitment, x_1_power) = &mut q_commitments[set_idx];
match new_commitment {
CommitmentReference::Commitment(c) => {
q_commitments[set_idx].append_term(C::Scalar::ONE, (*c).into());
q_commitment.append_term(*x_1_power, (*c).into());
}
CommitmentReference::MSM(msm) => {
q_commitments[set_idx].add_msm(msm);
let mut msm = msm.clone();
msm.scale(*x_1_power);
q_commitment.add_msm(&msm);
}
}
for (eval, set_eval) in evals.iter().zip(q_eval_sets[set_idx].iter_mut()) {
*set_eval *= &(*x_1);
*set_eval += eval;
*set_eval += (*eval) * (*x_1_power);
}
*x_1_power *= *x_1;
};
// Each commitment corresponds to evaluations at a set of points.
// For each set, we collapse each commitment's evals pointwise.
for commitment_data in commitment_map.into_iter() {
// Run in order of increasing x_1 powers.
for commitment_data in commitment_map.into_iter().rev() {
accumulate(
commitment_data.set_index, // set_idx,
commitment_data.commitment, // commitment,
@ -135,7 +135,7 @@ impl<'params, C: CurveAffine> Verifier<'params, IPACommitmentScheme<C>>
msm.append_term(C::Scalar::ONE, q_prime_commitment.into());
let (msm, v) = q_commitments.into_iter().zip(u.iter()).fold(
(msm, msm_eval),
|(mut msm, msm_eval), (q_commitment, q_eval)| {
|(mut msm, msm_eval), ((q_commitment, _), q_eval)| {
msm.scale(*x_4);
msm.add_msm(&q_commitment);
(msm, msm_eval * &(*x_4) + q_eval)

View File

@ -1,10 +1,6 @@
use std::marker::PhantomData;
use super::commitment::{IPACommitmentScheme, ParamsIPA, ParamsVerifierIPA};
use super::commitment::{IPACommitmentScheme, ParamsIPA};
use super::msm::MSMIPA;
use super::multiopen::VerifierIPA;
use crate::poly::commitment::CommitmentScheme;
use crate::transcript::TranscriptRead;
use crate::{
arithmetic::best_multiexp,
plonk::Error,
@ -12,12 +8,11 @@ use crate::{
commitment::MSM,
strategy::{Guard, VerificationStrategy},
},
transcript::EncodedChallenge,
};
use ff::Field;
use group::Curve;
use halo2curves::CurveAffine;
use rand_core::{OsRng, RngCore};
use rand_core::OsRng;
/// Wrapper for verification accumulator
#[derive(Debug, Clone)]

View File

@ -1,8 +1,6 @@
use crate::arithmetic::{
best_fft, best_multiexp, g_to_lagrange, parallelize, CurveAffine, CurveExt,
};
use crate::arithmetic::{best_multiexp, g_to_lagrange, parallelize};
use crate::helpers::SerdeCurveAffine;
use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier, MSM};
use crate::poly::commitment::{Blind, CommitmentScheme, Params, ParamsProver, ParamsVerifier};
use crate::poly::{Coeff, LagrangeCoeff, Polynomial};
use crate::SerdeFormat;
@ -12,7 +10,6 @@ use halo2curves::pairing::Engine;
use rand_core::{OsRng, RngCore};
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, Mul, MulAssign};
use std::io;
@ -371,21 +368,10 @@ where
#[cfg(test)]
mod test {
use crate::arithmetic::{best_fft, best_multiexp, parallelize, CurveAffine, CurveExt};
use crate::poly::commitment::ParamsProver;
use crate::poly::commitment::{Blind, CommitmentScheme, Params, MSM};
use crate::poly::kzg::commitment::{ParamsKZG, ParamsVerifierKZG};
use crate::poly::kzg::msm::MSMKZG;
use crate::poly::kzg::multiopen::ProverSHPLONK;
use crate::poly::{Coeff, LagrangeCoeff, Polynomial};
use ff::{Field, PrimeField};
use group::{prime::PrimeCurveAffine, Curve, Group};
use halo2curves::bn256::G1Affine;
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, Mul, MulAssign};
use std::io;
use crate::poly::commitment::{Blind, Params};
use crate::poly::kzg::commitment::ParamsKZG;
use ff::Field;
#[test]
fn test_commit_lagrange() {
@ -416,13 +402,8 @@ mod test {
fn test_parameter_serialisation_roundtrip() {
const K: u32 = 4;
use ff::Field;
use rand_core::OsRng;
use super::super::commitment::{Blind, Params};
use crate::arithmetic::eval_polynomial;
use crate::halo2curves::bn256::{Bn256, Fr};
use crate::poly::EvaluationDomain;
use super::super::commitment::Params;
use crate::halo2curves::bn256::Bn256;
let params0 = ParamsKZG::<Bn256>::new(K);
let mut data = vec![];

View File

@ -1,8 +1,8 @@
use std::fmt::Debug;
use super::commitment::{KZGCommitmentScheme, ParamsKZG};
use super::commitment::ParamsKZG;
use crate::{
arithmetic::{best_multiexp, parallelize, CurveAffine},
arithmetic::{best_multiexp, parallelize},
poly::commitment::MSM,
};
use group::{Curve, Group};
@ -92,8 +92,6 @@ impl<E: Engine + Debug> PreMSM<E> {
}
pub(crate) fn normalize(self) -> MSMKZG<E> {
use group::prime::PrimeCurveAffine;
let (scalars, bases) = self
.projectives_msms
.into_iter()

View File

@ -4,20 +4,9 @@ mod verifier;
pub use prover::ProverGWC;
pub use verifier::VerifierGWC;
use crate::{
arithmetic::{eval_polynomial, CurveAffine},
poly::{
commitment::{Params, ParamsVerifier},
query::Query,
Coeff, Polynomial,
},
transcript::ChallengeScalar,
};
use crate::{poly::query::Query, transcript::ChallengeScalar};
use ff::Field;
use std::{
collections::{BTreeMap, BTreeSet},
marker::PhantomData,
};
use std::marker::PhantomData;
#[derive(Clone, Copy, Debug)]
struct U {}

View File

@ -1,23 +1,19 @@
use super::{construct_intermediate_sets, ChallengeV, Query};
use crate::arithmetic::{eval_polynomial, kate_division, powers, CurveAffine};
use crate::arithmetic::{kate_division, powers};
use crate::helpers::SerdeCurveAffine;
use crate::poly::commitment::ParamsProver;
use crate::poly::commitment::Prover;
use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
use crate::poly::query::ProverQuery;
use crate::poly::Rotation;
use crate::poly::{
commitment::{Blind, Params},
Coeff, Polynomial,
};
use crate::poly::{commitment::Blind, Polynomial};
use crate::transcript::{EncodedChallenge, TranscriptWrite};
use ff::{Field, PrimeField};
use ff::PrimeField;
use group::Curve;
use halo2curves::pairing::Engine;
use rand_core::RngCore;
use std::fmt::Debug;
use std::io::{self, Write};
use std::io;
use std::marker::PhantomData;
/// Concrete KZG prover with GWC variant

View File

@ -1,28 +1,20 @@
use std::fmt::Debug;
use std::io::Read;
use std::marker::PhantomData;
use super::{construct_intermediate_sets, ChallengeU, ChallengeV};
use crate::arithmetic::{eval_polynomial, lagrange_interpolate, powers, CurveAffine};
use crate::arithmetic::powers;
use crate::helpers::SerdeCurveAffine;
use crate::poly::commitment::Verifier;
use crate::poly::commitment::MSM;
use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
use crate::poly::kzg::msm::{DualMSM, MSMKZG};
use crate::poly::kzg::strategy::{AccumulatorStrategy, GuardKZG, SingleStrategy};
use crate::poly::kzg::strategy::GuardKZG;
use crate::poly::query::Query;
use crate::poly::query::{CommitmentReference, VerifierQuery};
use crate::poly::strategy::VerificationStrategy;
use crate::poly::{
commitment::{Params, ParamsVerifier},
Error,
};
use crate::poly::Error;
use crate::transcript::{EncodedChallenge, TranscriptRead};
use ff::{Field, PrimeField};
use group::Group;
use halo2curves::pairing::{Engine, MillerLoopResult, MultiMillerLoop};
use rand_core::OsRng;
use halo2curves::pairing::{Engine, MultiMillerLoop};
#[derive(Debug)]
/// Concrete KZG verifier with GWC variant

View File

@ -1,21 +1,16 @@
mod prover;
mod verifier;
use crate::{
arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine},
poly::{query::Query, Coeff, Polynomial},
transcript::ChallengeScalar,
};
use crate::multicore::IntoParallelIterator;
use crate::{poly::query::Query, transcript::ChallengeScalar};
use ff::Field;
pub use prover::ProverSHPLONK;
use rayon::prelude::*;
use std::{
collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
marker::PhantomData,
sync::Arc,
};
use std::collections::BTreeSet;
pub use verifier::VerifierSHPLONK;
#[cfg(feature = "multicore")]
use crate::multicore::ParallelIterator;
#[derive(Clone, Copy, Debug)]
struct U {}
type ChallengeU<F> = ChallengeScalar<F, U>;
@ -126,7 +121,8 @@ where
.into_par_iter()
.map(|commitment| {
let evals: Vec<F> = rotations_vec
.par_iter()
.as_slice()
.into_par_iter()
.map(|&&rotation| get_eval(commitment, rotation))
.collect();
Commitment((commitment, evals))
@ -148,18 +144,10 @@ where
#[cfg(test)]
mod proptests {
use proptest::{
collection::vec,
prelude::*,
sample::{select, subsequence},
strategy::Strategy,
};
use super::{construct_intermediate_sets, Commitment, IntermediateSets};
use crate::poly::Rotation;
use ff::{Field, FromUniformBytes};
use ff::FromUniformBytes;
use halo2curves::pasta::Fp;
use std::collections::BTreeMap;
use proptest::{collection::vec, prelude::*, sample::select};
use std::convert::TryFrom;
#[derive(Debug, Clone)]

View File

@ -1,5 +1,5 @@
use super::{
construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY, Commitment, Query, RotationSet,
construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY, Commitment, RotationSet,
};
use crate::arithmetic::{
eval_polynomial, evaluate_vanishing_polynomial, kate_division, lagrange_interpolate,
@ -9,20 +9,22 @@ use crate::helpers::SerdeCurveAffine;
use crate::poly::commitment::{Blind, ParamsProver, Prover};
use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
use crate::poly::query::{PolynomialPointer, ProverQuery};
use crate::poly::Rotation;
use crate::poly::{commitment::Params, Coeff, Polynomial};
use crate::poly::{Coeff, Polynomial};
use crate::transcript::{EncodedChallenge, TranscriptWrite};
use crate::multicore::IntoParallelIterator;
use ff::{Field, PrimeField};
use group::Curve;
use halo2curves::pairing::Engine;
use rand_core::RngCore;
use rayon::prelude::*;
use std::fmt::Debug;
use std::io::{self, Write};
use std::io;
use std::marker::PhantomData;
use std::ops::MulAssign;
#[cfg(feature = "multicore")]
use crate::multicore::ParallelIterator;
fn div_by_vanishing<F: Field>(poly: Polynomial<F, Coeff>, roots: &[F]) -> Vec<F> {
let poly = roots
.iter()
@ -136,40 +138,41 @@ where
// for different sets that are already combined with anoter challenge
let y: ChallengeY<_> = transcript.squeeze_challenge_scalar();
let quotient_contribution =
|rotation_set: &RotationSetExtension<E::G1Affine>| -> Polynomial<E::Scalar, Coeff> {
// [P_i_0(X) - R_i_0(X), P_i_1(X) - R_i_1(X), ... ]
let numerators = rotation_set
.commitments
.par_iter()
.map(|commitment| commitment.quotient_contribution())
.collect::<Vec<_>>();
let quotient_contribution = |rotation_set: &RotationSetExtension<E::G1Affine>| {
// [P_i_0(X) - R_i_0(X), P_i_1(X) - R_i_1(X), ... ]
#[allow(clippy::needless_collect)]
let numerators = rotation_set
.commitments
.as_slice()
.into_par_iter()
.map(|commitment| commitment.quotient_contribution())
.collect::<Vec<_>>();
// define numerator polynomial as
// N_i_j(X) = (P_i_j(X) - R_i_j(X))
// and combine polynomials with same evaluation point set
// N_i(X) = linear_combinination(y, N_i_j(X))
// where y is random scalar to combine numerator polynomials
let n_x = numerators
.into_iter()
.zip(powers(*y))
.map(|(numerator, power_of_y)| numerator * power_of_y)
.reduce(|acc, numerator| acc + &numerator)
.unwrap();
// define numerator polynomial as
// N_i_j(X) = (P_i_j(X) - R_i_j(X))
// and combine polynomials with same evaluation point set
// N_i(X) = linear_combinination(y, N_i_j(X))
// where y is random scalar to combine numerator polynomials
let n_x = numerators
.into_iter()
.zip(powers(*y))
.map(|(numerator, power_of_y)| numerator * power_of_y)
.reduce(|acc, numerator| acc + &numerator)
.unwrap();
let points = &rotation_set.points[..];
let points = &rotation_set.points[..];
// quotient contribution of this evaluation set is
// Q_i(X) = N_i(X) / Z_i(X) where
// Z_i(X) = (x - r_i_0) * (x - r_i_1) * ...
let mut poly = div_by_vanishing(n_x, points);
poly.resize(self.params.n as usize, E::Scalar::ZERO);
// quotient contribution of this evaluation set is
// Q_i(X) = N_i(X) / Z_i(X) where
// Z_i(X) = (x - r_i_0) * (x - r_i_1) * ...
let mut poly = div_by_vanishing(n_x, points);
poly.resize(self.params.n as usize, E::Scalar::ZERO);
Polynomial {
values: poly,
_marker: PhantomData,
}
};
Polynomial {
values: poly,
_marker: PhantomData,
}
};
let intermediate_sets = construct_intermediate_sets(queries);
let (rotation_sets, super_point_set) = (
@ -182,7 +185,8 @@ where
.map(|rotation_set| {
let commitments: Vec<CommitmentExtension<E::G1Affine>> = rotation_set
.commitments
.par_iter()
.as_slice()
.into_par_iter()
.map(|commitment_data| commitment_data.extend(&rotation_set.points))
.collect();
rotation_set.extend(commitments)
@ -191,8 +195,10 @@ where
let v: ChallengeV<_> = transcript.squeeze_challenge_scalar();
#[allow(clippy::needless_collect)]
let quotient_polynomials = rotation_sets
.par_iter()
.as_slice()
.into_par_iter()
.map(quotient_contribution)
.collect::<Vec<_>>();
@ -207,36 +213,43 @@ where
transcript.write_point(h)?;
let u: ChallengeU<_> = transcript.squeeze_challenge_scalar();
let linearisation_contribution =
|rotation_set: RotationSetExtension<E::G1Affine>| -> (Polynomial<E::Scalar, Coeff>, E::Scalar) {
let mut diffs = super_point_set.clone();
for point in rotation_set.points.iter() {
diffs.remove(point);
}
let diffs = diffs.into_iter().collect::<Vec<_>>();
let linearisation_contribution = |rotation_set: RotationSetExtension<E::G1Affine>| {
let mut diffs = super_point_set.clone();
for point in rotation_set.points.iter() {
diffs.remove(point);
}
let diffs = diffs.into_iter().collect::<Vec<_>>();
// calculate difference vanishing polynomial evaluation
let z_i = evaluate_vanishing_polynomial(&diffs[..], *u);
// calculate difference vanishing polynomial evaluation
let z_i = evaluate_vanishing_polynomial(&diffs[..], *u);
// inner linearisation contibutions are
// [P_i_0(X) - r_i_0, P_i_1(X) - r_i_1, ... ] where
// r_i_j = R_i_j(u) is the evaluation of low degree equivalent polynomial
// where u is random evaluation point
let inner_contributions = rotation_set
.commitments
.par_iter()
.map(|commitment| commitment.linearisation_contribution(*u)).collect::<Vec<_>>();
// inner linearisation contibutions are
// [P_i_0(X) - r_i_0, P_i_1(X) - r_i_1, ... ] where
// r_i_j = R_i_j(u) is the evaluation of low degree equivalent polynomial
// where u is random evaluation point
#[allow(clippy::needless_collect)]
let inner_contributions = rotation_set
.commitments
.as_slice()
.into_par_iter()
.map(|commitment| commitment.linearisation_contribution(*u))
.collect::<Vec<_>>();
// define inner contributor polynomial as
// L_i_j(X) = (P_i_j(X) - r_i_j)
// and combine polynomials with same evaluation point set
// L_i(X) = linear_combinination(y, L_i_j(X))
// where y is random scalar to combine inner contibutors
let l_x: Polynomial<E::Scalar, Coeff> = inner_contributions.into_iter().zip(powers(*y)).map(|(poly, power_of_y)| poly * power_of_y).reduce(|acc, poly| acc + &poly).unwrap();
// define inner contributor polynomial as
// L_i_j(X) = (P_i_j(X) - r_i_j)
// and combine polynomials with same evaluation point set
// L_i(X) = linear_combinination(y, L_i_j(X))
// where y is random scalar to combine inner contibutors
let l_x: Polynomial<E::Scalar, Coeff> = inner_contributions
.into_iter()
.zip(powers(*y))
.map(|(poly, power_of_y)| poly * power_of_y)
.reduce(|acc, poly| acc + &poly)
.unwrap();
// finally scale l_x by difference vanishing polynomial evaluation z_i
(l_x * z_i, z_i)
};
// finally scale l_x by difference vanishing polynomial evaluation z_i
(l_x * z_i, z_i)
};
#[allow(clippy::type_complexity)]
let (linearisation_contibutions, z_diffs): (

View File

@ -1,10 +1,9 @@
use std::fmt::Debug;
use std::io::Read;
use super::ChallengeY;
use super::{construct_intermediate_sets, ChallengeU, ChallengeV};
use crate::arithmetic::{
eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, powers, CurveAffine,
eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, powers,
};
use crate::helpers::SerdeCurveAffine;
use crate::poly::commitment::Verifier;
@ -12,19 +11,12 @@ use crate::poly::commitment::MSM;
use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
use crate::poly::kzg::msm::DualMSM;
use crate::poly::kzg::msm::{PreMSM, MSMKZG};
use crate::poly::kzg::strategy::{AccumulatorStrategy, GuardKZG, SingleStrategy};
use crate::poly::query::Query;
use crate::poly::kzg::strategy::GuardKZG;
use crate::poly::query::{CommitmentReference, VerifierQuery};
use crate::poly::strategy::VerificationStrategy;
use crate::poly::{
commitment::{Params, ParamsVerifier},
Error,
};
use crate::poly::Error;
use crate::transcript::{EncodedChallenge, TranscriptRead};
use ff::{Field, PrimeField};
use group::Group;
use halo2curves::pairing::{Engine, MillerLoopResult, MultiMillerLoop};
use rand_core::OsRng;
use halo2curves::pairing::{Engine, MultiMillerLoop};
use std::ops::MulAssign;
/// Concrete KZG multiopen verifier with SHPLONK variant

View File

@ -1,27 +1,19 @@
use std::{fmt::Debug, marker::PhantomData};
use super::{
commitment::{KZGCommitmentScheme, ParamsKZG},
msm::{DualMSM, MSMKZG},
multiopen::VerifierGWC,
msm::DualMSM,
};
use crate::{
helpers::SerdeCurveAffine,
plonk::Error,
poly::{
commitment::{Verifier, MSM},
ipa::msm::MSMIPA,
commitment::Verifier,
strategy::{Guard, VerificationStrategy},
},
transcript::{EncodedChallenge, TranscriptRead},
};
use ff::{Field, PrimeField};
use group::Group;
use halo2curves::{
pairing::{Engine, MillerLoopResult, MultiMillerLoop},
CurveAffine,
};
use halo2curves::pairing::{Engine, MultiMillerLoop};
use rand_core::OsRng;
use std::fmt::Debug;
/// Wrapper for linear verification accumulator
#[derive(Debug, Clone)]

View File

@ -2,33 +2,28 @@
mod test {
use crate::arithmetic::eval_polynomial;
use crate::plonk::Error;
use crate::poly::commitment::Blind;
use crate::poly::commitment::ParamsProver;
use crate::poly::commitment::{Blind, ParamsVerifier, MSM};
use crate::poly::query::PolynomialPointer;
use crate::poly::{
commitment::{CommitmentScheme, Params, Prover, Verifier},
query::{ProverQuery, VerifierQuery},
strategy::VerificationStrategy,
EvaluationDomain,
};
use crate::poly::{Coeff, Polynomial};
use crate::transcript::{
self, Blake2bRead, Blake2bWrite, Challenge255, EncodedChallenge, Keccak256Read,
Keccak256Write, TranscriptRead, TranscriptReadBuffer, TranscriptWrite,
TranscriptWriterBuffer,
Blake2bRead, Blake2bWrite, Challenge255, EncodedChallenge, Keccak256Read, Keccak256Write,
TranscriptReadBuffer, TranscriptWriterBuffer,
};
use ff::{Field, PrimeField, WithSmallOrderMulGroup};
use group::{Curve, Group};
use halo2curves::CurveAffine;
use rand_core::{OsRng, RngCore};
use std::io::{Read, Write};
use ff::WithSmallOrderMulGroup;
use group::Curve;
use rand_core::OsRng;
#[test]
fn test_roundtrip_ipa() {
use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA};
use crate::poly::ipa::multiopen::{ProverIPA, VerifierIPA};
use crate::poly::ipa::strategy::AccumulatorStrategy;
use halo2curves::pasta::{Ep, EqAffine, Fp};
use halo2curves::pasta::EqAffine;
const K: u32 = 4;
@ -65,7 +60,7 @@ mod test {
use crate::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA};
use crate::poly::ipa::multiopen::{ProverIPA, VerifierIPA};
use crate::poly::ipa::strategy::AccumulatorStrategy;
use halo2curves::pasta::{Ep, EqAffine, Fp};
use halo2curves::pasta::EqAffine;
const K: u32 = 4;
@ -102,8 +97,7 @@ mod test {
use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
use crate::poly::kzg::multiopen::{ProverGWC, VerifierGWC};
use crate::poly::kzg::strategy::AccumulatorStrategy;
use halo2curves::bn256::{Bn256, G1Affine};
use halo2curves::pairing::Engine;
use halo2curves::bn256::Bn256;
const K: u32 = 4;
@ -134,8 +128,7 @@ mod test {
use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
use crate::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use crate::poly::kzg::strategy::AccumulatorStrategy;
use halo2curves::bn256::{Bn256, G1Affine};
use halo2curves::pairing::Engine;
use halo2curves::bn256::Bn256;
const K: u32 = 4;

View File

@ -1,11 +1,10 @@
use std::{fmt::Debug, ops::Deref};
use std::fmt::Debug;
use super::commitment::{Blind, CommitmentScheme, Params, MSM};
use super::commitment::{Blind, MSM};
use crate::{
arithmetic::eval_polynomial,
poly::{commitment, Coeff, Polynomial},
poly::{Coeff, Polynomial},
};
use ff::Field;
use halo2curves::CurveAffine;
pub trait Query<F>: Sized + Clone + Send + Sync {
@ -100,6 +99,7 @@ impl<'com, C: CurveAffine, M: MSM<C>> Clone for VerifierQuery<'com, C, M> {
}
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub enum CommitmentReference<'r, C: CurveAffine, M: MSM<C>> {
Commitment(&'r C),

View File

@ -1,11 +1,5 @@
use halo2curves::CurveAffine;
use rand_core::RngCore;
use super::commitment::{CommitmentScheme, Verifier, MSM};
use crate::{
plonk::Error,
transcript::{EncodedChallenge, TranscriptRead},
};
use super::commitment::{CommitmentScheme, Verifier};
use crate::plonk::Error;
/// Guards is unfinished verification result. Implement this to construct various
/// verification strategies such as aggregation and recursion.