commit 51e448641d6cbcd582afa22cd8475f8c3086dad7 Author: Jack Grigg Date: Wed Aug 2 11:17:25 2017 +0100 Squashed 'src/snark/' content from commit 9ada3f8 git-subtree-dir: src/snark git-subtree-split: 9ada3f84ab484c57b2247c2f41091fd6a0916573 diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..f6fb450a2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,49 @@ +*.o +*.a +*.so +*.d +depinst/ +depsrc/ +README.html +doxygen/ +src/gadgetlib2/examples/tutorial +src/gadgetlib2/tests/gadgetlib2_test + +src/algebra/curves/tests/test_bilinearity +src/algebra/curves/tests/test_groups +src/algebra/fields/tests/test_fields +src/common/routing_algorithms/profiling/profile_routing_algorithms +src/common/routing_algorithms/tests/test_routing_algorithms +src/gadgetlib1/gadgets/cpu_checkers/fooram/examples/test_fooram +src/gadgetlib1/gadgets/hashes/knapsack/tests/test_knapsack_gadget +src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget +src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets +src/gadgetlib1/gadgets/routing/profiling/profile_routing_gadgets +src/gadgetlib1/gadgets/set_commitment/tests/test_set_commitment_gadget +src/gadgetlib1/gadgets/verifiers/tests/test_r1cs_ppzksnark_verifier_gadget +src/reductions/ram_to_r1cs/examples/demo_arithmetization +src/relations/arithmetic_programs/qap/tests/test_qap +src/relations/arithmetic_programs/ssp/tests/test_ssp +src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/profiling/profile_r1cs_mp_ppzkpcd +src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/tests/test_r1cs_mp_ppzkpcd +src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/profiling/profile_r1cs_sp_ppzkpcd +src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/tests/test_r1cs_sp_ppzkpcd +src/zk_proof_systems/ppzkadsnark/r1cs_ppzkadsnark/examples/demo_r1cs_ppzkadsnark +src/zk_proof_systems/ppzksnark/bacs_ppzksnark/profiling/profile_bacs_ppzksnark +src/zk_proof_systems/ppzksnark/bacs_ppzksnark/tests/test_bacs_ppzksnark +src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/profiling/profile_r1cs_gg_ppzksnark +src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/tests/test_r1cs_gg_ppzksnark +src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark +src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark +src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark +src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_generator +src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover +src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_verifier +src/zk_proof_systems/ppzksnark/ram_ppzksnark/profiling/profile_ram_ppzksnark +src/zk_proof_systems/ppzksnark/ram_ppzksnark/tests/test_ram_ppzksnark +src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/profiling/profile_tbcs_ppzksnark +src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/tests/test_tbcs_ppzksnark +src/zk_proof_systems/ppzksnark/uscs_ppzksnark/profiling/profile_uscs_ppzksnark +src/zk_proof_systems/ppzksnark/uscs_ppzksnark/tests/test_uscs_ppzksnark +src/zk_proof_systems/zksnark/ram_zksnark/profiling/profile_ram_zksnark +src/zk_proof_systems/zksnark/ram_zksnark/tests/test_ram_zksnark diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 000000000..1b2d7a247 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,19 @@ +SCIPR Lab: + Eli Ben-Sasson + Alessandro Chiesa + Daniel Genkin + Shaul Kfir + Eran Tromer + Madars Virza + +External contributors: + Michael Backes + Manuel Barbosa + Dario Fiore + Jens Groth + Joshua A. 
Kroll + Shigeo MITSUNARI + Raphael Reischuk + Tadanori TERUYA + Sean Bowe + Daira Hopwood diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..81cea11e1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +The libsnark library is developed by SCIPR Lab (http://scipr-lab.org) +and contributors. + +Copyright (c) 2012-2014 SCIPR Lab and contributors (see AUTHORS file). + +All files, with the exceptions below, are released under the MIT License: + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..13e54da68 --- /dev/null +++ b/Makefile @@ -0,0 +1,277 @@ +#******************************************************************************** +# Makefile for the libsnark library. +#******************************************************************************** +#* @author This file is part of libsnark, developed by SCIPR Lab +#* and contributors (see AUTHORS). +#* @copyright MIT license (see LICENSE file) +#*******************************************************************************/ + +# To override these, use "make OPTFLAGS=..." etc. +CURVE = BN128 +OPTFLAGS = -O2 -march=native -mtune=native +FEATUREFLAGS = -DUSE_ASM -DMONTGOMERY_OUTPUT + +# Initialize this using "CXXFLAGS=... make". The makefile appends to that. +CXXFLAGS += -std=c++11 -Wall -Wextra -Wno-unused-parameter -Wno-comment -Wfatal-errors $(OPTFLAGS) $(FEATUREFLAGS) -DCURVE_$(CURVE) + +DEPSRC = depsrc +DEPINST = depinst + +CXXFLAGS += -I$(DEPINST)/include -Isrc +LDFLAGS += -L$(DEPINST)/lib -Wl,-rpath,$(DEPINST)/lib +LDLIBS += -lgmpxx -lgmp -lboost_program_options +# OpenSSL and its dependencies (needed explicitly for static builds): +LDLIBS += -lcrypto -ldl -lz +# List of .a files to include within libsnark.a and libsnark.so: +AR_LIBS = +# List of library files to install: +INSTALL_LIBS = $(LIB_FILE) +# Sentinel file to check existence of this directory (since directories don't work as a Make dependency): +DEPINST_EXISTS = $(DEPINST)/.exists + + +COMPILE_GTEST := +ifneq ($(NO_GTEST),1) + GTESTDIR=/usr/src/gtest +# Compile GTest from sourcecode if we can (e.g., Ubuntu). Otherwise use precompiled one (e.g., Fedora). +# See https://code.google.com/p/googletest/wiki/FAQ#Why_is_it_not_recommended_to_install_a_pre-compiled_copy_of_Goog . 
+ COMPILE_GTEST :=$(shell test -d $(GTESTDIR) && echo -n 1) + GTEST_LDLIBS += -lgtest -lpthread +endif + +ifneq ($(NO_SUPERCOP),1) + SUPERCOP_LDLIBS += -lsupercop + INSTALL_LIBS += depinst/lib/libsupercop.a + # Would have been nicer to roll supercop into libsnark.a ("AR_LIBS += $(DEPINST)/lib/libsupercop.a"), but it doesn't support position-independent code (libsnark issue #20). +endif + +LIB_SRCS = \ + src/algebra/curves/alt_bn128/alt_bn128_g1.cpp \ + src/algebra/curves/alt_bn128/alt_bn128_g2.cpp \ + src/algebra/curves/alt_bn128/alt_bn128_init.cpp \ + src/algebra/curves/alt_bn128/alt_bn128_pairing.cpp \ + src/algebra/curves/alt_bn128/alt_bn128_pp.cpp \ + src/common/profiling.cpp \ + src/common/utils.cpp \ + src/gadgetlib1/constraint_profiling.cpp \ + +ifeq ($(CURVE),BN128) + LIB_SRCS += \ + src/algebra/curves/bn128/bn128_g1.cpp \ + src/algebra/curves/bn128/bn128_g2.cpp \ + src/algebra/curves/bn128/bn128_gt.cpp \ + src/algebra/curves/bn128/bn128_init.cpp \ + src/algebra/curves/bn128/bn128_pairing.cpp \ + src/algebra/curves/bn128/bn128_pp.cpp + + CXXFLAGS += -DBN_SUPPORT_SNARK + AR_LIBS += $(DEPINST)/lib/libzm.a +endif + +# FIXME: most of these are broken due to removed code. +DISABLED_EXECUTABLES = \ + src/algebra/curves/tests/test_bilinearity \ + src/algebra/curves/tests/test_groups \ + src/algebra/fields/tests/test_fields \ + src/common/routing_algorithms/profiling/profile_routing_algorithms \ + src/common/routing_algorithms/tests/test_routing_algorithms \ + src/gadgetlib1/gadgets/cpu_checkers/fooram/examples/test_fooram \ + src/gadgetlib1/gadgets/hashes/knapsack/tests/test_knapsack_gadget \ + src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget \ + src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets \ + src/gadgetlib1/gadgets/routing/profiling/profile_routing_gadgets \ + src/gadgetlib1/gadgets/set_commitment/tests/test_set_commitment_gadget \ + src/gadgetlib1/gadgets/verifiers/tests/test_r1cs_ppzksnark_verifier_gadget \ + src/reductions/ram_to_r1cs/examples/demo_arithmetization \ + src/relations/arithmetic_programs/qap/tests/test_qap \ + src/relations/arithmetic_programs/ssp/tests/test_ssp \ + src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/profiling/profile_r1cs_mp_ppzkpcd \ + src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/tests/test_r1cs_mp_ppzkpcd \ + src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/profiling/profile_r1cs_sp_ppzkpcd \ + src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/tests/test_r1cs_sp_ppzkpcd \ + src/zk_proof_systems/ppzksnark/bacs_ppzksnark/profiling/profile_bacs_ppzksnark \ + src/zk_proof_systems/ppzksnark/bacs_ppzksnark/tests/test_bacs_ppzksnark \ + src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/profiling/profile_r1cs_gg_ppzksnark \ + src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/tests/test_r1cs_gg_ppzksnark \ + src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark \ + src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark \ + src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark \ + src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_generator \ + src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover \ + src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_verifier \ + src/zk_proof_systems/ppzksnark/ram_ppzksnark/profiling/profile_ram_ppzksnark \ + src/zk_proof_systems/ppzksnark/ram_ppzksnark/tests/test_ram_ppzksnark \ + src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/profiling/profile_tbcs_ppzksnark \ 
+ src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/tests/test_tbcs_ppzksnark \ + src/zk_proof_systems/ppzksnark/uscs_ppzksnark/profiling/profile_uscs_ppzksnark \ + src/zk_proof_systems/ppzksnark/uscs_ppzksnark/tests/test_uscs_ppzksnark \ + src/zk_proof_systems/zksnark/ram_zksnark/profiling/profile_ram_zksnark \ + src/zk_proof_systems/zksnark/ram_zksnark/tests/test_ram_zksnark + +EXECUTABLES = \ + src/algebra/fields/tests/test_bigint + +EXECUTABLES_WITH_GTEST = \ + src/gadgetlib2/examples/tutorial \ + src/gadgetlib2/tests/gadgetlib2_test + +EXECUTABLES_WITH_SUPERCOP = \ + src/zk_proof_systems/ppzkadsnark/r1cs_ppzkadsnark/examples/demo_r1cs_ppzkadsnark + +DOCS = README.html + +LIBSNARK_A = libsnark.a + +# For documentation of the following options, see README.md . + +ifeq ($(NO_PROCPS),1) + CXXFLAGS += -DNO_PROCPS +else + LDLIBS += -lprocps +endif + +ifeq ($(LOWMEM),1) + CXXFLAGS += -DLOWMEM +endif + +ifeq ($(PROFILE_OP_COUNTS),1) + STATIC = 1 + CXXFLAGS += -DPROFILE_OP_COUNTS +endif + +ifeq ($(STATIC),1) + CXXFLAGS += -static -DSTATIC +else + CXXFLAGS += -fPIC +endif + +ifeq ($(MULTICORE),1) + CXXFLAGS += -DMULTICORE -fopenmp +endif + +ifeq ($(CPPDEBUG),1) + CXXFLAGS += -D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC + DEBUG = 1 +endif + +ifeq ($(DEBUG),1) + CXXFLAGS += -DDEBUG -ggdb3 +endif + +ifeq ($(PERFORMANCE),1) + OPTFLAGS = -O3 -march=native -mtune=native + CXXFLAGS += -DNDEBUG + # Enable link-time optimization: + CXXFLAGS += -flto -fuse-linker-plugin + LDFLAGS += -flto +endif + +LIB_OBJS =$(patsubst %.cpp,%.o,$(LIB_SRCS)) +EXEC_OBJS =$(patsubst %,%.o,$(EXECUTABLES) $(EXECUTABLES_WITH_GTEST) $(EXECUTABLES_WITH_SUPERCOP)) + +all: \ + $(if $(NO_GTEST),,$(EXECUTABLES_WITH_GTEST)) \ + $(if $(NO_SUPERCOP),,$(EXECUTABLES_WITH_SUPERCOP)) \ + $(EXECUTABLES) \ + $(if $(NO_DOCS),,doc) + +doc: $(DOCS) + +$(DEPINST_EXISTS): + # Create placeholder directories for installed dependencies. Some make settings (including the default) require actually running ./prepare-depends.sh to populate this directory. + mkdir -p $(DEPINST)/lib $(DEPINST)/include + touch $@ + +# In order to detect changes to #include dependencies. -MMD below generates a .d file for each .o file. Include the .d file. 
+-include $(patsubst %.o,%.d, $(LIB_OBJS) $(EXEC_OBJS) ) + +$(LIB_OBJS) $(EXEC_OBJS): %.o: %.cpp + $(CXX) -o $@ $< -c -MMD $(CXXFLAGS) + +LIBGTEST_A = $(DEPINST)/lib/libgtest.a + +$(LIBGTEST_A): $(GTESTDIR)/src/gtest-all.cc $(DEPINST_EXISTS) + $(CXX) -o $(DEPINST)/lib/gtest-all.o -I $(GTESTDIR) -c -isystem $(GTESTDIR)/include $< $(CXXFLAGS) + $(AR) -rv $(LIBGTEST_A) $(DEPINST)/lib/gtest-all.o + +# libsnark.a will contains all of our relevant object files, and we also mash in the .a files of relevant dependencies built by ./prepare-depends.sh +$(LIBSNARK_A): $(LIB_OBJS) $(AR_LIBS) + ( \ + echo "create $(LIBSNARK_A)"; \ + echo "addmod $(LIB_OBJS)"; \ + if [ -n "$(AR_LIBS)" ]; then for AR_LIB in $(AR_LIBS); do echo addlib $$AR_LIB; done; fi; \ + echo "save"; \ + echo "end"; \ + ) | $(AR) -M + $(AR) s $(LIBSNARK_A) + +libsnark.so: $(LIBSNARK_A) $(DEPINST_EXISTS) + $(CXX) -o $@ --shared -Wl,--whole-archive $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) -Wl,--no-whole-archive $(LDLIBS) + +src/gadgetlib2/tests/gadgetlib2_test: \ + src/gadgetlib2/tests/adapters_UTEST.cpp \ + src/gadgetlib2/tests/constraint_UTEST.cpp \ + src/gadgetlib2/tests/gadget_UTEST.cpp \ + src/gadgetlib2/tests/integration_UTEST.cpp \ + src/gadgetlib2/tests/protoboard_UTEST.cpp \ + src/gadgetlib2/tests/variable_UTEST.cpp + +$(EXECUTABLES): %: %.o $(LIBSNARK_A) $(DEPINST_EXISTS) + $(CXX) -o $@ $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(LDLIBS) + +$(EXECUTABLES_WITH_GTEST): %: %.o $(LIBSNARK_A) $(if $(COMPILE_GTEST),$(LIBGTEST_A)) $(DEPINST_EXISTS) + $(CXX) -o $@ $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(GTEST_LDLIBS) $(LDLIBS) + +$(EXECUTABLES_WITH_SUPERCOP): %: %.o $(LIBSNARK_A) $(DEPINST_EXISTS) + $(CXX) -o $@ $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(SUPERCOP_LDLIBS) $(LDLIBS) + + +ifeq ($(STATIC),1) +LIB_FILE = $(LIBSNARK_A) +else +LIB_FILE = libsnark.so +endif + +lib: $(LIB_FILE) + +$(DOCS): %.html: %.md + markdown_py -f $@ $^ -x toc -x extra --noisy +# TODO: Would be nice to enable "-x smartypants" but Ubuntu 12.04 doesn't support that. +# TODO: switch to redcarpet, to produce same output as GitHub's processing of README.md. But what about TOC? + +ifeq ($(PREFIX),) +install: + $(error Please provide PREFIX. E.g. 
make install PREFIX=/usr) +else +HEADERS_SRC=$(shell find src -name '*.hpp' -o -name '*.tcc') +HEADERS_DEST=$(patsubst src/%,$(PREFIX)/include/libsnark/%,$(HEADERS_SRC)) + +$(HEADERS_DEST): $(PREFIX)/include/libsnark/%: src/% + mkdir -p $(shell dirname $@) + cp $< $@ + +install: $(INSTALL_LIBS) $(HEADERS_DEST) $(DEPINST_EXISTS) + mkdir -p $(PREFIX)/lib + cp -v $(INSTALL_LIBS) $(PREFIX)/lib/ + cp -rv $(DEPINST)/include $(PREFIX) +endif + +doxy: + doxygen doxygen.conf + +# Clean generated files, except locally-compiled dependencies +clean: + $(RM) \ + $(LIB_OBJS) $(EXEC_OBJS) \ + $(EXECUTABLES) $(EXECUTABLES_WITH_GTEST) $(EXECUTABLES_WITH_SUPERCOP) \ + $(DOCS) \ + ${patsubst %.o,%.d,${LIB_OBJS} ${EXEC_OBJS}} \ + libsnark.so $(LIBSNARK_A) \ + $(RM) -fr doxygen/ \ + $(RM) $(LIBGTEST_A) $(DEPINST)/lib/gtest-all.o + +# Clean all, including locally-compiled dependencies +clean-all: clean + $(RM) -fr $(DEPSRC) $(DEPINST) + +.PHONY: all clean clean-all doc doxy lib install diff --git a/README.md b/README.md new file mode 100644 index 000000000..d5aa34006 --- /dev/null +++ b/README.md @@ -0,0 +1,628 @@ +libsnark: a C++ library for zkSNARK proofs +================================================================================ + +-------------------------------------------------------------------------------- +Authors +-------------------------------------------------------------------------------- + +The libsnark library is developed by the [SCIPR Lab] project and contributors +and is released under the MIT License (see the [LICENSE] file). + +Copyright (c) 2012-2014 SCIPR Lab and contributors (see [AUTHORS] file). + +-------------------------------------------------------------------------------- +[TOC] + + + +-------------------------------------------------------------------------------- +Overview +-------------------------------------------------------------------------------- + +This library implements __zkSNARK__ schemes, which are a cryptographic method +for proving/verifying, in zero knowledge, the integrity of computations. + +A computation can be expressed as an NP statement, in forms such as the following: + +- "The C program _foo_, when executed, returns exit code 0 if given the input _bar_ and some additional input _qux_." +- "The Boolean circuit _foo_ is satisfiable by some input _qux_." +- "The arithmetic circuit _foo_ accepts the partial assignment _bar_, when extended into some full assignment _qux_." +- "The set of constraints _foo_ is satisfiable by the partial assignment _bar_, when extended into some full assignment _qux_." + +A prover who knows the witness for the NP statement (i.e., a satisfying input/assignment) can produce a short proof attesting to the truth of the NP statement. This proof can be verified by anyone, and offers the following properties. + +- __Zero knowledge:__ + the verifier learns nothing from the proof beside the truth of the statement (i.e., the value _qux_, in the above examples, remains secret). +- __Succinctness:__ + the proof is short and easy to verify. +- __Non-interactivity:__ + the proof is a string (i.e. it does not require back-and-forth interaction between the prover and the verifier). +- __Soundness:__ + the proof is computationally sound (i.e., it is infeasible to fake a proof of a false NP statement). Such a proof system is also called an _argument_. +- __Proof of knowledge:__ + the proof attests not just that the NP statement is true, but also that the + prover knows why (e.g., knows a valid _qux_). 
+ +These properties are summarized by the _zkSNARK_ acronym, which stands for _Zero-Knowledge Succinct Non-interactive ARgument of Knowledge_ (though zkSNARKs are also known as +_succinct non-interactive computationally-sound zero-knowledge proofs of knowledge_). +For formal definitions and theoretical discussions about these, see +\[BCCT12], \[BCIOP13], and the references therein. + +The libsnark library currently provides a C++ implementation of: + +1. General-purpose proof systems: + 1. A preprocessing zkSNARK for the NP-complete language "R1CS" + (_Rank-1 Constraint Systems_), which is a language that is similar to arithmetic + circuit satisfiability. + 2. A preprocessing SNARK for a language of arithmetic circuits, "BACS" + (_Bilinear Arithmetic Circuit Satisfiability_). This simplifies the writing + of NP statements when the additional flexibility of R1CS is not needed. + Internally, it reduces to R1CS. + 3. A preprocessing SNARK for the language "USCS" + (_Unitary-Square Constraint Systems_). This abstracts and implements the core + contribution of \[DFGK14]. + 4. A preprocessing SNARK for a language of Boolean circuits, "TBCS" + (_Two-input Boolean Circuit Satisfiability_). Internally, it reduces to USCS. + This is much more efficient than going through R1CS. + 5. ADSNARK, a preprocessing SNARK for proving statements on authenticated + data, as described in \[BBFR15]. + 6. Proof-Carrying Data (PCD). This uses recursive composition of SNARKs, as + explained in \[BCCT13] and optimized in \[BCTV14b]. +2. Gadget libraries (gadgetlib1 and gadgetlib2) for constructing R1CS + instances out of modular "gadget" classes. +3. Examples of applications that use the above proof systems to prove + statements about: + 1. Several toy examples. + 2. Execution of TinyRAM machine code, as explained in \[BCTV14a] and + \[BCGTV13]. (Such machine code can be obtained, e.g., by compiling from C.) + This is easily adapted to any other Random Access Machine that satisfies a + simple load-store interface. + 3. A scalable zkSNARK for TinyRAM using Proof-Carrying Data, as explained in \[BCTV14b]. + 4. Zero-knowledge cluster MapReduce, as explained in \[CTV15]. + +The zkSNARK construction implemented by libsnark follows, extends, and +optimizes the approach described in \[BCTV14a], itself an extension of +\[BCGTV13], following the approach of \[BCIOP13] and \[GGPR13]. An alternative +implementation of the basic approach is the _Pinocchio_ system of \[PGHR13]. +See these references for discussions of efficiency aspects that arise in +practical use of such constructions, as well as security and trust +considerations. + +This scheme is a _preprocessing zkSNARK_ (_ppzkSNARK_): before proofs can be +created and verified, one needs to first decide on a size/circuit/system +representing the NP statements to be proved, and run a _generator_ algorithm to +create corresponding public parameters (a long proving key and a short +verification key). + +Using the library involves the following high-level steps: + +1. Express the statements to be proved as an R1CS (or any of the other + languages above, such as arithmetic circuits, Boolean circuits, or TinyRAM). + This is done by writing C++ code that constructs an R1CS, and linking this code + together with libsnark. +2. Use libsnark's generator algorithm to create the public parameters for this + statement (once and for all). +3. Use libsnark's prover algorithm to create proofs of true statements about + the satisfiability of the R1CS. +4.
Use libsnark's verifier algorithm to check proofs for alleged statements. + + +-------------------------------------------------------------------------------- +The NP-complete language R1CS +-------------------------------------------------------------------------------- + +The ppzkSNARK supports proving/verifying membership in a specific NP-complete +language: R1CS (*rank-1 constraint systems*). An instance of the language is +specified by a set of equations over a prime field F, and each equation looks like: + < A, (1,X) > * < B, (1,X) > = < C, (1,X) > +where A,B,C are vectors over F, and X is a vector of variables. + +In particular, arithmetic (as well as boolean) circuits are easily reducible to +this language by converting each gate into a rank-1 constraint. See \[BCGTV13] +Appendix E (and "System of Rank 1 Quadratic Equations") for more details about this. + + +-------------------------------------------------------------------------------- +Elliptic curve choices +-------------------------------------------------------------------------------- + +The ppzkSNARK can be instantiated with different parameter choices, depending on +which elliptic curve is used. The libsnark library currently provides three +options: + +* "edwards": + an instantiation based on an Edwards curve, providing 80 bits of security. + +* "bn128": + an instantiation based on a Barreto-Naehrig curve, providing 128 + bits of security. The underlying curve implementation is + \[ate-pairing], which has incorporated our patch that changes the + BN curve to one suitable for SNARK applications. + + * This implementation uses dynamically-generated machine code for the curve + arithmetic. Some modern systems disallow execution of code on the heap, and + will thus block this implementation. + + For example, on Fedora 20 at its default settings, you will get the error + `zmInit ERR:can't protect` when running this code. To solve this, + run `sudo setsebool -P allow_execheap 1` to allow execution, + or use `make CURVE=ALT_BN128` instead. + +* "alt_bn128": + an alternative to "bn128", somewhat slower but avoids dynamic code generation. + +Note that bn128 requires an x86-64 CPU while the other curve choices +should be architecture-independent; see [portability](#portability). + + +-------------------------------------------------------------------------------- +Gadget libraries +-------------------------------------------------------------------------------- + +The libsnark library currently provides two libraries for conveniently constructing +R1CS instances out of reusable "gadgets". Both libraries provide a way to construct +gadgets on other gadgets as well as additional explicit equations. In this way, +complex R1CS instances can be built bottom up. + +### gadgetlib1 + +This is a low-level library which exposes all features of the preprocessing +zkSNARK for R1CS. Its design is based on templates (as is the ppzkSNARK code) +to efficiently support working on multiple elliptic curves simultaneously. This +library is used for most of the constraint-building in libsnark, both internal +(reductions and Proof-Carrying Data) and example applications. + +### gadgetlib2 + +This is an alternative library for constructing systems of polynomial equations +and, in particular, also R1CS instances. It is better documented and easier to +use than gadgetlib1, and its interface does not use templates. However, fewer +useful gadgets are provided.
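To make the R1CS language and the generator/prover/verifier steps above concrete, here is a minimal sketch (not part of the original README) that drives gadgetlib1 and the R1CS ppzkSNARK directly. It assumes the alt_bn128 curve and the include paths of the `src/` tree added in this commit; the toy constraint `x * x = y` and all variable names are illustrative only.

    #include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
    #include "gadgetlib1/pb_variable.hpp"
    #include "gadgetlib1/protoboard.hpp"
    #include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp"

    using namespace libsnark;

    int main()
    {
        typedef alt_bn128_pp ppT;   // curve chosen explicitly here, independent of CURVE=
        typedef Fr<ppT> FieldT;
        ppT::init_public_params();  // must run before any field/group arithmetic

        // One rank-1 constraint: x * x = y, with y the public (primary) input
        // and x the secret witness (auxiliary input).
        protoboard<FieldT> pb;
        pb_variable<FieldT> y, x;
        y.allocate(pb, "y");        // allocated first, so it becomes the primary input
        x.allocate(pb, "x");
        pb.set_input_sizes(1);
        pb.add_r1cs_constraint(r1cs_constraint<FieldT>(x, x, y), "x*x=y");

        pb.val(y) = FieldT(9);
        pb.val(x) = FieldT(3);

        // Generator, prover, verifier: the three high-level steps listed above.
        const r1cs_constraint_system<FieldT> cs = pb.get_constraint_system();
        const r1cs_ppzksnark_keypair<ppT> keypair = r1cs_ppzksnark_generator<ppT>(cs);
        const r1cs_ppzksnark_proof<ppT> proof =
            r1cs_ppzksnark_prover<ppT>(keypair.pk, pb.primary_input(), pb.auxiliary_input());
        const bool ok = r1cs_ppzksnark_verifier_strong_IC<ppT>(keypair.vk, pb.primary_input(), proof);
        return ok ? 0 : 1;
    }

When libsnark is built and installed as a library (see "Using libsnark as a library" below), a program like this is compiled against the installed headers and linked with `-lsnark`, plus GMP and the other libraries listed in the Makefile.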
+ + +-------------------------------------------------------------------------------- +Security +-------------------------------------------------------------------------------- + +The theoretical security of the underlying mathematical constructions, and the +requisite assumptions, are analyzed in detail in the aforementioned research +papers. + +** +This code is a research-quality proof of concept, and has not +yet undergone extensive review or testing. It is thus not suitable, +as is, for use in critical or production systems. +** + +Known issues include the following: + +* The ppzkSNARK's generator and prover exhibit data-dependent running times + and memory usage. These form timing and cache-contention side channels, + which may be an issue in some applications. + +* Randomness is retrieved from /dev/urandom, but this should be + changed to a carefully considered (depending on system and threat + model) external, high-quality randomness source when creating + long-term proving/verification keys. + + +-------------------------------------------------------------------------------- +Build instructions +-------------------------------------------------------------------------------- + +The libsnark library relies on the following: + +- C++ build environment +- GMP for certain big-integer arithmetic +- libprocps for reporting memory usage +- GTest for some of the unit tests + +So far we have tested these only on Linux, though we have been able to make the library work, +with some features disabled (such as memory profiling or GTest tests), on Windows via Cygwin +and on Mac OS X. (If you succeed in achieving more complete ports of the library, please +let us know!) See also the notes on [portability](#portability) below. + +For example, on a fresh install of Ubuntu 14.04, install the following packages: + + $ sudo apt-get install build-essential git libgmp3-dev libprocps3-dev libgtest-dev python-markdown libboost-all-dev libssl-dev + +Or, on Fedora 20: + + $ sudo yum install gcc-c++ make git gmp-devel procps-ng-devel gtest-devel python-markdown + +Run the following to fetch dependencies from their GitHub repos and compile them. +(Not required if you set `CURVE` to other than the default `BN128` and also set `NO_SUPERCOP=1`.) + + $ ./prepare-depends.sh + +Then, to compile the library, tests, profiling harness and documentation, run: + + $ make + +To create just the HTML documentation, run + + $ make doc + +and then view the resulting `README.html` (which contains the very text you are reading now). + +To create Doxygen documentation summarizing all files, classes and functions, +with some (currently sparse) comments, install the `doxygen` and `graphviz` packages, then run + + $ make doxy + +(this may take a few minutes). Then view the resulting [`doxygen/index.html`](doxygen/index.html). + +### Using libsnark as a library + +To develop an application that uses libsnark, you could add it within the libsnark directory tree and adjust the Makefile, but it is far better to build libsnark as a (shared or static) library. You can then write your code in a separate directory tree, and link it against libsnark. + + +To build just the shared object library `libsnark.so`, run: + + $ make lib + +To build just the static library `libsnark.a`, run: + + $ make lib STATIC=1 + +Note that static compilation requires static versions of all libraries it depends on. +It may help to minimize these dependencies by appending +`CURVE=ALT_BN128 NO_PROCPS=1 NO_GTEST=1 NO_SUPERCOP=1`.
On Fedora 21, the requisite +library RPM dependencies are then: +`boost-static glibc-static gmp-static libstdc++-static openssl-static zlib-static + boost-devel glibc-devel gmp-devel libstdc++-devel openssl-devel`. + +To build *and install* the libsnark library: + + $ make install PREFIX=/install/path + +This will install `libsnark.so` into `/install/path/lib`; so your application should be linked using `-L/install/path/lib -lsnark`. It also installs the requisite headers into `/install/path/include`; so your application should be compiled using `-I/install/path/include`. + +In addition, unless you use `NO_SUPERCOP=1`, `libsupercop.a` will be installed and should be linked in using `-lsupercop`. + + +### Building on Windows using Cygwin +Install Cygwin using the graphical installer, including the `g++`, `libgmp` +and `git` packages. Then disable the dependencies not easily supported under Cygwin, +using: + + $ make NO_PROCPS=1 NO_GTEST=1 NO_DOCS=1 + + +### Building on Mac OS X + +On Mac OS X, install GMP from MacPorts (`port install gmp`). Then disable the +dependencies not easily supported under Mac OS X, using: + + $ make NO_PROCPS=1 NO_GTEST=1 NO_DOCS=1 + +MacPorts does not write its libraries into standard system folders, so you +might need to explicitly provide the paths to the header files and libraries by +appending `CXXFLAGS=-I/opt/local/include LDFLAGS=-L/opt/local/lib` to the line +above. Similarly, to pass the paths to ate-pairing you would run +`INC_DIR=-I/opt/local/include LIB_DIR=-L/opt/local/lib ./prepare-depends.sh` +instead of `./prepare-depends.sh` above. + +-------------------------------------------------------------------------------- +Tutorials +-------------------------------------------------------------------------------- + +libsnark includes a tutorial, and some usage examples, for the high-level API. + +* `src/gadgetlib1/examples1` contains a simple example for constructing a + constraint system using gadgetlib1. + +* `src/gadgetlib2/examples` contains a tutorial for using gadgetlib2 to express + NP statements as constraint systems. It introduces basic terminology, design + overview, and recommended programming style. It also shows how to invoke + ppzkSNARKs on such constraint systems. The main file, `tutorial.cpp`, builds + into a standalone executable. + +* `src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark.cpp` + constructs a simple constraint system and runs the ppzkSNARK. See below for how to + run it. + + +-------------------------------------------------------------------------------- +Executing profiling example +-------------------------------------------------------------------------------- + +The command + + $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 Fr + +exercises the ppzkSNARK (first generator, then prover, then verifier) on an +R1CS instance with 1000 equations and an input consisting of 10 field elements. + +(If you get the error `zmInit ERR:can't protect`, see the discussion +[above](#elliptic-curve-choices).) + +The command + + $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 bytes + +does the same but now the input consists of 10 bytes. + + +-------------------------------------------------------------------------------- +Build options +-------------------------------------------------------------------------------- + +The following flags change the behavior of the compiled code.
+ +* `make FEATUREFLAGS='-Dname1 -Dname2 ...'` + + Override the active conditional #define names (you can see the default at the top of the Makefile). + The next bullets list the most important conditionally-#defined features. + For example, `make FEATUREFLAGS='-DBINARY_OUTPUT'` enables binary output and disables the default + assembly optimizations and Montgomery-representation output. + +* define `BINARY_OUTPUT` + + In serialization, output raw binary data (instead of decimal, when not set). + +* `make CURVE=choice` / define `CURVE_choice` (where `choice` is one of: + ALT_BN128, BN128, EDWARDS, MNT4, MNT6) + + Set the default curve to one of the above (see [elliptic curve choices](#elliptic-curve-choices)). + +* `make DEBUG=1` / define `DEBUG` + + Print additional information for debugging purposes. + +* `make LOWMEM=1` / define `LOWMEM` + + Limit the size of multi-exponentiation tables, for low-memory platforms. + +* `make NO_DOCS=1` + + Do not generate HTML documentation, e.g. on platforms where Markdown is not easily available. + +* `make NO_PROCPS=1` + + Do not link against libprocps. This disables memory profiling. + +* `make NO_GTEST=1` + + Do not link against GTest. The gadgetlib2 tutorial and test suite won't be compiled. + +* `make NO_SUPERCOP=1` + + Do not link against SUPERCOP for optimized crypto. The ADSNARK executables will not be built. + +* `make MULTICORE=1` + + Enable parallelized execution of the ppzkSNARK generator and prover, using OpenMP. + This will utilize all cores on the CPU for heavyweight parallelizable operations such as + FFT and multiexponentiation. The default is single-core. + + To override the maximum number of cores used, set the environment variable `OMP_NUM_THREADS` + at runtime (not compile time), e.g., `OMP_NUM_THREADS=8 test_r1cs_sp_ppzkpcd`. It defaults + to the autodetected number of cores, but on some devices, dynamic core management confuses + OpenMP's autodetection, so setting `OMP_NUM_THREADS` is necessary for full utilization. + +* define `NO_PT_COMPRESSION` + + Do not use point compression. + This gives much faster serialization times, at the expense of ~2x larger + sizes for serialized keys and proofs. + +* define `MONTGOMERY_OUTPUT` (on by default) + + Serialize Fp elements as their Montgomery representations. If this + option is disabled then Fp elements are serialized as their + equivalence classes, which is slower but produces human-readable + output. + +* `make PROFILE_OP_COUNTS=1` / define `PROFILE_OP_COUNTS` + + Collect counts for field and curve operations inside static variables + of the corresponding algebraic objects. This option works for all + curves except bn128. + +* define `USE_ASM` (on by default) + + Use unrolled assembly routines for F[p] arithmetic and faster heap in + multi-exponentiation. (When not set, use GMP's `mpn_*` routines instead.) + +* define `USE_MIXED_ADDITION` + + Convert each element of the proving key and verification key to + affine coordinates. This allows using mixed addition formulas in + multiexponentiation and results in slightly faster prover and + verifier runtime at the expense of increased proving time. + +* `make PERFORMANCE=1` + + Enables compiler optimizations such as link-time optimization, and disables debugging aids. + (On some distributions this causes a `plugin needed to handle lto object` link error and `undefined reference`s, which can be remedied by `AR=gcc-ar make ...`.) + +Not all combinations are tested together or supported by every part of the codebase.
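The serialization flags above (`BINARY_OUTPUT`, `MONTGOMERY_OUTPUT`, `NO_PT_COMPRESSION`) are consumed inside libsnark; application code simply uses the stream operators that libsnark provides for its keys and proofs, and the compile-time flags determine the wire format. The following sketch is an illustration under assumptions (the helper names, file handling, and choice of alt_bn128 are mine, not from the README); it only round-trips if reader and writer were compiled with the same FEATUREFLAGS.

    #include <fstream>
    #include <string>

    #include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
    #include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp"

    typedef libsnark::alt_bn128_pp ppT;

    // Write a proof to disk. Whether this is decimal text or raw binary, and whether
    // field elements are in Montgomery form and points compressed, is fixed at
    // compile time by BINARY_OUTPUT, MONTGOMERY_OUTPUT and NO_PT_COMPRESSION.
    void save_proof(const libsnark::r1cs_ppzksnark_proof<ppT> &proof, const std::string &path)
    {
        std::ofstream out(path, std::ios::binary);
        out << proof;
    }

    // Read a proof back; the reader must have been built with the same flags.
    libsnark::r1cs_ppzksnark_proof<ppT> load_proof(const std::string &path)
    {
        libsnark::r1cs_ppzksnark_proof<ppT> proof;
        std::ifstream in(path, std::ios::binary);
        in >> proof;
        return proof;
    }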
+ + +-------------------------------------------------------------------------------- +Portability +-------------------------------------------------------------------------------- + +libsnark is written in fairly standard C++11. + +However, having been developed on Linux on x86-64 CPUs, libsnark has some limitations +with respect to portability. Specifically: + +1. libsnark's algebraic data structures assume little-endian byte order. + +2. Profiling routines use `clock_gettime` and `readproc` calls, which are Linux-specific. + +3. Random-number generation is done by reading from `/dev/urandom`, which is + specific to Unix-like systems. + +4. libsnark binary serialization routines (see `BINARY_OUTPUT` above) assume + a fixed machine word size (i.e. sizeof(mp_limb_t) for GMP's limb data type). + Objects serialized in binary on a 64-bit system cannot be de-serialized on + a 32-bit system, and vice versa. + (The decimal serialization routines have no such limitation.) + +5. libsnark requires a C++ compiler with good C++11 support. It has been + tested with g++ 4.7, g++ 4.8, and clang 3.4. + +6. On x86-64, we by default use highly optimized assembly implementations for some + operations (see `USE_ASM` above). On other architectures we fall back to a + portable C++ implementation, which is slower. + +Tested configurations include: + +* Debian jessie with g++ 4.7 on x86-64 +* Debian jessie with clang 3.4 on x86-64 +* Fedora 20/21 with g++ 4.8.2/4.9.2 on x86-64 and i686 +* Ubuntu 14.04 LTS with g++ 4.8 on x86-64 +* Ubuntu 14.04 LTS with g++ 4.8 on x86-32, for EDWARDS and ALT_BN128 curve choices +* Debian wheezy with g++ 4.7 on ARM little endian (Debian armel port) inside QEMU, for EDWARDS and ALT_BN128 curve choices +* Windows 7 with g++ 4.8.3 under Cygwin 1.7.30 on x86-64 with NO_PROCPS=1, NO_GTEST=1 and NO_DOCS=1, for EDWARDS and ALT_BN128 curve choices +* Mac OS X 10.9.4 (Mavericks) with Apple LLVM version 5.1 (based on LLVM 3.4svn) on x86-64 with NO_PROCPS=1, NO_GTEST=1 and NO_DOCS=1 + + +-------------------------------------------------------------------------------- +Directory structure +-------------------------------------------------------------------------------- + +The directory structure of the libsnark library is as follows: + +* src/ --- main C++ source code, containing the following modules: + * algebra/ --- fields and elliptic curve groups + * common/ --- miscellaneous utilities + * gadgetlib1/ --- gadgetlib1, a library to construct R1CS instances + * gadgets/ --- basic gadgets for gadgetlib1 + * gadgetlib2/ --- gadgetlib2, a library to construct R1CS instances + * qap/ --- quadratic arithmetic program + * domains/ --- support for fast interpolation/evaluation, by providing + FFTs and Lagrange-coefficient computations for various domains + * relations/ --- interfaces for expressing statement (relations between instances and witnesses) as various NP-complete languages + * constraint_satisfaction_problems/ --- R1CS and USCS languages + * circuit_satisfaction_problems/ --- Boolean and arithmetic circuit satisfiability languages + * ram_computations/ --- RAM computation languages + * zk_proof_systems --- interfaces and implementations of the proof systems + * reductions --- reductions between languages (used internally, but contains many examples of building constraints) + + Some of these module directories have the following subdirectories: + + * ... 
+ * examples/ --- example code and tutorials for this module + * tests/ --- unit tests for this module + + In particular, the top-level API examples are at `src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/` and `src/gadgetlib2/examples/`. + +* depsrc/ --- created by `prepare-depends.sh` for retrieved source code and local builds of external code + (currently: \[ate-pairing], and its dependency xbyak). + +* depinst/ --- created by `prepare-depends.sh` and `Makefile` + for local installation of locally-compiled dependencies. + +* doxygen/ --- created by `make doxy` and contains a Doxygen summary of all files, classes etc. in libsnark. + + +-------------------------------------------------------------------------------- +Further considerations +-------------------------------------------------------------------------------- + +### Multiexponentiation window size + +The ppzkSNARK's generator has to solve a fixed-base multi-exponentiation +problem. We use a window-based method in which the optimal window size depends +on the size of the multiexponentiation instance *and* the platform. + +On our benchmarking platform (a 3.40 GHz Intel Core i7-4770 CPU), we have +computed optimal windows for each curve, provided as +"fixed_base_exp_window_table" initialization sequences; see +`X_init.cpp` for X=edwards,bn128,alt_bn128. + +Performance on other platforms may not be optimal (but probably not far off). +Future releases of the libsnark library will include a tool that generates +optimal window sizes. + + +-------------------------------------------------------------------------------- +References +-------------------------------------------------------------------------------- + +\[BBFR15] [ + _ADSNARK: nearly practical and privacy-preserving proofs on authenticated data_ +](https://eprint.iacr.org/2014/617), + Michael Backes, Manuel Barbosa, Dario Fiore, Raphael M.
Reischuk, + IEEE Symposium on Security and Privacy (Oakland) 2015 + +\[BCCT12] [ + _From extractable collision resistance to succinct non-Interactive arguments of knowledge, and back again_ +](http://eprint.iacr.org/2011/443), + Nir Bitansky, Ran Canetti, Alessandro Chiesa, Eran Tromer, + Innovations in Computer Science (ITCS) 2012 + +\[BCCT13] [ + _Recursive composition and bootstrapping for SNARKs and proof-carrying data_ +](http://eprint.iacr.org/2012/095) + Nir Bitansky, Ran Canetti, Alessandro Chiesa, Eran Tromer, + Symposium on Theory of Computing (STOC) 13 + +\[BCGTV13] [ + _SNARKs for C: Verifying Program Executions Succinctly and in Zero Knowledge_ +](http://eprint.iacr.org/2013/507), + Eli Ben-Sasson, Alessandro Chiesa, Daniel Genkin, Eran Tromer, Madars Virza, + CRYPTO 2013 + +\[BCIOP13] [ + _Succinct Non-Interactive Arguments via Linear Interactive Proofs_ +](http://eprint.iacr.org/2012/718), + Nir Bitansky, Alessandro Chiesa, Yuval Ishai, Rafail Ostrovsky, Omer Paneth, + Theory of Cryptography Conference 2013 + +\[BCTV14a] [ + _Succinct Non-Interactive Zero Knowledge for a von Neumann Architecture_ +](http://eprint.iacr.org/2013/879), + Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza, + USENIX Security 2014 + +\[BCTV14b] [ + _Scalable succinct non-interactive arguments via cycles of elliptic curves_ +](https://eprint.iacr.org/2014/595), + Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza, + CRYPTO 2014 + +\[CTV15] [ + _Cluster computing in zero knowledge_ +](https://eprint.iacr.org/2015/377), + Alessandro Chiesa, Eran Tromer, Madars Virza, + Eurocrypt 2015 + +\[DFGK14] [ + Square span programs with applications to succinct NIZK arguments +](https://eprint.iacr.org/2014/718), + George Danezis, Cedric Fournet, Jens Groth, Markulf Kohlweiss, + ASIACCS 2014 + +\[GGPR13] [ + _Quadratic span programs and succinct NIZKs without PCPs_ +](http://eprint.iacr.org/2012/215), + Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova, + EUROCRYPT 2013 + +\[ate-pairing] [ + _High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_ +](https://github.com/herumi/ate-pairing), + MITSUNARI Shigeo, TERUYA Tadanori + +\[PGHR13] [ + _Pinocchio: Nearly Practical Verifiable Computation_ +](http://eprint.iacr.org/2013/279), + Bryan Parno, Craig Gentry, Jon Howell, Mariana Raykova, + IEEE Symposium on Security and Privacy (Oakland) 2013 + +[SCIPR Lab]: http://www.scipr-lab.org/ (Succinct Computational Integrity and Privacy Research Lab) + +[LICENSE]: LICENSE (LICENSE file in top directory of libsnark distribution) + +[AUTHORS]: AUTHORS (AUTHORS file in top directory of libsnark distribution) diff --git a/doxygen.conf b/doxygen.conf new file mode 100644 index 000000000..5fbe61681 --- /dev/null +++ b/doxygen.conf @@ -0,0 +1,1807 @@ +# Doxyfile 1.8.2 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. 
The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = libsnark + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. 
Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. + +STRIP_FROM_PATH = src + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. 
Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = tcc=C++ + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. 
+# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented classes, +# or namespaces to their corresponding documentation. Such a link can be +# prevented in individual cases by by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields will be shown inline in the documentation +# of the scope in which they are defined (i.e. file, namespace, or group +# documentation), provided this scope is documented. If set to NO (the default), +# structs, classes, and unions are shown on a separate page (for HTML and Man +# pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. 
And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penalty. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will roughly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +SYMBOL_CACHE_SIZE = 0 + +# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be +# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given +# their name and scope. Since this can be an expensive process and often the +# same symbol appear multiple times in the code, doxygen keeps a cache of +# pre-resolved symbols. If the cache is too small doxygen will become slower. +# If the cache is too large, memory is wasted. The cache size is given by this +# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. 
+ +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. 
+ +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. 
+# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce.
The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = src README.md + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.md *.c *.h *.cpp *.hpp *.tcc *.inc *.cc + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = Debug \ + Release + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain images that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = "perl -pe 's/^(libsnark: .*)$/$1 {#mainpage}/ if $.==1; s!//+ *(TODO|FIXME|XXX)!/// \\todo!'" + # The 1st replacement marks README.md as the main page. + # The 2nd replacement identifies additional TODO notations. + # These should be done with FILTER_PATTERNS instead, but it looks like shell escaping is different there. + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# none of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation.
+ +INLINE_SOURCES = YES + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C, C++ and Fortran comments will always remain visible. + +STRIP_CODE_COMMENTS = NO + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = doxygen + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. 
Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. +# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If left blank doxygen will +# generate a default style sheet. Note that it is recommended to use +# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this +# tag will in the future become obsolete. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional +# user-defined cascading style sheet that is included after the standard +# style sheets created by doxygen. Using this option one can overrule +# certain style aspects. This is preferred over using HTML_STYLESHEET +# since it does not replace the standard style sheet and is therefor more +# robust against future updates. Doxygen will copy the style sheet file to +# the output directory. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. 
Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of +# entries shown in the various tree structured indices initially; the user +# can expand and collapse entries dynamically later on. Doxygen will expand +# the tree to such a level that at most the specified number of entries are +# visible (unless a fully collapsed tree already exceeds this amount). +# So setting the number of entries 1 will produce a full collapsed tree by +# default. 0 is a special value representing an infinite number of entries +# and will result in a full expanded tree by default. + +HTML_INDEX_NUM_ENTRIES = 0 + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely +# identify the documentation publisher. This should be a reverse domain-name +# style string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. 
+ +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) +# at top of each HTML page. 
The value NO (the default) enables the index and +# the value YES disables it. Since the tabs have the same information as the +# navigation tree you can set this option to NO if you already set +# GENERATE_TREEVIEW to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. +# Since the tree basically has the same information as the tab index you +# could consider to set DISABLE_INDEX to NO when enabling this option. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation. Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you may also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = YES + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to +# the MathJax Content Delivery Network so you can quickly see the result without +# installing MathJax. However, it is strongly recommended to install a local +# copy of MathJax from http://www.mathjax.org before deployment. 
+ +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension +# names that should be enabled during MathJax rendering. + +MATHJAX_EXTENSIONS = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using Javascript. Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search. The disadvantages are that it is more difficult to setup +# and does not have live searching capabilities. + +SERVER_BASED_SEARCH = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4 + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = amsfonts + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. 
If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! + +LATEX_FOOTER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = NO + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See +# http://en.wikipedia.org/wiki/BibTeX for more info. + +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. 
+ +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. 
+ +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. 
+ +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. For each +# tag file the location of the external documentation should be added. The +# format of a tag file without this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths +# or URLs. Note that each tag file must have a unique name (where the name does +# NOT include the path). If a tag file is not located in the directory in which +# doxygen is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = NO + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = YES + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. 
When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will use the Helvetica font for all dot files that +# doxygen generates. When you want a differently looking font you can specify +# the font name using DOT_FONTNAME. You need to make sure dot is able to find +# the font, which can be done by putting it in a standard location or by setting +# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the Helvetica font. +# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to +# set the path where dot can find it. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside +# the class node. If there are many fields or methods and many nodes the +# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS +# threshold limits the number of items for each type to make the size more +# managable. Set this to 0 for no limit. Note that the threshold may be +# exceeded by 50% before the limit is enforced. + +UML_LIMIT_NUM_FIELDS = 10 + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. 
+ +CALL_GRAPH = YES + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = YES + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. If you choose svg you need to set +# HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible in IE 9+ (other browsers do not have this requirement). + +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# Note that this requires a modern browser other than Internet Explorer. +# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you +# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible. Older versions of IE do not have SVG support. + +INTERACTIVE_SVG = NO + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. 
This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/src/algebra/curves/alt_bn128/alt_bn128_g1.cpp b/src/algebra/curves/alt_bn128/alt_bn128_g1.cpp new file mode 100644 index 000000000..bf7f43d6f --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_g1.cpp @@ -0,0 +1,524 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp" + +namespace libsnark { + +#ifdef PROFILE_OP_COUNTS +long long alt_bn128_G1::add_cnt = 0; +long long alt_bn128_G1::dbl_cnt = 0; +#endif + +std::vector alt_bn128_G1::wnaf_window_table; +std::vector alt_bn128_G1::fixed_base_exp_window_table; +alt_bn128_G1 alt_bn128_G1::G1_zero; +alt_bn128_G1 alt_bn128_G1::G1_one; + +alt_bn128_G1::alt_bn128_G1() +{ + this->X = G1_zero.X; + this->Y = G1_zero.Y; + this->Z = G1_zero.Z; +} + +void alt_bn128_G1::print() const +{ + if (this->is_zero()) + { + printf("O\n"); + } + else + { + alt_bn128_G1 copy(*this); + copy.to_affine_coordinates(); + gmp_printf("(%Nd , %Nd)\n", + copy.X.as_bigint().data, alt_bn128_Fq::num_limbs, + copy.Y.as_bigint().data, alt_bn128_Fq::num_limbs); + } +} + +void alt_bn128_G1::print_coordinates() const +{ + if (this->is_zero()) + { + printf("O\n"); + } + else + { + gmp_printf("(%Nd : %Nd : %Nd)\n", + this->X.as_bigint().data, alt_bn128_Fq::num_limbs, + this->Y.as_bigint().data, alt_bn128_Fq::num_limbs, + this->Z.as_bigint().data, alt_bn128_Fq::num_limbs); + } +} + +void alt_bn128_G1::to_affine_coordinates() +{ + if (this->is_zero()) + { + this->X = alt_bn128_Fq::zero(); + this->Y = alt_bn128_Fq::one(); + this->Z = alt_bn128_Fq::zero(); + } + else + { + alt_bn128_Fq Z_inv = Z.inverse(); + alt_bn128_Fq Z2_inv = Z_inv.squared(); + alt_bn128_Fq Z3_inv = Z2_inv * Z_inv; + this->X = this->X * Z2_inv; + this->Y = this->Y * Z3_inv; + this->Z = alt_bn128_Fq::one(); + } +} + +void alt_bn128_G1::to_special() +{ + this->to_affine_coordinates(); +} + +bool alt_bn128_G1::is_special() const +{ + return (this->is_zero() || this->Z == alt_bn128_Fq::one()); +} + +bool alt_bn128_G1::is_zero() const +{ + return (this->Z.is_zero()); +} + +bool alt_bn128_G1::operator==(const alt_bn128_G1 &other) const +{ + if (this->is_zero()) + { + return other.is_zero(); + } + + if (other.is_zero()) + { + return false; + } + + /* now neither is O */ + + // using Jacobian coordinates so: + // 
(X1:Y1:Z1) = (X2:Y2:Z2) + // iff + // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3 + // iff + // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3 + + alt_bn128_Fq Z1_squared = (this->Z).squared(); + alt_bn128_Fq Z2_squared = (other.Z).squared(); + + if ((this->X * Z2_squared) != (other.X * Z1_squared)) + { + return false; + } + + alt_bn128_Fq Z1_cubed = (this->Z) * Z1_squared; + alt_bn128_Fq Z2_cubed = (other.Z) * Z2_squared; + + if ((this->Y * Z2_cubed) != (other.Y * Z1_cubed)) + { + return false; + } + + return true; +} + +bool alt_bn128_G1::operator!=(const alt_bn128_G1& other) const +{ + return !(operator==(other)); +} + +alt_bn128_G1 alt_bn128_G1::operator+(const alt_bn128_G1 &other) const +{ + // handle special cases having to do with O + if (this->is_zero()) + { + return other; + } + + if (other.is_zero()) + { + return *this; + } + + // no need to handle points of order 2,4 + // (they cannot exist in a prime-order subgroup) + + // check for doubling case + + // using Jacobian coordinates so: + // (X1:Y1:Z1) = (X2:Y2:Z2) + // iff + // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3 + // iff + // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3 + + alt_bn128_Fq Z1Z1 = (this->Z).squared(); + alt_bn128_Fq Z2Z2 = (other.Z).squared(); + + alt_bn128_Fq U1 = this->X * Z2Z2; + alt_bn128_Fq U2 = other.X * Z1Z1; + + alt_bn128_Fq Z1_cubed = (this->Z) * Z1Z1; + alt_bn128_Fq Z2_cubed = (other.Z) * Z2Z2; + + alt_bn128_Fq S1 = (this->Y) * Z2_cubed; // S1 = Y1 * Z2 * Z2Z2 + alt_bn128_Fq S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1 + + if (U1 == U2 && S1 == S2) + { + // dbl case; nothing of above can be reused + return this->dbl(); + } + + // rest of add case + alt_bn128_Fq H = U2 - U1; // H = U2-U1 + alt_bn128_Fq S2_minus_S1 = S2-S1; + alt_bn128_Fq I = (H+H).squared(); // I = (2 * H)^2 + alt_bn128_Fq J = H * I; // J = H * I + alt_bn128_Fq r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1) + alt_bn128_Fq V = U1 * I; // V = U1 * I + alt_bn128_Fq X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V + alt_bn128_Fq S1_J = S1 * J; + alt_bn128_Fq Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J + alt_bn128_Fq Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H + + return alt_bn128_G1(X3, Y3, Z3); +} + +alt_bn128_G1 alt_bn128_G1::operator-() const +{ + return alt_bn128_G1(this->X, -(this->Y), this->Z); +} + + +alt_bn128_G1 alt_bn128_G1::operator-(const alt_bn128_G1 &other) const +{ + return (*this) + (-other); +} + +alt_bn128_G1 alt_bn128_G1::add(const alt_bn128_G1 &other) const +{ + // handle special cases having to do with O + if (this->is_zero()) + { + return other; + } + + if (other.is_zero()) + { + return *this; + } + + // no need to handle points of order 2,4 + // (they cannot exist in a prime-order subgroup) + + // handle double case + if (this->operator==(other)) + { + return this->dbl(); + } + +#ifdef PROFILE_OP_COUNTS + this->add_cnt++; +#endif + // NOTE: does not handle O and pts of order 2,4 + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + + alt_bn128_Fq Z1Z1 = (this->Z).squared(); // Z1Z1 = Z1^2 + alt_bn128_Fq Z2Z2 = (other.Z).squared(); // Z2Z2 = Z2^2 + alt_bn128_Fq U1 = (this->X) * Z2Z2; // U1 = X1 * Z2Z2 + alt_bn128_Fq U2 = (other.X) * Z1Z1; // U2 = X2 * Z1Z1 + alt_bn128_Fq S1 = (this->Y) * (other.Z) * Z2Z2; // S1 = Y1 * Z2 * Z2Z2 + alt_bn128_Fq S2 = (other.Y) * (this->Z) * Z1Z1; // S2 = Y2 * Z1 * Z1Z1 + alt_bn128_Fq H = U2 - U1; // H = U2-U1 + alt_bn128_Fq S2_minus_S1 = S2-S1; + alt_bn128_Fq I = 
(H+H).squared(); // I = (2 * H)^2 + alt_bn128_Fq J = H * I; // J = H * I + alt_bn128_Fq r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1) + alt_bn128_Fq V = U1 * I; // V = U1 * I + alt_bn128_Fq X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V + alt_bn128_Fq S1_J = S1 * J; + alt_bn128_Fq Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J + alt_bn128_Fq Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H + + return alt_bn128_G1(X3, Y3, Z3); +} + +alt_bn128_G1 alt_bn128_G1::mixed_add(const alt_bn128_G1 &other) const +{ +#ifdef DEBUG + assert(other.is_special()); +#endif + + // handle special cases having to do with O + if (this->is_zero()) + { + return other; + } + + if (other.is_zero()) + { + return *this; + } + + // no need to handle points of order 2,4 + // (they cannot exist in a prime-order subgroup) + + // check for doubling case + + // using Jacobian coordinates so: + // (X1:Y1:Z1) = (X2:Y2:Z2) + // iff + // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3 + // iff + // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3 + + // we know that Z2 = 1 + + const alt_bn128_Fq Z1Z1 = (this->Z).squared(); + + const alt_bn128_Fq &U1 = this->X; + const alt_bn128_Fq U2 = other.X * Z1Z1; + + const alt_bn128_Fq Z1_cubed = (this->Z) * Z1Z1; + + const alt_bn128_Fq &S1 = (this->Y); // S1 = Y1 * Z2 * Z2Z2 + const alt_bn128_Fq S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1 + + if (U1 == U2 && S1 == S2) + { + // dbl case; nothing of above can be reused + return this->dbl(); + } + +#ifdef PROFILE_OP_COUNTS + this->add_cnt++; +#endif + + // NOTE: does not handle O and pts of order 2,4 + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl + alt_bn128_Fq H = U2-(this->X); // H = U2-X1 + alt_bn128_Fq HH = H.squared() ; // HH = H&2 + alt_bn128_Fq I = HH+HH; // I = 4*HH + I = I + I; + alt_bn128_Fq J = H*I; // J = H*I + alt_bn128_Fq r = S2-(this->Y); // r = 2*(S2-Y1) + r = r + r; + alt_bn128_Fq V = (this->X) * I ; // V = X1*I + alt_bn128_Fq X3 = r.squared()-J-V-V; // X3 = r^2-J-2*V + alt_bn128_Fq Y3 = (this->Y)*J; // Y3 = r*(V-X3)-2*Y1*J + Y3 = r*(V-X3) - Y3 - Y3; + alt_bn128_Fq Z3 = ((this->Z)+H).squared() - Z1Z1 - HH; // Z3 = (Z1+H)^2-Z1Z1-HH + + return alt_bn128_G1(X3, Y3, Z3); +} + +alt_bn128_G1 alt_bn128_G1::dbl() const +{ +#ifdef PROFILE_OP_COUNTS + this->dbl_cnt++; +#endif + // handle point at infinity + if (this->is_zero()) + { + return (*this); + } + + // no need to handle points of order 2,4 + // (they cannot exist in a prime-order subgroup) + + // NOTE: does not handle O and pts of order 2,4 + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + + alt_bn128_Fq A = (this->X).squared(); // A = X1^2 + alt_bn128_Fq B = (this->Y).squared(); // B = Y1^2 + alt_bn128_Fq C = B.squared(); // C = B^2 + alt_bn128_Fq D = (this->X + B).squared() - A - C; + D = D+D; // D = 2 * ((X1 + B)^2 - A - C) + alt_bn128_Fq E = A + A + A; // E = 3 * A + alt_bn128_Fq F = E.squared(); // F = E^2 + alt_bn128_Fq X3 = F - (D+D); // X3 = F - 2 D + alt_bn128_Fq eightC = C+C; + eightC = eightC + eightC; + eightC = eightC + eightC; + alt_bn128_Fq Y3 = E * (D - X3) - eightC; // Y3 = E * (D - X3) - 8 * C + alt_bn128_Fq Y1Z1 = (this->Y)*(this->Z); + alt_bn128_Fq Z3 = Y1Z1 + Y1Z1; // Z3 = 2 * Y1 * Z1 + + return alt_bn128_G1(X3, Y3, Z3); +} + +bool alt_bn128_G1::is_well_formed() const +{ + if (this->is_zero()) + { + return true; + } + else + { + /* + y^2 = x^3 + b + + We are using Jacobian coordinates, so equation we need to check 
is actually + + (y/z^3)^2 = (x/z^2)^3 + b + y^2 / z^6 = x^3 / z^6 + b + y^2 = x^3 + b z^6 + */ + alt_bn128_Fq X2 = this->X.squared(); + alt_bn128_Fq Y2 = this->Y.squared(); + alt_bn128_Fq Z2 = this->Z.squared(); + + alt_bn128_Fq X3 = this->X * X2; + alt_bn128_Fq Z3 = this->Z * Z2; + alt_bn128_Fq Z6 = Z3.squared(); + + return (Y2 == X3 + alt_bn128_coeff_b * Z6); + } +} + +alt_bn128_G1 alt_bn128_G1::zero() +{ + return G1_zero; +} + +alt_bn128_G1 alt_bn128_G1::one() +{ + return G1_one; +} + +alt_bn128_G1 alt_bn128_G1::random_element() +{ + return (scalar_field::random_element().as_bigint()) * G1_one; +} + +std::ostream& operator<<(std::ostream &out, const alt_bn128_G1 &g) +{ + alt_bn128_G1 copy(g); + copy.to_affine_coordinates(); + + out << (copy.is_zero() ? 1 : 0) << OUTPUT_SEPARATOR; +#ifdef NO_PT_COMPRESSION + out << copy.X << OUTPUT_SEPARATOR << copy.Y; +#else + /* storing LSB of Y */ + out << copy.X << OUTPUT_SEPARATOR << (copy.Y.as_bigint().data[0] & 1); +#endif + + return out; +} + +std::istream& operator>>(std::istream &in, alt_bn128_G1 &g) +{ + char is_zero; + alt_bn128_Fq tX, tY; + +#ifdef NO_PT_COMPRESSION + in >> is_zero >> tX >> tY; + is_zero -= '0'; +#else + in.read((char*)&is_zero, 1); // this reads is_zero; + is_zero -= '0'; + consume_OUTPUT_SEPARATOR(in); + + unsigned char Y_lsb; + in >> tX; + consume_OUTPUT_SEPARATOR(in); + in.read((char*)&Y_lsb, 1); + Y_lsb -= '0'; + + // y = +/- sqrt(x^3 + b) + if (!is_zero) + { + alt_bn128_Fq tX2 = tX.squared(); + alt_bn128_Fq tY2 = tX2*tX + alt_bn128_coeff_b; + tY = tY2.sqrt(); + + if ((tY.as_bigint().data[0] & 1) != Y_lsb) + { + tY = -tY; + } + } +#endif + // using Jacobian coordinates + if (!is_zero) + { + g.X = tX; + g.Y = tY; + g.Z = alt_bn128_Fq::one(); + } + else + { + g = alt_bn128_G1::zero(); + } + + return in; +} + +std::ostream& operator<<(std::ostream& out, const std::vector &v) +{ + out << v.size() << "\n"; + for (const alt_bn128_G1& t : v) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +std::istream& operator>>(std::istream& in, std::vector &v) +{ + v.clear(); + + size_t s; + in >> s; + consume_newline(in); + + v.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + alt_bn128_G1 g; + in >> g; + consume_OUTPUT_NEWLINE(in); + v.emplace_back(g); + } + + return in; +} + +template<> +void batch_to_special_all_non_zeros(std::vector &vec) +{ + std::vector Z_vec; + Z_vec.reserve(vec.size()); + + for (auto &el: vec) + { + Z_vec.emplace_back(el.Z); + } + batch_invert(Z_vec); + + const alt_bn128_Fq one = alt_bn128_Fq::one(); + + for (size_t i = 0; i < vec.size(); ++i) + { + alt_bn128_Fq Z2 = Z_vec[i].squared(); + alt_bn128_Fq Z3 = Z_vec[i] * Z2; + + vec[i].X = vec[i].X * Z2; + vec[i].Y = vec[i].Y * Z3; + vec[i].Z = one; + } +} + +} // libsnark diff --git a/src/algebra/curves/alt_bn128/alt_bn128_g1.hpp b/src/algebra/curves/alt_bn128/alt_bn128_g1.hpp new file mode 100644 index 000000000..da11a2e8c --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_g1.hpp @@ -0,0 +1,95 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef ALT_BN128_G1_HPP_ +#define ALT_BN128_G1_HPP_ +#include +#include "algebra/curves/alt_bn128/alt_bn128_init.hpp" +#include "algebra/curves/curve_utils.hpp" + +namespace libsnark { + +class alt_bn128_G1; +std::ostream& operator<<(std::ostream &, const alt_bn128_G1&); +std::istream& operator>>(std::istream &, alt_bn128_G1&); + +class alt_bn128_G1 { +public: +#ifdef PROFILE_OP_COUNTS + static long long add_cnt; + static long long dbl_cnt; +#endif + static std::vector wnaf_window_table; + static std::vector fixed_base_exp_window_table; + static alt_bn128_G1 G1_zero; + static alt_bn128_G1 G1_one; + + typedef alt_bn128_Fq base_field; + typedef alt_bn128_Fr scalar_field; + + alt_bn128_Fq X, Y, Z; + + // using Jacobian coordinates + alt_bn128_G1(); + alt_bn128_G1(const alt_bn128_Fq& X, const alt_bn128_Fq& Y, const alt_bn128_Fq& Z) : X(X), Y(Y), Z(Z) {}; + + void print() const; + void print_coordinates() const; + + void to_affine_coordinates(); + void to_special(); + bool is_special() const; + + bool is_zero() const; + + bool operator==(const alt_bn128_G1 &other) const; + bool operator!=(const alt_bn128_G1 &other) const; + + alt_bn128_G1 operator+(const alt_bn128_G1 &other) const; + alt_bn128_G1 operator-() const; + alt_bn128_G1 operator-(const alt_bn128_G1 &other) const; + + alt_bn128_G1 add(const alt_bn128_G1 &other) const; + alt_bn128_G1 mixed_add(const alt_bn128_G1 &other) const; + alt_bn128_G1 dbl() const; + + bool is_well_formed() const; + + static alt_bn128_G1 zero(); + static alt_bn128_G1 one(); + static alt_bn128_G1 random_element(); + + static size_t size_in_bits() { return base_field::size_in_bits() + 1; } + static bigint base_field_char() { return base_field::field_char(); } + static bigint order() { return scalar_field::field_char(); } + + friend std::ostream& operator<<(std::ostream &out, const alt_bn128_G1 &g); + friend std::istream& operator>>(std::istream &in, alt_bn128_G1 &g); +}; + +template +alt_bn128_G1 operator*(const bigint &lhs, const alt_bn128_G1 &rhs) +{ + return scalar_mul(rhs, lhs); +} + +template& modulus_p> +alt_bn128_G1 operator*(const Fp_model &lhs, const alt_bn128_G1 &rhs) +{ + return scalar_mul(rhs, lhs.as_bigint()); +} + +std::ostream& operator<<(std::ostream& out, const std::vector &v); +std::istream& operator>>(std::istream& in, std::vector &v); + +template +void batch_to_special_all_non_zeros(std::vector &vec); +template<> +void batch_to_special_all_non_zeros(std::vector &vec); + +} // libsnark +#endif // ALT_BN128_G1_HPP_ diff --git a/src/algebra/curves/alt_bn128/alt_bn128_g2.cpp b/src/algebra/curves/alt_bn128/alt_bn128_g2.cpp new file mode 100644 index 000000000..c4152e437 --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_g2.cpp @@ -0,0 +1,505 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp" + +namespace libsnark { + +#ifdef PROFILE_OP_COUNTS +long long alt_bn128_G2::add_cnt = 0; +long long alt_bn128_G2::dbl_cnt = 0; +#endif + +std::vector alt_bn128_G2::wnaf_window_table; +std::vector alt_bn128_G2::fixed_base_exp_window_table; +alt_bn128_G2 alt_bn128_G2::G2_zero; +alt_bn128_G2 alt_bn128_G2::G2_one; + +alt_bn128_G2::alt_bn128_G2() +{ + this->X = G2_zero.X; + this->Y = G2_zero.Y; + this->Z = G2_zero.Z; +} + +alt_bn128_Fq2 alt_bn128_G2::mul_by_b(const alt_bn128_Fq2 &elt) +{ + return alt_bn128_Fq2(alt_bn128_twist_mul_by_b_c0 * elt.c0, alt_bn128_twist_mul_by_b_c1 * elt.c1); +} + +void alt_bn128_G2::print() const +{ + if (this->is_zero()) + { + printf("O\n"); + } + else + { + alt_bn128_G2 copy(*this); + copy.to_affine_coordinates(); + gmp_printf("(%Nd*z + %Nd , %Nd*z + %Nd)\n", + copy.X.c1.as_bigint().data, alt_bn128_Fq::num_limbs, + copy.X.c0.as_bigint().data, alt_bn128_Fq::num_limbs, + copy.Y.c1.as_bigint().data, alt_bn128_Fq::num_limbs, + copy.Y.c0.as_bigint().data, alt_bn128_Fq::num_limbs); + } +} + +void alt_bn128_G2::print_coordinates() const +{ + if (this->is_zero()) + { + printf("O\n"); + } + else + { + gmp_printf("(%Nd*z + %Nd : %Nd*z + %Nd : %Nd*z + %Nd)\n", + this->X.c1.as_bigint().data, alt_bn128_Fq::num_limbs, + this->X.c0.as_bigint().data, alt_bn128_Fq::num_limbs, + this->Y.c1.as_bigint().data, alt_bn128_Fq::num_limbs, + this->Y.c0.as_bigint().data, alt_bn128_Fq::num_limbs, + this->Z.c1.as_bigint().data, alt_bn128_Fq::num_limbs, + this->Z.c0.as_bigint().data, alt_bn128_Fq::num_limbs); + } +} + +void alt_bn128_G2::to_affine_coordinates() +{ + if (this->is_zero()) + { + this->X = alt_bn128_Fq2::zero(); + this->Y = alt_bn128_Fq2::one(); + this->Z = alt_bn128_Fq2::zero(); + } + else + { + alt_bn128_Fq2 Z_inv = Z.inverse(); + alt_bn128_Fq2 Z2_inv = Z_inv.squared(); + alt_bn128_Fq2 Z3_inv = Z2_inv * Z_inv; + this->X = this->X * Z2_inv; + this->Y = this->Y * Z3_inv; + this->Z = alt_bn128_Fq2::one(); + } +} + +void alt_bn128_G2::to_special() +{ + this->to_affine_coordinates(); +} + +bool alt_bn128_G2::is_special() const +{ + return (this->is_zero() || this->Z == alt_bn128_Fq2::one()); +} + +bool alt_bn128_G2::is_zero() const +{ + return (this->Z.is_zero()); +} + +bool alt_bn128_G2::operator==(const alt_bn128_G2 &other) const +{ + if (this->is_zero()) + { + return other.is_zero(); + } + + if (other.is_zero()) + { + return false; + } + + /* now neither is O */ + + // using Jacobian coordinates so: + // (X1:Y1:Z1) = (X2:Y2:Z2) + // iff + // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3 + // iff + // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3 + + alt_bn128_Fq2 Z1_squared = (this->Z).squared(); + alt_bn128_Fq2 Z2_squared = (other.Z).squared(); + + if ((this->X * Z2_squared) != (other.X * Z1_squared)) + { + return false; + } + + alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1_squared; + alt_bn128_Fq2 Z2_cubed = (other.Z) * Z2_squared; + + if ((this->Y * Z2_cubed) != (other.Y * Z1_cubed)) + { + return false; + } + + return true; +} + +bool alt_bn128_G2::operator!=(const alt_bn128_G2& other) const +{ + return !(operator==(other)); +} + +alt_bn128_G2 alt_bn128_G2::operator+(const alt_bn128_G2 &other) const +{ + // handle special cases having to do with O + if (this->is_zero()) + { + return other; + } + + if (other.is_zero()) + { + return *this; + } + + // no need to handle points of order 2,4 + // (they cannot 
exist in a prime-order subgroup) + + // check for doubling case + + // using Jacobian coordinates so: + // (X1:Y1:Z1) = (X2:Y2:Z2) + // iff + // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3 + // iff + // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3 + + alt_bn128_Fq2 Z1Z1 = (this->Z).squared(); + alt_bn128_Fq2 Z2Z2 = (other.Z).squared(); + + alt_bn128_Fq2 U1 = this->X * Z2Z2; + alt_bn128_Fq2 U2 = other.X * Z1Z1; + + alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1Z1; + alt_bn128_Fq2 Z2_cubed = (other.Z) * Z2Z2; + + alt_bn128_Fq2 S1 = (this->Y) * Z2_cubed; // S1 = Y1 * Z2 * Z2Z2 + alt_bn128_Fq2 S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1 + + if (U1 == U2 && S1 == S2) + { + // dbl case; nothing of above can be reused + return this->dbl(); + } + + // rest of add case + alt_bn128_Fq2 H = U2 - U1; // H = U2-U1 + alt_bn128_Fq2 S2_minus_S1 = S2-S1; + alt_bn128_Fq2 I = (H+H).squared(); // I = (2 * H)^2 + alt_bn128_Fq2 J = H * I; // J = H * I + alt_bn128_Fq2 r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1) + alt_bn128_Fq2 V = U1 * I; // V = U1 * I + alt_bn128_Fq2 X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V + alt_bn128_Fq2 S1_J = S1 * J; + alt_bn128_Fq2 Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J + alt_bn128_Fq2 Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H + + return alt_bn128_G2(X3, Y3, Z3); +} + +alt_bn128_G2 alt_bn128_G2::operator-() const +{ + return alt_bn128_G2(this->X, -(this->Y), this->Z); +} + + +alt_bn128_G2 alt_bn128_G2::operator-(const alt_bn128_G2 &other) const +{ + return (*this) + (-other); +} + +alt_bn128_G2 alt_bn128_G2::add(const alt_bn128_G2 &other) const +{ + // handle special cases having to do with O + if (this->is_zero()) + { + return other; + } + + if (other.is_zero()) + { + return *this; + } + + // no need to handle points of order 2,4 + // (they cannot exist in a prime-order subgroup) + + // handle double case + if (this->operator==(other)) + { + return this->dbl(); + } + +#ifdef PROFILE_OP_COUNTS + this->add_cnt++; +#endif + // NOTE: does not handle O and pts of order 2,4 + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-projective.html#addition-add-1998-cmo-2 + + alt_bn128_Fq2 Z1Z1 = (this->Z).squared(); // Z1Z1 = Z1^2 + alt_bn128_Fq2 Z2Z2 = (other.Z).squared(); // Z2Z2 = Z2^2 + alt_bn128_Fq2 U1 = (this->X) * Z2Z2; // U1 = X1 * Z2Z2 + alt_bn128_Fq2 U2 = (other.X) * Z1Z1; // U2 = X2 * Z1Z1 + alt_bn128_Fq2 S1 = (this->Y) * (other.Z) * Z2Z2; // S1 = Y1 * Z2 * Z2Z2 + alt_bn128_Fq2 S2 = (other.Y) * (this->Z) * Z1Z1; // S2 = Y2 * Z1 * Z1Z1 + alt_bn128_Fq2 H = U2 - U1; // H = U2-U1 + alt_bn128_Fq2 S2_minus_S1 = S2-S1; + alt_bn128_Fq2 I = (H+H).squared(); // I = (2 * H)^2 + alt_bn128_Fq2 J = H * I; // J = H * I + alt_bn128_Fq2 r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1) + alt_bn128_Fq2 V = U1 * I; // V = U1 * I + alt_bn128_Fq2 X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V + alt_bn128_Fq2 S1_J = S1 * J; + alt_bn128_Fq2 Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J + alt_bn128_Fq2 Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H + + return alt_bn128_G2(X3, Y3, Z3); +} + +alt_bn128_G2 alt_bn128_G2::mixed_add(const alt_bn128_G2 &other) const +{ +#ifdef DEBUG + assert(other.is_special()); +#endif + + // handle special cases having to do with O + if (this->is_zero()) + { + return other; + } + + if (other.is_zero()) + { + return *this; + } + + // no need to handle points of order 2,4 + // (they cannot exist in a prime-order subgroup) + + // check for doubling case 
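+    // Note: the madd-2007-bl formulas used further below assume the two inputs
+    // represent distinct points. If they are equal, H = U2 - U1 vanishes, so I, J,
+    // r and V are all zero and the formulas collapse to (0 : 0 : 0), i.e. the point
+    // at infinity, instead of 2*P. Hence the explicit dispatch to dbl() below.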
+ + // using Jacobian coordinates so: + // (X1:Y1:Z1) = (X2:Y2:Z2) + // iff + // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3 + // iff + // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3 + + // we know that Z2 = 1 + + const alt_bn128_Fq2 Z1Z1 = (this->Z).squared(); + + const alt_bn128_Fq2 &U1 = this->X; + const alt_bn128_Fq2 U2 = other.X * Z1Z1; + + const alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1Z1; + + const alt_bn128_Fq2 &S1 = (this->Y); // S1 = Y1 * Z2 * Z2Z2 + const alt_bn128_Fq2 S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1 + + if (U1 == U2 && S1 == S2) + { + // dbl case; nothing of above can be reused + return this->dbl(); + } + +#ifdef PROFILE_OP_COUNTS + this->add_cnt++; +#endif + + // NOTE: does not handle O and pts of order 2,4 + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl + alt_bn128_Fq2 H = U2-(this->X); // H = U2-X1 + alt_bn128_Fq2 HH = H.squared() ; // HH = H&2 + alt_bn128_Fq2 I = HH+HH; // I = 4*HH + I = I + I; + alt_bn128_Fq2 J = H*I; // J = H*I + alt_bn128_Fq2 r = S2-(this->Y); // r = 2*(S2-Y1) + r = r + r; + alt_bn128_Fq2 V = (this->X) * I ; // V = X1*I + alt_bn128_Fq2 X3 = r.squared()-J-V-V; // X3 = r^2-J-2*V + alt_bn128_Fq2 Y3 = (this->Y)*J; // Y3 = r*(V-X3)-2*Y1*J + Y3 = r*(V-X3) - Y3 - Y3; + alt_bn128_Fq2 Z3 = ((this->Z)+H).squared() - Z1Z1 - HH; // Z3 = (Z1+H)^2-Z1Z1-HH + + return alt_bn128_G2(X3, Y3, Z3); +} + +alt_bn128_G2 alt_bn128_G2::dbl() const +{ +#ifdef PROFILE_OP_COUNTS + this->dbl_cnt++; +#endif + // handle point at infinity + if (this->is_zero()) + { + return (*this); + } + + // NOTE: does not handle O and pts of order 2,4 + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-projective.html#doubling-dbl-2007-bl + + alt_bn128_Fq2 A = (this->X).squared(); // A = X1^2 + alt_bn128_Fq2 B = (this->Y).squared(); // B = Y1^2 + alt_bn128_Fq2 C = B.squared(); // C = B^2 + alt_bn128_Fq2 D = (this->X + B).squared() - A - C; + D = D+D; // D = 2 * ((X1 + B)^2 - A - C) + alt_bn128_Fq2 E = A + A + A; // E = 3 * A + alt_bn128_Fq2 F = E.squared(); // F = E^2 + alt_bn128_Fq2 X3 = F - (D+D); // X3 = F - 2 D + alt_bn128_Fq2 eightC = C+C; + eightC = eightC + eightC; + eightC = eightC + eightC; + alt_bn128_Fq2 Y3 = E * (D - X3) - eightC; // Y3 = E * (D - X3) - 8 * C + alt_bn128_Fq2 Y1Z1 = (this->Y)*(this->Z); + alt_bn128_Fq2 Z3 = Y1Z1 + Y1Z1; // Z3 = 2 * Y1 * Z1 + + return alt_bn128_G2(X3, Y3, Z3); +} + +alt_bn128_G2 alt_bn128_G2::mul_by_q() const +{ + return alt_bn128_G2(alt_bn128_twist_mul_by_q_X * (this->X).Frobenius_map(1), + alt_bn128_twist_mul_by_q_Y * (this->Y).Frobenius_map(1), + (this->Z).Frobenius_map(1)); +} + +bool alt_bn128_G2::is_well_formed() const +{ + if (this->is_zero()) + { + return true; + } + else + { + /* + y^2 = x^3 + b + + We are using Jacobian coordinates, so equation we need to check is actually + + (y/z^3)^2 = (x/z^2)^3 + b + y^2 / z^6 = x^3 / z^6 + b + y^2 = x^3 + b z^6 + */ + alt_bn128_Fq2 X2 = this->X.squared(); + alt_bn128_Fq2 Y2 = this->Y.squared(); + alt_bn128_Fq2 Z2 = this->Z.squared(); + + alt_bn128_Fq2 X3 = this->X * X2; + alt_bn128_Fq2 Z3 = this->Z * Z2; + alt_bn128_Fq2 Z6 = Z3.squared(); + + return (Y2 == X3 + alt_bn128_twist_coeff_b * Z6); + } +} + +alt_bn128_G2 alt_bn128_G2::zero() +{ + return G2_zero; +} + +alt_bn128_G2 alt_bn128_G2::one() +{ + return G2_one; +} + +alt_bn128_G2 alt_bn128_G2::random_element() +{ + return (alt_bn128_Fr::random_element().as_bigint()) * G2_one; +} + +std::ostream& operator<<(std::ostream &out, const alt_bn128_G2 &g) +{ + alt_bn128_G2 copy(g); + 
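+    // Serialization sketch (matching the code below): the point is first mapped to
+    // affine form, then the stream stores an is_zero flag, the X coordinate and,
+    // unless NO_PT_COMPRESSION is defined, only the least significant bit of Y.c0.
+    // operator>> recovers Y as a square root of X^3 + b' on the twist and uses the
+    // stored bit to pick the correct sign.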
copy.to_affine_coordinates(); + out << (copy.is_zero() ? 1 : 0) << OUTPUT_SEPARATOR; +#ifdef NO_PT_COMPRESSION + out << copy.X << OUTPUT_SEPARATOR << copy.Y; +#else + /* storing LSB of Y */ + out << copy.X << OUTPUT_SEPARATOR << (copy.Y.c0.as_bigint().data[0] & 1); +#endif + + return out; +} + +std::istream& operator>>(std::istream &in, alt_bn128_G2 &g) +{ + char is_zero; + alt_bn128_Fq2 tX, tY; + +#ifdef NO_PT_COMPRESSION + in >> is_zero >> tX >> tY; + is_zero -= '0'; +#else + in.read((char*)&is_zero, 1); // this reads is_zero; + is_zero -= '0'; + consume_OUTPUT_SEPARATOR(in); + + unsigned char Y_lsb; + in >> tX; + consume_OUTPUT_SEPARATOR(in); + in.read((char*)&Y_lsb, 1); + Y_lsb -= '0'; + + // y = +/- sqrt(x^3 + b) + if (!is_zero) + { + alt_bn128_Fq2 tX2 = tX.squared(); + alt_bn128_Fq2 tY2 = tX2 * tX + alt_bn128_twist_coeff_b; + tY = tY2.sqrt(); + + if ((tY.c0.as_bigint().data[0] & 1) != Y_lsb) + { + tY = -tY; + } + } +#endif + // using projective coordinates + if (!is_zero) + { + g.X = tX; + g.Y = tY; + g.Z = alt_bn128_Fq2::one(); + } + else + { + g = alt_bn128_G2::zero(); + } + + return in; +} + +template<> +void batch_to_special_all_non_zeros(std::vector &vec) +{ + std::vector Z_vec; + Z_vec.reserve(vec.size()); + + for (auto &el: vec) + { + Z_vec.emplace_back(el.Z); + } + batch_invert(Z_vec); + + const alt_bn128_Fq2 one = alt_bn128_Fq2::one(); + + for (size_t i = 0; i < vec.size(); ++i) + { + alt_bn128_Fq2 Z2 = Z_vec[i].squared(); + alt_bn128_Fq2 Z3 = Z_vec[i] * Z2; + + vec[i].X = vec[i].X * Z2; + vec[i].Y = vec[i].Y * Z3; + vec[i].Z = one; + } +} + +} // libsnark diff --git a/src/algebra/curves/alt_bn128/alt_bn128_g2.hpp b/src/algebra/curves/alt_bn128/alt_bn128_g2.hpp new file mode 100644 index 000000000..a996a2d1a --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_g2.hpp @@ -0,0 +1,96 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef ALT_BN128_G2_HPP_ +#define ALT_BN128_G2_HPP_ +#include +#include "algebra/curves/alt_bn128/alt_bn128_init.hpp" +#include "algebra/curves/curve_utils.hpp" + +namespace libsnark { + +class alt_bn128_G2; +std::ostream& operator<<(std::ostream &, const alt_bn128_G2&); +std::istream& operator>>(std::istream &, alt_bn128_G2&); + +class alt_bn128_G2 { +public: +#ifdef PROFILE_OP_COUNTS + static long long add_cnt; + static long long dbl_cnt; +#endif + static std::vector wnaf_window_table; + static std::vector fixed_base_exp_window_table; + static alt_bn128_G2 G2_zero; + static alt_bn128_G2 G2_one; + + typedef alt_bn128_Fq base_field; + typedef alt_bn128_Fq2 twist_field; + typedef alt_bn128_Fr scalar_field; + + alt_bn128_Fq2 X, Y, Z; + + // using Jacobian coordinates + alt_bn128_G2(); + alt_bn128_G2(const alt_bn128_Fq2& X, const alt_bn128_Fq2& Y, const alt_bn128_Fq2& Z) : X(X), Y(Y), Z(Z) {}; + + static alt_bn128_Fq2 mul_by_b(const alt_bn128_Fq2 &elt); + + void print() const; + void print_coordinates() const; + + void to_affine_coordinates(); + void to_special(); + bool is_special() const; + + bool is_zero() const; + + bool operator==(const alt_bn128_G2 &other) const; + bool operator!=(const alt_bn128_G2 &other) const; + + alt_bn128_G2 operator+(const alt_bn128_G2 &other) const; + alt_bn128_G2 operator-() const; + alt_bn128_G2 operator-(const alt_bn128_G2 &other) const; + + alt_bn128_G2 add(const alt_bn128_G2 &other) const; + alt_bn128_G2 mixed_add(const alt_bn128_G2 &other) const; + alt_bn128_G2 dbl() const; + alt_bn128_G2 mul_by_q() const; + + bool is_well_formed() const; + + static alt_bn128_G2 zero(); + static alt_bn128_G2 one(); + static alt_bn128_G2 random_element(); + + static size_t size_in_bits() { return twist_field::size_in_bits() + 1; } + static bigint base_field_char() { return base_field::field_char(); } + static bigint order() { return scalar_field::field_char(); } + + friend std::ostream& operator<<(std::ostream &out, const alt_bn128_G2 &g); + friend std::istream& operator>>(std::istream &in, alt_bn128_G2 &g); +}; + +template +alt_bn128_G2 operator*(const bigint &lhs, const alt_bn128_G2 &rhs) +{ + return scalar_mul(rhs, lhs); +} + +template& modulus_p> +alt_bn128_G2 operator*(const Fp_model &lhs, const alt_bn128_G2 &rhs) +{ + return scalar_mul(rhs, lhs.as_bigint()); +} + +template +void batch_to_special_all_non_zeros(std::vector &vec); +template<> +void batch_to_special_all_non_zeros(std::vector &vec); + +} // libsnark +#endif // ALT_BN128_G2_HPP_ diff --git a/src/algebra/curves/alt_bn128/alt_bn128_init.cpp b/src/algebra/curves/alt_bn128/alt_bn128_init.cpp new file mode 100644 index 000000000..7c23773d6 --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_init.cpp @@ -0,0 +1,273 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "algebra/curves/alt_bn128/alt_bn128_init.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp" + +namespace libsnark { + +bigint alt_bn128_modulus_r; +bigint alt_bn128_modulus_q; + +alt_bn128_Fq alt_bn128_coeff_b; +alt_bn128_Fq2 alt_bn128_twist; +alt_bn128_Fq2 alt_bn128_twist_coeff_b; +alt_bn128_Fq alt_bn128_twist_mul_by_b_c0; +alt_bn128_Fq alt_bn128_twist_mul_by_b_c1; +alt_bn128_Fq2 alt_bn128_twist_mul_by_q_X; +alt_bn128_Fq2 alt_bn128_twist_mul_by_q_Y; + +bigint alt_bn128_ate_loop_count; +bool alt_bn128_ate_is_loop_count_neg; +bigint<12*alt_bn128_q_limbs> alt_bn128_final_exponent; +bigint alt_bn128_final_exponent_z; +bool alt_bn128_final_exponent_is_z_neg; + +void init_alt_bn128_params() +{ + typedef bigint bigint_r; + typedef bigint bigint_q; + + assert(sizeof(mp_limb_t) == 8 || sizeof(mp_limb_t) == 4); // Montgomery assumes this + + /* parameters for scalar field Fr */ + + alt_bn128_modulus_r = bigint_r("21888242871839275222246405745257275088548364400416034343698204186575808495617"); + assert(alt_bn128_Fr::modulus_is_valid()); + if (sizeof(mp_limb_t) == 8) + { + alt_bn128_Fr::Rsquared = bigint_r("944936681149208446651664254269745548490766851729442924617792859073125903783"); + alt_bn128_Fr::Rcubed = bigint_r("5866548545943845227489894872040244720403868105578784105281690076696998248512"); + alt_bn128_Fr::inv = 0xc2e1f593efffffff; + } + if (sizeof(mp_limb_t) == 4) + { + alt_bn128_Fr::Rsquared = bigint_r("944936681149208446651664254269745548490766851729442924617792859073125903783"); + alt_bn128_Fr::Rcubed = bigint_r("5866548545943845227489894872040244720403868105578784105281690076696998248512"); + alt_bn128_Fr::inv = 0xefffffff; + } + alt_bn128_Fr::num_bits = 254; + alt_bn128_Fr::euler = bigint_r("10944121435919637611123202872628637544274182200208017171849102093287904247808"); + alt_bn128_Fr::s = 28; + alt_bn128_Fr::t = bigint_r("81540058820840996586704275553141814055101440848469862132140264610111"); + alt_bn128_Fr::t_minus_1_over_2 = bigint_r("40770029410420498293352137776570907027550720424234931066070132305055"); + alt_bn128_Fr::multiplicative_generator = alt_bn128_Fr("5"); + alt_bn128_Fr::root_of_unity = alt_bn128_Fr("19103219067921713944291392827692070036145651957329286315305642004821462161904"); + alt_bn128_Fr::nqr = alt_bn128_Fr("5"); + alt_bn128_Fr::nqr_to_t = alt_bn128_Fr("19103219067921713944291392827692070036145651957329286315305642004821462161904"); + + /* parameters for base field Fq */ + + alt_bn128_modulus_q = bigint_q("21888242871839275222246405745257275088696311157297823662689037894645226208583"); + assert(alt_bn128_Fq::modulus_is_valid()); + if (sizeof(mp_limb_t) == 8) + { + alt_bn128_Fq::Rsquared = bigint_q("3096616502983703923843567936837374451735540968419076528771170197431451843209"); + alt_bn128_Fq::Rcubed = bigint_q("14921786541159648185948152738563080959093619838510245177710943249661917737183"); + alt_bn128_Fq::inv = 0x87d20782e4866389; + } + if (sizeof(mp_limb_t) == 4) + { + alt_bn128_Fq::Rsquared = bigint_q("3096616502983703923843567936837374451735540968419076528771170197431451843209"); + alt_bn128_Fq::Rcubed = bigint_q("14921786541159648185948152738563080959093619838510245177710943249661917737183"); + alt_bn128_Fq::inv = 0xe4866389; + } + alt_bn128_Fq::num_bits = 254; + alt_bn128_Fq::euler = 
bigint_q("10944121435919637611123202872628637544348155578648911831344518947322613104291"); + alt_bn128_Fq::s = 1; + alt_bn128_Fq::t = bigint_q("10944121435919637611123202872628637544348155578648911831344518947322613104291"); + alt_bn128_Fq::t_minus_1_over_2 = bigint_q("5472060717959818805561601436314318772174077789324455915672259473661306552145"); + alt_bn128_Fq::multiplicative_generator = alt_bn128_Fq("3"); + alt_bn128_Fq::root_of_unity = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"); + alt_bn128_Fq::nqr = alt_bn128_Fq("3"); + alt_bn128_Fq::nqr_to_t = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"); + + /* parameters for twist field Fq2 */ + alt_bn128_Fq2::euler = bigint<2*alt_bn128_q_limbs>("239547588008311421220994022608339370399626158265550411218223901127035046843189118723920525909718935985594116157406550130918127817069793474323196511433944"); + alt_bn128_Fq2::s = 4; + alt_bn128_Fq2::t = bigint<2*alt_bn128_q_limbs>("29943448501038927652624252826042421299953269783193801402277987640879380855398639840490065738714866998199264519675818766364765977133724184290399563929243"); + alt_bn128_Fq2::t_minus_1_over_2 = bigint<2*alt_bn128_q_limbs>("14971724250519463826312126413021210649976634891596900701138993820439690427699319920245032869357433499099632259837909383182382988566862092145199781964621"); + alt_bn128_Fq2::non_residue = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"); + alt_bn128_Fq2::nqr = alt_bn128_Fq2(alt_bn128_Fq("2"),alt_bn128_Fq("1")); + alt_bn128_Fq2::nqr_to_t = alt_bn128_Fq2(alt_bn128_Fq("5033503716262624267312492558379982687175200734934877598599011485707452665730"),alt_bn128_Fq("314498342015008975724433667930697407966947188435857772134235984660852259084")); + alt_bn128_Fq2::Frobenius_coeffs_c1[0] = alt_bn128_Fq("1"); + alt_bn128_Fq2::Frobenius_coeffs_c1[1] = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"); + + /* parameters for Fq6 */ + alt_bn128_Fq6::non_residue = alt_bn128_Fq2(alt_bn128_Fq("9"),alt_bn128_Fq("1")); + alt_bn128_Fq6::Frobenius_coeffs_c1[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0")); + alt_bn128_Fq6::Frobenius_coeffs_c1[1] = alt_bn128_Fq2(alt_bn128_Fq("21575463638280843010398324269430826099269044274347216827212613867836435027261"),alt_bn128_Fq("10307601595873709700152284273816112264069230130616436755625194854815875713954")); + alt_bn128_Fq6::Frobenius_coeffs_c1[2] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0")); + alt_bn128_Fq6::Frobenius_coeffs_c1[3] = alt_bn128_Fq2(alt_bn128_Fq("3772000881919853776433695186713858239009073593817195771773381919316419345261"),alt_bn128_Fq("2236595495967245188281701248203181795121068902605861227855261137820944008926")); + alt_bn128_Fq6::Frobenius_coeffs_c1[4] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0")); + alt_bn128_Fq6::Frobenius_coeffs_c1[5] = alt_bn128_Fq2(alt_bn128_Fq("18429021223477853657660792034369865839114504446431234726392080002137598044644"),alt_bn128_Fq("9344045779998320333812420223237981029506012124075525679208581902008406485703")); + alt_bn128_Fq6::Frobenius_coeffs_c2[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0")); + alt_bn128_Fq6::Frobenius_coeffs_c2[1] = 
alt_bn128_Fq2(alt_bn128_Fq("2581911344467009335267311115468803099551665605076196740867805258568234346338"),alt_bn128_Fq("19937756971775647987995932169929341994314640652964949448313374472400716661030")); + alt_bn128_Fq6::Frobenius_coeffs_c2[2] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0")); + alt_bn128_Fq6::Frobenius_coeffs_c2[3] = alt_bn128_Fq2(alt_bn128_Fq("5324479202449903542726783395506214481928257762400643279780343368557297135718"),alt_bn128_Fq("16208900380737693084919495127334387981393726419856888799917914180988844123039")); + alt_bn128_Fq6::Frobenius_coeffs_c2[4] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0")); + alt_bn128_Fq6::Frobenius_coeffs_c2[5] = alt_bn128_Fq2(alt_bn128_Fq("13981852324922362344252311234282257507216387789820983642040889267519694726527"),alt_bn128_Fq("7629828391165209371577384193250820201684255241773809077146787135900891633097")); + + /* parameters for Fq12 */ + + alt_bn128_Fq12::non_residue = alt_bn128_Fq2(alt_bn128_Fq("9"),alt_bn128_Fq("1")); + alt_bn128_Fq12::Frobenius_coeffs_c1[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0")); + alt_bn128_Fq12::Frobenius_coeffs_c1[1] = alt_bn128_Fq2(alt_bn128_Fq("8376118865763821496583973867626364092589906065868298776909617916018768340080"),alt_bn128_Fq("16469823323077808223889137241176536799009286646108169935659301613961712198316")); + alt_bn128_Fq12::Frobenius_coeffs_c1[2] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556617"),alt_bn128_Fq("0")); + alt_bn128_Fq12::Frobenius_coeffs_c1[3] = alt_bn128_Fq2(alt_bn128_Fq("11697423496358154304825782922584725312912383441159505038794027105778954184319"),alt_bn128_Fq("303847389135065887422783454877609941456349188919719272345083954437860409601")); + alt_bn128_Fq12::Frobenius_coeffs_c1[4] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0")); + alt_bn128_Fq12::Frobenius_coeffs_c1[5] = alt_bn128_Fq2(alt_bn128_Fq("3321304630594332808241809054958361220322477375291206261884409189760185844239"),alt_bn128_Fq("5722266937896532885780051958958348231143373700109372999374820235121374419868")); + alt_bn128_Fq12::Frobenius_coeffs_c1[6] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"),alt_bn128_Fq("0")); + alt_bn128_Fq12::Frobenius_coeffs_c1[7] = alt_bn128_Fq2(alt_bn128_Fq("13512124006075453725662431877630910996106405091429524885779419978626457868503"),alt_bn128_Fq("5418419548761466998357268504080738289687024511189653727029736280683514010267")); + alt_bn128_Fq12::Frobenius_coeffs_c1[8] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0")); + alt_bn128_Fq12::Frobenius_coeffs_c1[9] = alt_bn128_Fq2(alt_bn128_Fq("10190819375481120917420622822672549775783927716138318623895010788866272024264"),alt_bn128_Fq("21584395482704209334823622290379665147239961968378104390343953940207365798982")); + alt_bn128_Fq12::Frobenius_coeffs_c1[10] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651967"),alt_bn128_Fq("0")); + alt_bn128_Fq12::Frobenius_coeffs_c1[11] = alt_bn128_Fq2(alt_bn128_Fq("18566938241244942414004596690298913868373833782006617400804628704885040364344"),alt_bn128_Fq("16165975933942742336466353786298926857552937457188450663314217659523851788715")); + + /* choice of short Weierstrass 
curve and its twist */ + + alt_bn128_coeff_b = alt_bn128_Fq("3"); + alt_bn128_twist = alt_bn128_Fq2(alt_bn128_Fq("9"), alt_bn128_Fq("1")); + alt_bn128_twist_coeff_b = alt_bn128_coeff_b * alt_bn128_twist.inverse(); + alt_bn128_twist_mul_by_b_c0 = alt_bn128_coeff_b * alt_bn128_Fq2::non_residue; + alt_bn128_twist_mul_by_b_c1 = alt_bn128_coeff_b * alt_bn128_Fq2::non_residue; + alt_bn128_twist_mul_by_q_X = alt_bn128_Fq2(alt_bn128_Fq("21575463638280843010398324269430826099269044274347216827212613867836435027261"), + alt_bn128_Fq("10307601595873709700152284273816112264069230130616436755625194854815875713954")); + alt_bn128_twist_mul_by_q_Y = alt_bn128_Fq2(alt_bn128_Fq("2821565182194536844548159561693502659359617185244120367078079554186484126554"), + alt_bn128_Fq("3505843767911556378687030309984248845540243509899259641013678093033130930403")); + + /* choice of group G1 */ + alt_bn128_G1::G1_zero = alt_bn128_G1(alt_bn128_Fq::zero(), + alt_bn128_Fq::one(), + alt_bn128_Fq::zero()); + alt_bn128_G1::G1_one = alt_bn128_G1(alt_bn128_Fq("1"), + alt_bn128_Fq("2"), + alt_bn128_Fq::one()); + alt_bn128_G1::wnaf_window_table.push_back(11); + alt_bn128_G1::wnaf_window_table.push_back(24); + alt_bn128_G1::wnaf_window_table.push_back(60); + alt_bn128_G1::wnaf_window_table.push_back(127); + + alt_bn128_G1::fixed_base_exp_window_table.resize(0); + // window 1 is unbeaten in [-inf, 4.99] + alt_bn128_G1::fixed_base_exp_window_table.push_back(1); + // window 2 is unbeaten in [4.99, 10.99] + alt_bn128_G1::fixed_base_exp_window_table.push_back(5); + // window 3 is unbeaten in [10.99, 32.29] + alt_bn128_G1::fixed_base_exp_window_table.push_back(11); + // window 4 is unbeaten in [32.29, 55.23] + alt_bn128_G1::fixed_base_exp_window_table.push_back(32); + // window 5 is unbeaten in [55.23, 162.03] + alt_bn128_G1::fixed_base_exp_window_table.push_back(55); + // window 6 is unbeaten in [162.03, 360.15] + alt_bn128_G1::fixed_base_exp_window_table.push_back(162); + // window 7 is unbeaten in [360.15, 815.44] + alt_bn128_G1::fixed_base_exp_window_table.push_back(360); + // window 8 is unbeaten in [815.44, 2373.07] + alt_bn128_G1::fixed_base_exp_window_table.push_back(815); + // window 9 is unbeaten in [2373.07, 6977.75] + alt_bn128_G1::fixed_base_exp_window_table.push_back(2373); + // window 10 is unbeaten in [6977.75, 7122.23] + alt_bn128_G1::fixed_base_exp_window_table.push_back(6978); + // window 11 is unbeaten in [7122.23, 57818.46] + alt_bn128_G1::fixed_base_exp_window_table.push_back(7122); + // window 12 is never the best + alt_bn128_G1::fixed_base_exp_window_table.push_back(0); + // window 13 is unbeaten in [57818.46, 169679.14] + alt_bn128_G1::fixed_base_exp_window_table.push_back(57818); + // window 14 is never the best + alt_bn128_G1::fixed_base_exp_window_table.push_back(0); + // window 15 is unbeaten in [169679.14, 439758.91] + alt_bn128_G1::fixed_base_exp_window_table.push_back(169679); + // window 16 is unbeaten in [439758.91, 936073.41] + alt_bn128_G1::fixed_base_exp_window_table.push_back(439759); + // window 17 is unbeaten in [936073.41, 4666554.74] + alt_bn128_G1::fixed_base_exp_window_table.push_back(936073); + // window 18 is never the best + alt_bn128_G1::fixed_base_exp_window_table.push_back(0); + // window 19 is unbeaten in [4666554.74, 7580404.42] + alt_bn128_G1::fixed_base_exp_window_table.push_back(4666555); + // window 20 is unbeaten in [7580404.42, 34552892.20] + alt_bn128_G1::fixed_base_exp_window_table.push_back(7580404); + // window 21 is never the best + 
alt_bn128_G1::fixed_base_exp_window_table.push_back(0); + // window 22 is unbeaten in [34552892.20, inf] + alt_bn128_G1::fixed_base_exp_window_table.push_back(34552892); + + /* choice of group G2 */ + + alt_bn128_G2::G2_zero = alt_bn128_G2(alt_bn128_Fq2::zero(), + alt_bn128_Fq2::one(), + alt_bn128_Fq2::zero()); + + alt_bn128_G2::G2_one = alt_bn128_G2(alt_bn128_Fq2(alt_bn128_Fq("10857046999023057135944570762232829481370756359578518086990519993285655852781"), + alt_bn128_Fq("11559732032986387107991004021392285783925812861821192530917403151452391805634")), + alt_bn128_Fq2(alt_bn128_Fq("8495653923123431417604973247489272438418190587263600148770280649306958101930"), + alt_bn128_Fq("4082367875863433681332203403145435568316851327593401208105741076214120093531")), + alt_bn128_Fq2::one()); + alt_bn128_G2::wnaf_window_table.push_back(5); + alt_bn128_G2::wnaf_window_table.push_back(15); + alt_bn128_G2::wnaf_window_table.push_back(39); + alt_bn128_G2::wnaf_window_table.push_back(109); + + alt_bn128_G2::fixed_base_exp_window_table.resize(0); + // window 1 is unbeaten in [-inf, 5.10] + alt_bn128_G2::fixed_base_exp_window_table.push_back(1); + // window 2 is unbeaten in [5.10, 10.43] + alt_bn128_G2::fixed_base_exp_window_table.push_back(5); + // window 3 is unbeaten in [10.43, 25.28] + alt_bn128_G2::fixed_base_exp_window_table.push_back(10); + // window 4 is unbeaten in [25.28, 59.00] + alt_bn128_G2::fixed_base_exp_window_table.push_back(25); + // window 5 is unbeaten in [59.00, 154.03] + alt_bn128_G2::fixed_base_exp_window_table.push_back(59); + // window 6 is unbeaten in [154.03, 334.25] + alt_bn128_G2::fixed_base_exp_window_table.push_back(154); + // window 7 is unbeaten in [334.25, 742.58] + alt_bn128_G2::fixed_base_exp_window_table.push_back(334); + // window 8 is unbeaten in [742.58, 2034.40] + alt_bn128_G2::fixed_base_exp_window_table.push_back(743); + // window 9 is unbeaten in [2034.40, 4987.56] + alt_bn128_G2::fixed_base_exp_window_table.push_back(2034); + // window 10 is unbeaten in [4987.56, 8888.27] + alt_bn128_G2::fixed_base_exp_window_table.push_back(4988); + // window 11 is unbeaten in [8888.27, 26271.13] + alt_bn128_G2::fixed_base_exp_window_table.push_back(8888); + // window 12 is unbeaten in [26271.13, 39768.20] + alt_bn128_G2::fixed_base_exp_window_table.push_back(26271); + // window 13 is unbeaten in [39768.20, 106275.75] + alt_bn128_G2::fixed_base_exp_window_table.push_back(39768); + // window 14 is unbeaten in [106275.75, 141703.40] + alt_bn128_G2::fixed_base_exp_window_table.push_back(106276); + // window 15 is unbeaten in [141703.40, 462422.97] + alt_bn128_G2::fixed_base_exp_window_table.push_back(141703); + // window 16 is unbeaten in [462422.97, 926871.84] + alt_bn128_G2::fixed_base_exp_window_table.push_back(462423); + // window 17 is unbeaten in [926871.84, 4873049.17] + alt_bn128_G2::fixed_base_exp_window_table.push_back(926872); + // window 18 is never the best + alt_bn128_G2::fixed_base_exp_window_table.push_back(0); + // window 19 is unbeaten in [4873049.17, 5706707.88] + alt_bn128_G2::fixed_base_exp_window_table.push_back(4873049); + // window 20 is unbeaten in [5706707.88, 31673814.95] + alt_bn128_G2::fixed_base_exp_window_table.push_back(5706708); + // window 21 is never the best + alt_bn128_G2::fixed_base_exp_window_table.push_back(0); + // window 22 is unbeaten in [31673814.95, inf] + alt_bn128_G2::fixed_base_exp_window_table.push_back(31673815); + + /* pairing parameters */ + + alt_bn128_ate_loop_count = bigint_q("29793968203157093288"); + 
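+    // Note: for a BN curve with parameter z, the optimal ate Miller loop runs over
+    // |6*z + 2|. With z = 4965661367192848881 (alt_bn128_final_exponent_z, assigned
+    // below), 6*z + 2 = 29793968203157093288, exactly the constant set above.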
alt_bn128_ate_is_loop_count_neg = false; + alt_bn128_final_exponent = bigint<12*alt_bn128_q_limbs>("552484233613224096312617126783173147097382103762957654188882734314196910839907541213974502761540629817009608548654680343627701153829446747810907373256841551006201639677726139946029199968412598804882391702273019083653272047566316584365559776493027495458238373902875937659943504873220554161550525926302303331747463515644711876653177129578303191095900909191624817826566688241804408081892785725967931714097716709526092261278071952560171111444072049229123565057483750161460024353346284167282452756217662335528813519139808291170539072125381230815729071544861602750936964829313608137325426383735122175229541155376346436093930287402089517426973178917569713384748081827255472576937471496195752727188261435633271238710131736096299798168852925540549342330775279877006784354801422249722573783561685179618816480037695005515426162362431072245638324744480"); + alt_bn128_final_exponent_z = bigint_q("4965661367192848881"); + alt_bn128_final_exponent_is_z_neg = false; + +} +} // libsnark diff --git a/src/algebra/curves/alt_bn128/alt_bn128_init.hpp b/src/algebra/curves/alt_bn128/alt_bn128_init.hpp new file mode 100644 index 000000000..c3bea7673 --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_init.hpp @@ -0,0 +1,57 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef ALT_BN128_INIT_HPP_ +#define ALT_BN128_INIT_HPP_ +#include "algebra/curves/public_params.hpp" +#include "algebra/fields/fp.hpp" +#include "algebra/fields/fp2.hpp" +#include "algebra/fields/fp6_3over2.hpp" +#include "algebra/fields/fp12_2over3over2.hpp" + +namespace libsnark { + +const mp_size_t alt_bn128_r_bitcount = 254; +const mp_size_t alt_bn128_q_bitcount = 254; + +const mp_size_t alt_bn128_r_limbs = (alt_bn128_r_bitcount+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; +const mp_size_t alt_bn128_q_limbs = (alt_bn128_q_bitcount+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; + +extern bigint alt_bn128_modulus_r; +extern bigint alt_bn128_modulus_q; + +typedef Fp_model alt_bn128_Fr; +typedef Fp_model alt_bn128_Fq; +typedef Fp2_model alt_bn128_Fq2; +typedef Fp6_3over2_model alt_bn128_Fq6; +typedef Fp12_2over3over2_model alt_bn128_Fq12; +typedef alt_bn128_Fq12 alt_bn128_GT; + +// parameters for Barreto--Naehrig curve E/Fq : y^2 = x^3 + b +extern alt_bn128_Fq alt_bn128_coeff_b; +// parameters for twisted Barreto--Naehrig curve E'/Fq2 : y^2 = x^3 + b/xi +extern alt_bn128_Fq2 alt_bn128_twist; +extern alt_bn128_Fq2 alt_bn128_twist_coeff_b; +extern alt_bn128_Fq alt_bn128_twist_mul_by_b_c0; +extern alt_bn128_Fq alt_bn128_twist_mul_by_b_c1; +extern alt_bn128_Fq2 alt_bn128_twist_mul_by_q_X; +extern alt_bn128_Fq2 alt_bn128_twist_mul_by_q_Y; + +// parameters for pairing +extern bigint alt_bn128_ate_loop_count; +extern bool alt_bn128_ate_is_loop_count_neg; +extern bigint<12*alt_bn128_q_limbs> alt_bn128_final_exponent; +extern bigint alt_bn128_final_exponent_z; +extern bool alt_bn128_final_exponent_is_z_neg; + +void init_alt_bn128_params(); + +class alt_bn128_G1; +class alt_bn128_G2; + +} // libsnark +#endif // ALT_BN128_INIT_HPP_ diff --git a/src/algebra/curves/alt_bn128/alt_bn128_pairing.cpp b/src/algebra/curves/alt_bn128/alt_bn128_pairing.cpp new file mode 100644 index 000000000..db556c5b2 --- /dev/null +++ 
b/src/algebra/curves/alt_bn128/alt_bn128_pairing.cpp @@ -0,0 +1,547 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "algebra/curves/alt_bn128/alt_bn128_pairing.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_init.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp" +#include +#include "common/profiling.hpp" +#include "common/assert_except.hpp" + +namespace libsnark { + +bool alt_bn128_ate_G1_precomp::operator==(const alt_bn128_ate_G1_precomp &other) const +{ + return (this->PX == other.PX && + this->PY == other.PY); +} + +std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G1_precomp &prec_P) +{ + out << prec_P.PX << OUTPUT_SEPARATOR << prec_P.PY; + + return out; +} + +std::istream& operator>>(std::istream &in, alt_bn128_ate_G1_precomp &prec_P) +{ + in >> prec_P.PX; + consume_OUTPUT_SEPARATOR(in); + in >> prec_P.PY; + + return in; +} + +bool alt_bn128_ate_ell_coeffs::operator==(const alt_bn128_ate_ell_coeffs &other) const +{ + return (this->ell_0 == other.ell_0 && + this->ell_VW == other.ell_VW && + this->ell_VV == other.ell_VV); +} + +std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_ell_coeffs &c) +{ + out << c.ell_0 << OUTPUT_SEPARATOR << c.ell_VW << OUTPUT_SEPARATOR << c.ell_VV; + return out; +} + +std::istream& operator>>(std::istream &in, alt_bn128_ate_ell_coeffs &c) +{ + in >> c.ell_0; + consume_OUTPUT_SEPARATOR(in); + in >> c.ell_VW; + consume_OUTPUT_SEPARATOR(in); + in >> c.ell_VV; + + return in; +} + +bool alt_bn128_ate_G2_precomp::operator==(const alt_bn128_ate_G2_precomp &other) const +{ + return (this->QX == other.QX && + this->QY == other.QY && + this->coeffs == other.coeffs); +} + +std::ostream& operator<<(std::ostream& out, const alt_bn128_ate_G2_precomp &prec_Q) +{ + out << prec_Q.QX << OUTPUT_SEPARATOR << prec_Q.QY << "\n"; + out << prec_Q.coeffs.size() << "\n"; + for (const alt_bn128_ate_ell_coeffs &c : prec_Q.coeffs) + { + out << c << OUTPUT_NEWLINE; + } + return out; +} + +std::istream& operator>>(std::istream& in, alt_bn128_ate_G2_precomp &prec_Q) +{ + in >> prec_Q.QX; + consume_OUTPUT_SEPARATOR(in); + in >> prec_Q.QY; + consume_newline(in); + + prec_Q.coeffs.clear(); + size_t s; + in >> s; + + consume_newline(in); + + prec_Q.coeffs.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + alt_bn128_ate_ell_coeffs c; + in >> c; + consume_OUTPUT_NEWLINE(in); + prec_Q.coeffs.emplace_back(c); + } + + return in; +} + +/* final exponentiations */ + +alt_bn128_Fq12 alt_bn128_final_exponentiation_first_chunk(const alt_bn128_Fq12 &elt) +{ + enter_block("Call to alt_bn128_final_exponentiation_first_chunk"); + + /* + Computes result = elt^((q^6-1)*(q^2+1)). 
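+      This is the "easy part" of the final exponentiation: since
+      (q^12-1)/r = (q^6-1)*(q^2+1) * ((q^4-q^2+1)/r), the remaining "hard part"
+      (q^4-q^2+1)/r is handled by alt_bn128_final_exponentiation_last_chunk below.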
+ Follows, e.g., Beuchat et al page 9, by computing result as follows: + elt^((q^6-1)*(q^2+1)) = (conj(elt) * elt^(-1))^(q^2+1) + More precisely: + A = conj(elt) + B = elt.inverse() + C = A * B + D = C.Frobenius_map(2) + result = D * C + */ + + const alt_bn128_Fq12 A = alt_bn128_Fq12(elt.c0,-elt.c1); + const alt_bn128_Fq12 B = elt.inverse(); + const alt_bn128_Fq12 C = A * B; + const alt_bn128_Fq12 D = C.Frobenius_map(2); + const alt_bn128_Fq12 result = D * C; + + leave_block("Call to alt_bn128_final_exponentiation_first_chunk"); + + return result; +} + +alt_bn128_Fq12 alt_bn128_exp_by_neg_z(const alt_bn128_Fq12 &elt) +{ + enter_block("Call to alt_bn128_exp_by_neg_z"); + + alt_bn128_Fq12 result = elt.cyclotomic_exp(alt_bn128_final_exponent_z); + if (!alt_bn128_final_exponent_is_z_neg) + { + result = result.unitary_inverse(); + } + + leave_block("Call to alt_bn128_exp_by_neg_z"); + + return result; +} + +alt_bn128_Fq12 alt_bn128_final_exponentiation_last_chunk(const alt_bn128_Fq12 &elt) +{ + enter_block("Call to alt_bn128_final_exponentiation_last_chunk"); + + /* + Follows Laura Fuentes-Castaneda et al. "Faster hashing to G2" + by computing: + + result = elt^(q^3 * (12*z^3 + 6z^2 + 4z - 1) + + q^2 * (12*z^3 + 6z^2 + 6z) + + q * (12*z^3 + 6z^2 + 4z) + + 1 * (12*z^3 + 12z^2 + 6z + 1)) + which equals + + result = elt^( 2z * ( 6z^2 + 3z + 1 ) * (q^4 - q^2 + 1)/r ). + + Using the following addition chain: + + A = exp_by_neg_z(elt) // = elt^(-z) + B = A^2 // = elt^(-2*z) + C = B^2 // = elt^(-4*z) + D = C * B // = elt^(-6*z) + E = exp_by_neg_z(D) // = elt^(6*z^2) + F = E^2 // = elt^(12*z^2) + G = epx_by_neg_z(F) // = elt^(-12*z^3) + H = conj(D) // = elt^(6*z) + I = conj(G) // = elt^(12*z^3) + J = I * E // = elt^(12*z^3 + 6*z^2) + K = J * H // = elt^(12*z^3 + 6*z^2 + 6*z) + L = K * B // = elt^(12*z^3 + 6*z^2 + 4*z) + M = K * E // = elt^(12*z^3 + 12*z^2 + 6*z) + N = M * elt // = elt^(12*z^3 + 12*z^2 + 6*z + 1) + O = L.Frobenius_map(1) // = elt^(q*(12*z^3 + 6*z^2 + 4*z)) + P = O * N // = elt^(q*(12*z^3 + 6*z^2 + 4*z) * (12*z^3 + 12*z^2 + 6*z + 1)) + Q = K.Frobenius_map(2) // = elt^(q^2 * (12*z^3 + 6*z^2 + 6*z)) + R = Q * P // = elt^(q^2 * (12*z^3 + 6*z^2 + 6*z) + q*(12*z^3 + 6*z^2 + 4*z) * (12*z^3 + 12*z^2 + 6*z + 1)) + S = conj(elt) // = elt^(-1) + T = S * L // = elt^(12*z^3 + 6*z^2 + 4*z - 1) + U = T.Frobenius_map(3) // = elt^(q^3(12*z^3 + 6*z^2 + 4*z - 1)) + V = U * R // = elt^(q^3(12*z^3 + 6*z^2 + 4*z - 1) + q^2 * (12*z^3 + 6*z^2 + 6*z) + q*(12*z^3 + 6*z^2 + 4*z) * (12*z^3 + 12*z^2 + 6*z + 1)) + result = V + + */ + + const alt_bn128_Fq12 A = alt_bn128_exp_by_neg_z(elt); + const alt_bn128_Fq12 B = A.cyclotomic_squared(); + const alt_bn128_Fq12 C = B.cyclotomic_squared(); + const alt_bn128_Fq12 D = C * B; + const alt_bn128_Fq12 E = alt_bn128_exp_by_neg_z(D); + const alt_bn128_Fq12 F = E.cyclotomic_squared(); + const alt_bn128_Fq12 G = alt_bn128_exp_by_neg_z(F); + const alt_bn128_Fq12 H = D.unitary_inverse(); + const alt_bn128_Fq12 I = G.unitary_inverse(); + const alt_bn128_Fq12 J = I * E; + const alt_bn128_Fq12 K = J * H; + const alt_bn128_Fq12 L = K * B; + const alt_bn128_Fq12 M = K * E; + const alt_bn128_Fq12 N = M * elt; + const alt_bn128_Fq12 O = L.Frobenius_map(1); + const alt_bn128_Fq12 P = O * N; + const alt_bn128_Fq12 Q = K.Frobenius_map(2); + const alt_bn128_Fq12 R = Q * P; + const alt_bn128_Fq12 S = elt.unitary_inverse(); + const alt_bn128_Fq12 T = S * L; + const alt_bn128_Fq12 U = T.Frobenius_map(3); + const alt_bn128_Fq12 V = U * R; + + const alt_bn128_Fq12 result = V; + + 
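+    // Note on the chain above: multiplying Fq12 elements adds exponents, so e.g.
+    // P = O * N equals elt^(q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1)),
+    // and "epx_by_neg_z" in the description refers to alt_bn128_exp_by_neg_z.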
leave_block("Call to alt_bn128_final_exponentiation_last_chunk"); + + return result; +} + +alt_bn128_GT alt_bn128_final_exponentiation(const alt_bn128_Fq12 &elt) +{ + enter_block("Call to alt_bn128_final_exponentiation"); + /* OLD naive version: + alt_bn128_GT result = elt^alt_bn128_final_exponent; + */ + alt_bn128_Fq12 A = alt_bn128_final_exponentiation_first_chunk(elt); + alt_bn128_GT result = alt_bn128_final_exponentiation_last_chunk(A); + + leave_block("Call to alt_bn128_final_exponentiation"); + return result; +} + +/* ate pairing */ + +void doubling_step_for_flipped_miller_loop(const alt_bn128_Fq two_inv, + alt_bn128_G2 ¤t, + alt_bn128_ate_ell_coeffs &c) +{ + const alt_bn128_Fq2 X = current.X, Y = current.Y, Z = current.Z; + + const alt_bn128_Fq2 A = two_inv * (X * Y); // A = X1 * Y1 / 2 + const alt_bn128_Fq2 B = Y.squared(); // B = Y1^2 + const alt_bn128_Fq2 C = Z.squared(); // C = Z1^2 + const alt_bn128_Fq2 D = C+C+C; // D = 3 * C + const alt_bn128_Fq2 E = alt_bn128_twist_coeff_b * D; // E = twist_b * D + const alt_bn128_Fq2 F = E+E+E; // F = 3 * E + const alt_bn128_Fq2 G = two_inv * (B+F); // G = (B+F)/2 + const alt_bn128_Fq2 H = (Y+Z).squared() - (B+C); // H = (Y1+Z1)^2-(B+C) + const alt_bn128_Fq2 I = E-B; // I = E-B + const alt_bn128_Fq2 J = X.squared(); // J = X1^2 + const alt_bn128_Fq2 E_squared = E.squared(); // E_squared = E^2 + + current.X = A * (B-F); // X3 = A * (B-F) + current.Y = G.squared() - (E_squared+E_squared+E_squared); // Y3 = G^2 - 3*E^2 + current.Z = B * H; // Z3 = B * H + c.ell_0 = alt_bn128_twist * I; // ell_0 = xi * I + c.ell_VW = -H; // ell_VW = - H (later: * yP) + c.ell_VV = J+J+J; // ell_VV = 3*J (later: * xP) +} + +void mixed_addition_step_for_flipped_miller_loop(const alt_bn128_G2 base, + alt_bn128_G2 ¤t, + alt_bn128_ate_ell_coeffs &c) +{ + const alt_bn128_Fq2 X1 = current.X, Y1 = current.Y, Z1 = current.Z; + const alt_bn128_Fq2 &x2 = base.X, &y2 = base.Y; + + const alt_bn128_Fq2 D = X1 - x2 * Z1; // D = X1 - X2*Z1 + const alt_bn128_Fq2 E = Y1 - y2 * Z1; // E = Y1 - Y2*Z1 + const alt_bn128_Fq2 F = D.squared(); // F = D^2 + const alt_bn128_Fq2 G = E.squared(); // G = E^2 + const alt_bn128_Fq2 H = D*F; // H = D*F + const alt_bn128_Fq2 I = X1 * F; // I = X1 * F + const alt_bn128_Fq2 J = H + Z1*G - (I+I); // J = H + Z1*G - (I+I) + + current.X = D * J; // X3 = D*J + current.Y = E * (I-J)-(H * Y1); // Y3 = E*(I-J)-(H*Y1) + current.Z = Z1 * H; // Z3 = Z1*H + c.ell_0 = alt_bn128_twist * (E * x2 - D * y2); // ell_0 = xi * (E * X2 - D * Y2) + c.ell_VV = - E; // ell_VV = - E (later: * xP) + c.ell_VW = D; // ell_VW = D (later: * yP ) +} + +alt_bn128_ate_G1_precomp alt_bn128_ate_precompute_G1(const alt_bn128_G1& P) +{ + enter_block("Call to alt_bn128_ate_precompute_G1"); + + alt_bn128_G1 Pcopy = P; + Pcopy.to_affine_coordinates(); + + alt_bn128_ate_G1_precomp result; + result.PX = Pcopy.X; + result.PY = Pcopy.Y; + + leave_block("Call to alt_bn128_ate_precompute_G1"); + return result; +} + +alt_bn128_ate_G2_precomp alt_bn128_ate_precompute_G2(const alt_bn128_G2& Q) +{ + enter_block("Call to alt_bn128_ate_precompute_G2"); + + alt_bn128_G2 Qcopy(Q); + Qcopy.to_affine_coordinates(); + + alt_bn128_Fq two_inv = (alt_bn128_Fq("2").inverse()); // could add to global params if needed + + alt_bn128_ate_G2_precomp result; + result.QX = Qcopy.X; + result.QY = Qcopy.Y; + + alt_bn128_G2 R; + R.X = Qcopy.X; + R.Y = Qcopy.Y; + R.Z = alt_bn128_Fq2::one(); + + const bigint &loop_count = alt_bn128_ate_loop_count; + bool found_one = false; + alt_bn128_ate_ell_coeffs c; + + for (long i 
= loop_count.max_bits(); i >= 0; --i) + { + const bool bit = loop_count.test_bit(i); + if (!found_one) + { + /* this skips the MSB itself */ + found_one |= bit; + continue; + } + + doubling_step_for_flipped_miller_loop(two_inv, R, c); + result.coeffs.push_back(c); + + if (bit) + { + mixed_addition_step_for_flipped_miller_loop(Qcopy, R, c); + result.coeffs.push_back(c); + } + } + + alt_bn128_G2 Q1 = Qcopy.mul_by_q(); + assert_except(Q1.Z == alt_bn128_Fq2::one()); + alt_bn128_G2 Q2 = Q1.mul_by_q(); + assert_except(Q2.Z == alt_bn128_Fq2::one()); + + if (alt_bn128_ate_is_loop_count_neg) + { + R.Y = - R.Y; + } + Q2.Y = - Q2.Y; + + mixed_addition_step_for_flipped_miller_loop(Q1, R, c); + result.coeffs.push_back(c); + + mixed_addition_step_for_flipped_miller_loop(Q2, R, c); + result.coeffs.push_back(c); + + leave_block("Call to alt_bn128_ate_precompute_G2"); + return result; +} + +alt_bn128_Fq12 alt_bn128_ate_miller_loop(const alt_bn128_ate_G1_precomp &prec_P, + const alt_bn128_ate_G2_precomp &prec_Q) +{ + enter_block("Call to alt_bn128_ate_miller_loop"); + + alt_bn128_Fq12 f = alt_bn128_Fq12::one(); + + bool found_one = false; + size_t idx = 0; + + const bigint &loop_count = alt_bn128_ate_loop_count; + alt_bn128_ate_ell_coeffs c; + + for (long i = loop_count.max_bits(); i >= 0; --i) + { + const bool bit = loop_count.test_bit(i); + if (!found_one) + { + /* this skips the MSB itself */ + found_one |= bit; + continue; + } + + /* code below gets executed for all bits (EXCEPT the MSB itself) of + alt_bn128_param_p (skipping leading zeros) in MSB to LSB + order */ + + c = prec_Q.coeffs[idx++]; + f = f.squared(); + f = f.mul_by_024(c.ell_0, prec_P.PY * c.ell_VW, prec_P.PX * c.ell_VV); + + if (bit) + { + c = prec_Q.coeffs[idx++]; + f = f.mul_by_024(c.ell_0, prec_P.PY * c.ell_VW, prec_P.PX * c.ell_VV); + } + + } + + if (alt_bn128_ate_is_loop_count_neg) + { + f = f.inverse(); + } + + c = prec_Q.coeffs[idx++]; + f = f.mul_by_024(c.ell_0,prec_P.PY * c.ell_VW,prec_P.PX * c.ell_VV); + + c = prec_Q.coeffs[idx++]; + f = f.mul_by_024(c.ell_0,prec_P.PY * c.ell_VW,prec_P.PX * c.ell_VV); + + leave_block("Call to alt_bn128_ate_miller_loop"); + return f; +} + +alt_bn128_Fq12 alt_bn128_ate_double_miller_loop(const alt_bn128_ate_G1_precomp &prec_P1, + const alt_bn128_ate_G2_precomp &prec_Q1, + const alt_bn128_ate_G1_precomp &prec_P2, + const alt_bn128_ate_G2_precomp &prec_Q2) +{ + enter_block("Call to alt_bn128_ate_double_miller_loop"); + + alt_bn128_Fq12 f = alt_bn128_Fq12::one(); + + bool found_one = false; + size_t idx = 0; + + const bigint &loop_count = alt_bn128_ate_loop_count; + for (long i = loop_count.max_bits(); i >= 0; --i) + { + const bool bit = loop_count.test_bit(i); + if (!found_one) + { + /* this skips the MSB itself */ + found_one |= bit; + continue; + } + + /* code below gets executed for all bits (EXCEPT the MSB itself) of + alt_bn128_param_p (skipping leading zeros) in MSB to LSB + order */ + + alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx]; + alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx]; + ++idx; + + f = f.squared(); + + f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV); + f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV); + + if (bit) + { + alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx]; + alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx]; + ++idx; + + f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV); + f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV); + } + } + + if 
(alt_bn128_ate_is_loop_count_neg) + { + f = f.inverse(); + } + + alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx]; + alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx]; + ++idx; + f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV); + f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV); + + c1 = prec_Q1.coeffs[idx]; + c2 = prec_Q2.coeffs[idx]; + ++idx; + f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV); + f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV); + + leave_block("Call to alt_bn128_ate_double_miller_loop"); + + return f; +} + +alt_bn128_Fq12 alt_bn128_ate_pairing(const alt_bn128_G1& P, const alt_bn128_G2 &Q) +{ + enter_block("Call to alt_bn128_ate_pairing"); + alt_bn128_ate_G1_precomp prec_P = alt_bn128_ate_precompute_G1(P); + alt_bn128_ate_G2_precomp prec_Q = alt_bn128_ate_precompute_G2(Q); + alt_bn128_Fq12 result = alt_bn128_ate_miller_loop(prec_P, prec_Q); + leave_block("Call to alt_bn128_ate_pairing"); + return result; +} + +alt_bn128_GT alt_bn128_ate_reduced_pairing(const alt_bn128_G1 &P, const alt_bn128_G2 &Q) +{ + enter_block("Call to alt_bn128_ate_reduced_pairing"); + const alt_bn128_Fq12 f = alt_bn128_ate_pairing(P, Q); + const alt_bn128_GT result = alt_bn128_final_exponentiation(f); + leave_block("Call to alt_bn128_ate_reduced_pairing"); + return result; +} + +/* choice of pairing */ + +alt_bn128_G1_precomp alt_bn128_precompute_G1(const alt_bn128_G1& P) +{ + return alt_bn128_ate_precompute_G1(P); +} + +alt_bn128_G2_precomp alt_bn128_precompute_G2(const alt_bn128_G2& Q) +{ + return alt_bn128_ate_precompute_G2(Q); +} + +alt_bn128_Fq12 alt_bn128_miller_loop(const alt_bn128_G1_precomp &prec_P, + const alt_bn128_G2_precomp &prec_Q) +{ + return alt_bn128_ate_miller_loop(prec_P, prec_Q); +} + +alt_bn128_Fq12 alt_bn128_double_miller_loop(const alt_bn128_G1_precomp &prec_P1, + const alt_bn128_G2_precomp &prec_Q1, + const alt_bn128_G1_precomp &prec_P2, + const alt_bn128_G2_precomp &prec_Q2) +{ + return alt_bn128_ate_double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2); +} + +alt_bn128_Fq12 alt_bn128_pairing(const alt_bn128_G1& P, + const alt_bn128_G2 &Q) +{ + return alt_bn128_ate_pairing(P, Q); +} + +alt_bn128_GT alt_bn128_reduced_pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q) +{ + return alt_bn128_ate_reduced_pairing(P, Q); +} +} // libsnark diff --git a/src/algebra/curves/alt_bn128/alt_bn128_pairing.hpp b/src/algebra/curves/alt_bn128/alt_bn128_pairing.hpp new file mode 100644 index 000000000..15d325485 --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_pairing.hpp @@ -0,0 +1,92 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef ALT_BN128_PAIRING_HPP_ +#define ALT_BN128_PAIRING_HPP_ +#include +#include "algebra/curves/alt_bn128/alt_bn128_init.hpp" + +namespace libsnark { + +/* final exponentiation */ + +alt_bn128_GT alt_bn128_final_exponentiation(const alt_bn128_Fq12 &elt); + +/* ate pairing */ + +struct alt_bn128_ate_G1_precomp { + alt_bn128_Fq PX; + alt_bn128_Fq PY; + + bool operator==(const alt_bn128_ate_G1_precomp &other) const; + friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G1_precomp &prec_P); + friend std::istream& operator>>(std::istream &in, alt_bn128_ate_G1_precomp &prec_P); +}; + +struct alt_bn128_ate_ell_coeffs { + alt_bn128_Fq2 ell_0; + alt_bn128_Fq2 ell_VW; + alt_bn128_Fq2 ell_VV; + + bool operator==(const alt_bn128_ate_ell_coeffs &other) const; + friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_ell_coeffs &dc); + friend std::istream& operator>>(std::istream &in, alt_bn128_ate_ell_coeffs &dc); +}; + +struct alt_bn128_ate_G2_precomp { + alt_bn128_Fq2 QX; + alt_bn128_Fq2 QY; + std::vector coeffs; + + bool operator==(const alt_bn128_ate_G2_precomp &other) const; + friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G2_precomp &prec_Q); + friend std::istream& operator>>(std::istream &in, alt_bn128_ate_G2_precomp &prec_Q); +}; + +alt_bn128_ate_G1_precomp alt_bn128_ate_precompute_G1(const alt_bn128_G1& P); +alt_bn128_ate_G2_precomp alt_bn128_ate_precompute_G2(const alt_bn128_G2& Q); + +alt_bn128_Fq12 alt_bn128_ate_miller_loop(const alt_bn128_ate_G1_precomp &prec_P, + const alt_bn128_ate_G2_precomp &prec_Q); +alt_bn128_Fq12 alt_bn128_ate_double_miller_loop(const alt_bn128_ate_G1_precomp &prec_P1, + const alt_bn128_ate_G2_precomp &prec_Q1, + const alt_bn128_ate_G1_precomp &prec_P2, + const alt_bn128_ate_G2_precomp &prec_Q2); + +alt_bn128_Fq12 alt_bn128_ate_pairing(const alt_bn128_G1& P, + const alt_bn128_G2 &Q); +alt_bn128_GT alt_bn128_ate_reduced_pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q); + +/* choice of pairing */ + +typedef alt_bn128_ate_G1_precomp alt_bn128_G1_precomp; +typedef alt_bn128_ate_G2_precomp alt_bn128_G2_precomp; + +alt_bn128_G1_precomp alt_bn128_precompute_G1(const alt_bn128_G1& P); + +alt_bn128_G2_precomp alt_bn128_precompute_G2(const alt_bn128_G2& Q); + +alt_bn128_Fq12 alt_bn128_miller_loop(const alt_bn128_G1_precomp &prec_P, + const alt_bn128_G2_precomp &prec_Q); + +alt_bn128_Fq12 alt_bn128_double_miller_loop(const alt_bn128_G1_precomp &prec_P1, + const alt_bn128_G2_precomp &prec_Q1, + const alt_bn128_G1_precomp &prec_P2, + const alt_bn128_G2_precomp &prec_Q2); + +alt_bn128_Fq12 alt_bn128_pairing(const alt_bn128_G1& P, + const alt_bn128_G2 &Q); + +alt_bn128_GT alt_bn128_reduced_pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q); + +alt_bn128_GT alt_bn128_affine_reduced_pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q); + +} // libsnark +#endif // ALT_BN128_PAIRING_HPP_ diff --git a/src/algebra/curves/alt_bn128/alt_bn128_pp.cpp b/src/algebra/curves/alt_bn128/alt_bn128_pp.cpp new file mode 100644 index 000000000..25ea924d8 --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_pp.cpp @@ -0,0 +1,58 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp" + +namespace libsnark { + +void alt_bn128_pp::init_public_params() +{ + init_alt_bn128_params(); +} + +alt_bn128_GT alt_bn128_pp::final_exponentiation(const alt_bn128_Fq12 &elt) +{ + return alt_bn128_final_exponentiation(elt); +} + +alt_bn128_G1_precomp alt_bn128_pp::precompute_G1(const alt_bn128_G1 &P) +{ + return alt_bn128_precompute_G1(P); +} + +alt_bn128_G2_precomp alt_bn128_pp::precompute_G2(const alt_bn128_G2 &Q) +{ + return alt_bn128_precompute_G2(Q); +} + +alt_bn128_Fq12 alt_bn128_pp::miller_loop(const alt_bn128_G1_precomp &prec_P, + const alt_bn128_G2_precomp &prec_Q) +{ + return alt_bn128_miller_loop(prec_P, prec_Q); +} + +alt_bn128_Fq12 alt_bn128_pp::double_miller_loop(const alt_bn128_G1_precomp &prec_P1, + const alt_bn128_G2_precomp &prec_Q1, + const alt_bn128_G1_precomp &prec_P2, + const alt_bn128_G2_precomp &prec_Q2) +{ + return alt_bn128_double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2); +} + +alt_bn128_Fq12 alt_bn128_pp::pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q) +{ + return alt_bn128_pairing(P, Q); +} + +alt_bn128_Fq12 alt_bn128_pp::reduced_pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q) +{ + return alt_bn128_reduced_pairing(P, Q); +} + +} // libsnark diff --git a/src/algebra/curves/alt_bn128/alt_bn128_pp.hpp b/src/algebra/curves/alt_bn128/alt_bn128_pp.hpp new file mode 100644 index 000000000..ec8059dcb --- /dev/null +++ b/src/algebra/curves/alt_bn128/alt_bn128_pp.hpp @@ -0,0 +1,50 @@ +/** @file +***************************************************************************** +* @author This file is part of libsnark, developed by SCIPR Lab +* and contributors (see AUTHORS). 
+* @copyright MIT license (see LICENSE file) +*****************************************************************************/ + +#ifndef ALT_BN128_PP_HPP_ +#define ALT_BN128_PP_HPP_ +#include "algebra/curves/public_params.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_init.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp" +#include "algebra/curves/alt_bn128/alt_bn128_pairing.hpp" + +namespace libsnark { + +class alt_bn128_pp { +public: + typedef alt_bn128_Fr Fp_type; + typedef alt_bn128_G1 G1_type; + typedef alt_bn128_G2 G2_type; + typedef alt_bn128_G1_precomp G1_precomp_type; + typedef alt_bn128_G2_precomp G2_precomp_type; + typedef alt_bn128_Fq Fq_type; + typedef alt_bn128_Fq2 Fqe_type; + typedef alt_bn128_Fq12 Fqk_type; + typedef alt_bn128_GT GT_type; + + static const bool has_affine_pairing = false; + + static void init_public_params(); + static alt_bn128_GT final_exponentiation(const alt_bn128_Fq12 &elt); + static alt_bn128_G1_precomp precompute_G1(const alt_bn128_G1 &P); + static alt_bn128_G2_precomp precompute_G2(const alt_bn128_G2 &Q); + static alt_bn128_Fq12 miller_loop(const alt_bn128_G1_precomp &prec_P, + const alt_bn128_G2_precomp &prec_Q); + static alt_bn128_Fq12 double_miller_loop(const alt_bn128_G1_precomp &prec_P1, + const alt_bn128_G2_precomp &prec_Q1, + const alt_bn128_G1_precomp &prec_P2, + const alt_bn128_G2_precomp &prec_Q2); + static alt_bn128_Fq12 pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q); + static alt_bn128_Fq12 reduced_pairing(const alt_bn128_G1 &P, + const alt_bn128_G2 &Q); +}; + +} // libsnark + +#endif // ALT_BN128_PP_HPP_ diff --git a/src/algebra/curves/curve_utils.hpp b/src/algebra/curves/curve_utils.hpp new file mode 100644 index 000000000..33a8e1e17 --- /dev/null +++ b/src/algebra/curves/curve_utils.hpp @@ -0,0 +1,22 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef CURVE_UTILS_HPP_ +#define CURVE_UTILS_HPP_ +#include + +#include "algebra/fields/bigint.hpp" + +namespace libsnark { + +template +GroupT scalar_mul(const GroupT &base, const bigint &scalar); + +} // libsnark +#include "algebra/curves/curve_utils.tcc" + +#endif // CURVE_UTILS_HPP_ diff --git a/src/algebra/curves/curve_utils.tcc b/src/algebra/curves/curve_utils.tcc new file mode 100644 index 000000000..251d75d8b --- /dev/null +++ b/src/algebra/curves/curve_utils.tcc @@ -0,0 +1,37 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef CURVE_UTILS_TCC_ +#define CURVE_UTILS_TCC_ + +namespace libsnark { + +template +GroupT scalar_mul(const GroupT &base, const bigint &scalar) +{ + GroupT result = GroupT::zero(); + + bool found_one = false; + for (long i = scalar.max_bits() - 1; i >= 0; --i) + { + if (found_one) + { + result = result.dbl(); + } + + if (scalar.test_bit(i)) + { + found_one = true; + result = result + base; + } + } + + return result; +} + +} // libsnark +#endif // CURVE_UTILS_TCC_ diff --git a/src/algebra/curves/public_params.hpp b/src/algebra/curves/public_params.hpp new file mode 100644 index 000000000..07e047560 --- /dev/null +++ b/src/algebra/curves/public_params.hpp @@ -0,0 +1,103 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef PUBLIC_PARAMS_HPP_ +#define PUBLIC_PARAMS_HPP_ +#include + +namespace libsnark { + +/* + for every curve the user should define corresponding + public_params with the following typedefs: + + Fp_type + G1_type + G2_type + G1_precomp_type + G2_precomp_type + affine_ate_G1_precomp_type + affine_ate_G2_precomp_type + Fq_type + Fqe_type + Fqk_type + GT_type + + one should also define the following static methods: + + void init_public_params(); + + GT final_exponentiation(const Fqk &elt); + + G1_precomp precompute_G1(const G1 &P); + G2_precomp precompute_G2(const G2 &Q); + + Fqk miller_loop(const G1_precomp &prec_P, + const G2_precomp &prec_Q); + + affine_ate_G1_precomp affine_ate_precompute_G1(const G1 &P); + affine_ate_G2_precomp affine_ate_precompute_G2(const G2 &Q); + + + Fqk affine_ate_miller_loop(const affine_ate_G1_precomp &prec_P, + const affine_ate_G2_precomp &prec_Q); + Fqk affine_ate_e_over_e_miller_loop(const affine_ate_G1_precomp &prec_P1, + const affine_ate_G2_precomp &prec_Q1, + const affine_ate_G1_precomp &prec_P2, + const affine_ate_G2_precomp &prec_Q2); + Fqk affine_ate_e_times_e_over_e_miller_loop(const affine_ate_G1_precomp &prec_P1, + const affine_ate_G2_precomp &prec_Q1, + const affine_ate_G1_precomp &prec_P2, + const affine_ate_G2_precomp &prec_Q2, + const affine_ate_G1_precomp &prec_P3, + const affine_ate_G2_precomp &prec_Q3); + Fqk double_miller_loop(const G1_precomp &prec_P1, + const G2_precomp &prec_Q1, + const G1_precomp &prec_P2, + const G2_precomp &prec_Q2); + + Fqk pairing(const G1 &P, + const G2 &Q); + GT reduced_pairing(const G1 &P, + const G2 &Q); + GT affine_reduced_pairing(const G1 &P, + const G2 &Q); +*/ + +template +using Fr = typename EC_ppT::Fp_type; +template +using G1 = typename EC_ppT::G1_type; +template +using G2 = typename EC_ppT::G2_type; +template +using G1_precomp = typename EC_ppT::G1_precomp_type; +template +using G2_precomp = typename EC_ppT::G2_precomp_type; +template +using affine_ate_G1_precomp = typename EC_ppT::affine_ate_G1_precomp_type; +template +using affine_ate_G2_precomp = typename EC_ppT::affine_ate_G2_precomp_type; +template +using Fq = typename EC_ppT::Fq_type; +template +using Fqe = typename EC_ppT::Fqe_type; +template +using Fqk = typename EC_ppT::Fqk_type; +template +using GT = typename EC_ppT::GT_type; + +template +using Fr_vector = std::vector >; +template +using G1_vector = std::vector >; +template 
+using G2_vector = std::vector >; + +} // libsnark + +#endif // PUBLIC_PARAMS_HPP_ diff --git a/src/algebra/curves/tests/test_bilinearity.cpp b/src/algebra/curves/tests/test_bilinearity.cpp new file mode 100644 index 000000000..295745281 --- /dev/null +++ b/src/algebra/curves/tests/test_bilinearity.cpp @@ -0,0 +1,136 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#include "common/profiling.hpp" +#include "algebra/curves/edwards/edwards_pp.hpp" +#ifdef CURVE_BN128 +#include "algebra/curves/bn128/bn128_pp.hpp" +#endif +#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp" +#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp" +#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp" + +using namespace libsnark; + +template +void pairing_test() +{ + GT GT_one = GT::one(); + + printf("Running bilinearity tests:\n"); + G1 P = (Fr::random_element()) * G1::one(); + //G1 P = Fr("2") * G1::one(); + G2 Q = (Fr::random_element()) * G2::one(); + //G2 Q = Fr("3") * G2::one(); + + printf("P:\n"); + P.print(); + P.print_coordinates(); + printf("Q:\n"); + Q.print(); + Q.print_coordinates(); + printf("\n\n"); + + Fr s = Fr::random_element(); + //Fr s = Fr("2"); + G1 sP = s * P; + G2 sQ = s * Q; + + printf("Pairing bilinearity tests (three must match):\n"); + GT ans1 = ppT::reduced_pairing(sP, Q); + GT ans2 = ppT::reduced_pairing(P, sQ); + GT ans3 = ppT::reduced_pairing(P, Q)^s; + ans1.print(); + ans2.print(); + ans3.print(); + assert(ans1 == ans2); + assert(ans2 == ans3); + + assert(ans1 != GT_one); + assert((ans1^Fr::field_char()) == GT_one); + printf("\n\n"); +} + +template +void double_miller_loop_test() +{ + const G1 P1 = (Fr::random_element()) * G1::one(); + const G1 P2 = (Fr::random_element()) * G1::one(); + const G2 Q1 = (Fr::random_element()) * G2::one(); + const G2 Q2 = (Fr::random_element()) * G2::one(); + + const G1_precomp prec_P1 = ppT::precompute_G1(P1); + const G1_precomp prec_P2 = ppT::precompute_G1(P2); + const G2_precomp prec_Q1 = ppT::precompute_G2(Q1); + const G2_precomp prec_Q2 = ppT::precompute_G2(Q2); + + const Fqk ans_1 = ppT::miller_loop(prec_P1, prec_Q1); + const Fqk ans_2 = ppT::miller_loop(prec_P2, prec_Q2); + const Fqk ans_12 = ppT::double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2); + assert(ans_1 * ans_2 == ans_12); +} + +template +void affine_pairing_test() +{ + GT GT_one = GT::one(); + + printf("Running bilinearity tests:\n"); + G1 P = (Fr::random_element()) * G1::one(); + G2 Q = (Fr::random_element()) * G2::one(); + + printf("P:\n"); + P.print(); + printf("Q:\n"); + Q.print(); + printf("\n\n"); + + Fr s = Fr::random_element(); + G1 sP = s * P; + G2 sQ = s * Q; + + printf("Pairing bilinearity tests (three must match):\n"); + GT ans1 = ppT::affine_reduced_pairing(sP, Q); + GT ans2 = ppT::affine_reduced_pairing(P, sQ); + GT ans3 = ppT::affine_reduced_pairing(P, Q)^s; + ans1.print(); + ans2.print(); + ans3.print(); + assert(ans1 == ans2); + assert(ans2 == ans3); + + assert(ans1 != GT_one); + assert((ans1^Fr::field_char()) == GT_one); + printf("\n\n"); +} + +int main(void) +{ + start_profiling(); + edwards_pp::init_public_params(); + pairing_test(); + double_miller_loop_test(); + + mnt6_pp::init_public_params(); + pairing_test(); + double_miller_loop_test(); + affine_pairing_test(); + + mnt4_pp::init_public_params(); + 
pairing_test(); + double_miller_loop_test(); + affine_pairing_test(); + + alt_bn128_pp::init_public_params(); + pairing_test(); + double_miller_loop_test(); + +#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled + bn128_pp::init_public_params(); + pairing_test(); + double_miller_loop_test(); +#endif +} diff --git a/src/algebra/curves/tests/test_groups.cpp b/src/algebra/curves/tests/test_groups.cpp new file mode 100644 index 000000000..725e490d7 --- /dev/null +++ b/src/algebra/curves/tests/test_groups.cpp @@ -0,0 +1,175 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#include "common/profiling.hpp" +#include "algebra/curves/edwards/edwards_pp.hpp" +#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp" +#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp" +#ifdef CURVE_BN128 +#include "algebra/curves/bn128/bn128_pp.hpp" +#endif +#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp" +#include + +using namespace libsnark; + +template +void test_mixed_add() +{ + GroupT base, el, result; + + base = GroupT::zero(); + el = GroupT::zero(); + el.to_special(); + result = base.mixed_add(el); + assert(result == base + el); + + base = GroupT::zero(); + el = GroupT::random_element(); + el.to_special(); + result = base.mixed_add(el); + assert(result == base + el); + + base = GroupT::random_element(); + el = GroupT::zero(); + el.to_special(); + result = base.mixed_add(el); + assert(result == base + el); + + base = GroupT::random_element(); + el = GroupT::random_element(); + el.to_special(); + result = base.mixed_add(el); + assert(result == base + el); + + base = GroupT::random_element(); + el = base; + el.to_special(); + result = base.mixed_add(el); + assert(result == base.dbl()); +} + +template +void test_group() +{ + bigint<1> rand1 = bigint<1>("76749407"); + bigint<1> rand2 = bigint<1>("44410867"); + bigint<1> randsum = bigint<1>("121160274"); + + GroupT zero = GroupT::zero(); + assert(zero == zero); + GroupT one = GroupT::one(); + assert(one == one); + GroupT two = bigint<1>(2l) * GroupT::one(); + assert(two == two); + GroupT five = bigint<1>(5l) * GroupT::one(); + + GroupT three = bigint<1>(3l) * GroupT::one(); + GroupT four = bigint<1>(4l) * GroupT::one(); + + assert(two+five == three+four); + + GroupT a = GroupT::random_element(); + GroupT b = GroupT::random_element(); + + assert(one != zero); + assert(a != zero); + assert(a != one); + + assert(b != zero); + assert(b != one); + + assert(a.dbl() == a + a); + assert(b.dbl() == b + b); + assert(one.add(two) == three); + assert(two.add(one) == three); + assert(a + b == b + a); + assert(a - a == zero); + assert(a - b == a + (-b)); + assert(a - b == (-b) + a); + + // handle special cases + assert(zero + (-a) == -a); + assert(zero - a == -a); + assert(a - zero == a); + assert(a + zero == a); + assert(zero + a == a); + + assert((a + b).dbl() == (a + b) + (b + a)); + assert(bigint<1>("2") * (a + b) == (a + b) + (b + a)); + + assert((rand1 * a) + (rand2 * a) == (randsum * a)); + + assert(GroupT::order() * a == zero); + assert(GroupT::order() * one == zero); + assert((GroupT::order() * a) - a != zero); + assert((GroupT::order() * one) - one != zero); + + test_mixed_add(); +} + +template +void test_mul_by_q() +{ + GroupT a = GroupT::random_element(); + assert((GroupT::base_field_char()*a) 
== a.mul_by_q()); +} + +template +void test_output() +{ + GroupT g = GroupT::zero(); + + for (size_t i = 0; i < 1000; ++i) + { + std::stringstream ss; + ss << g; + GroupT gg; + ss >> gg; + assert(g == gg); + /* use a random point in next iteration */ + g = GroupT::random_element(); + } +} + +int main(void) +{ + edwards_pp::init_public_params(); + test_group >(); + test_output >(); + test_group >(); + test_output >(); + test_mul_by_q >(); + + mnt4_pp::init_public_params(); + test_group >(); + test_output >(); + test_group >(); + test_output >(); + test_mul_by_q >(); + + mnt6_pp::init_public_params(); + test_group >(); + test_output >(); + test_group >(); + test_output >(); + test_mul_by_q >(); + + alt_bn128_pp::init_public_params(); + test_group >(); + test_output >(); + test_group >(); + test_output >(); + test_mul_by_q >(); + +#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled + bn128_pp::init_public_params(); + test_group >(); + test_output >(); + test_group >(); + test_output >(); +#endif +} diff --git a/src/algebra/evaluation_domain/domains/basic_radix2_domain.hpp b/src/algebra/evaluation_domain/domains/basic_radix2_domain.hpp new file mode 100644 index 000000000..3e127a063 --- /dev/null +++ b/src/algebra/evaluation_domain/domains/basic_radix2_domain.hpp @@ -0,0 +1,45 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for the "basic radix-2" evaluation domain. + + Roughly, the domain has size m = 2^k and consists of the m-th roots of unity. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BASIC_RADIX2_DOMAIN_HPP_ +#define BASIC_RADIX2_DOMAIN_HPP_ + +#include "algebra/evaluation_domain/evaluation_domain.hpp" + +namespace libsnark { + +template +class basic_radix2_domain : public evaluation_domain { +public: + + FieldT omega; + + basic_radix2_domain(const size_t m); + + void FFT(std::vector &a); + void iFFT(std::vector &a); + void cosetFFT(std::vector &a, const FieldT &g); + void icosetFFT(std::vector &a, const FieldT &g); + std::vector lagrange_coeffs(const FieldT &t); + FieldT get_element(const size_t idx); + FieldT compute_Z(const FieldT &t); + void add_poly_Z(const FieldT &coeff, std::vector &H); + void divide_by_Z_on_coset(std::vector &P); + +}; + +} // libsnark + +#include "algebra/evaluation_domain/domains/basic_radix2_domain.tcc" + +#endif // BASIC_RADIX2_DOMAIN_HPP_ diff --git a/src/algebra/evaluation_domain/domains/basic_radix2_domain.tcc b/src/algebra/evaluation_domain/domains/basic_radix2_domain.tcc new file mode 100644 index 000000000..d315e8319 --- /dev/null +++ b/src/algebra/evaluation_domain/domains/basic_radix2_domain.tcc @@ -0,0 +1,112 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for the "basic radix-2" evaluation domain. + + See basic_radix2_domain.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BASIC_RADIX2_DOMAIN_TCC_ +#define BASIC_RADIX2_DOMAIN_TCC_ + +#include "algebra/evaluation_domain/domains/basic_radix2_domain_aux.hpp" + +namespace libsnark { + +template +basic_radix2_domain::basic_radix2_domain(const size_t m) : evaluation_domain(m) +{ + assert(m > 1); + const size_t logm = log2(m); + assert(logm <= (FieldT::s)); + + omega = get_root_of_unity(m); +} + +template +void basic_radix2_domain::FFT(std::vector &a) +{ + enter_block("Execute FFT"); + assert(a.size() == this->m); + _basic_radix2_FFT(a, omega); + leave_block("Execute FFT"); +} + +template +void basic_radix2_domain::iFFT(std::vector &a) +{ + enter_block("Execute inverse FFT"); + assert(a.size() == this->m); + _basic_radix2_FFT(a, omega.inverse()); + + const FieldT sconst = FieldT(a.size()).inverse(); + for (size_t i = 0; i < a.size(); ++i) + { + a[i] *= sconst; + } + leave_block("Execute inverse FFT"); +} + +template +void basic_radix2_domain::cosetFFT(std::vector &a, const FieldT &g) +{ + enter_block("Execute coset FFT"); + _multiply_by_coset(a, g); + FFT(a); + leave_block("Execute coset FFT"); +} + +template +void basic_radix2_domain::icosetFFT(std::vector &a, const FieldT &g) +{ + enter_block("Execute inverse coset IFFT"); + iFFT(a); + _multiply_by_coset(a, g.inverse()); + leave_block("Execute inverse coset IFFT"); +} + +template +std::vector basic_radix2_domain::lagrange_coeffs(const FieldT &t) +{ + return _basic_radix2_lagrange_coeffs(this->m, t); +} + +template +FieldT basic_radix2_domain::get_element(const size_t idx) +{ + return omega^idx; +} + +template +FieldT basic_radix2_domain::compute_Z(const FieldT &t) +{ + return (t^this->m) - FieldT::one(); +} + +template +void basic_radix2_domain::add_poly_Z(const FieldT &coeff, std::vector &H) +{ + assert(H.size() == this->m+1); + H[this->m] += coeff; + H[0] -= coeff; +} + +template +void basic_radix2_domain::divide_by_Z_on_coset(std::vector &P) +{ + const FieldT coset = FieldT::multiplicative_generator; + const FieldT Z_inverse_at_coset = this->compute_Z(coset).inverse(); + for (size_t i = 0; i < this->m; ++i) + { + P[i] *= Z_inverse_at_coset; + } +} + +} // libsnark + +#endif // BASIC_RADIX2_DOMAIN_TCC_ diff --git a/src/algebra/evaluation_domain/domains/basic_radix2_domain_aux.hpp b/src/algebra/evaluation_domain/domains/basic_radix2_domain_aux.hpp new file mode 100644 index 000000000..c42ab2f6f --- /dev/null +++ b/src/algebra/evaluation_domain/domains/basic_radix2_domain_aux.hpp @@ -0,0 +1,48 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for auxiliary functions for the "basic radix-2" evaluation domain. + + These functions compute the radix-2 FFT (in single- or multi-thread mode) and, + also compute Lagrange coefficients. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BASIC_RADIX2_DOMAIN_AUX_HPP_ +#define BASIC_RADIX2_DOMAIN_AUX_HPP_ + +namespace libsnark { + +/** + * Compute the radix-2 FFT of the vector a over the set S={omega^{0},...,omega^{m-1}}. + */ +template +void _basic_radix2_FFT(std::vector &a, const FieldT &omega); + +/** + * A multi-thread version of _basic_radix2_FFT. 
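+ * When MULTICORE is enabled it shuffles the input across 2^log_cpus OpenMP threads,
+ * runs a serial sub-FFT in each thread and re-shuffles the outputs; with a single
+ * available thread it falls back to the serial routine (see the .tcc for details).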
+ */
+template<typename FieldT>
+void _parallel_basic_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega);
+
+/**
+ * Translate the vector a to a coset defined by g.
+ */
+template<typename FieldT>
+void _multiply_by_coset(std::vector<FieldT> &a, const FieldT &g);
+
+/**
+ * Compute the m Lagrange coefficients, relative to the set S={omega^{0},...,omega^{m-1}}, at the field element t.
+ */
+template<typename FieldT>
+std::vector<FieldT> _basic_radix2_lagrange_coeffs(const size_t m, const FieldT &t);
+
+} // libsnark
+
+#include "algebra/evaluation_domain/domains/basic_radix2_domain_aux.tcc"
+
+#endif // BASIC_RADIX2_DOMAIN_AUX_HPP_
diff --git a/src/algebra/evaluation_domain/domains/basic_radix2_domain_aux.tcc b/src/algebra/evaluation_domain/domains/basic_radix2_domain_aux.tcc
new file mode 100644
index 000000000..138b82dbc
--- /dev/null
+++ b/src/algebra/evaluation_domain/domains/basic_radix2_domain_aux.tcc
@@ -0,0 +1,242 @@
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for auxiliary functions for the "basic radix-2" evaluation domain.
+
+ See basic_radix2_domain_aux.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_RADIX2_DOMAIN_AUX_TCC_
+#define BASIC_RADIX2_DOMAIN_AUX_TCC_
+
+#include
+#ifdef MULTICORE
+#include <omp.h>
+#endif
+#include "algebra/fields/field_utils.hpp"
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+#ifdef MULTICORE
+#define _basic_radix2_FFT _basic_parallel_radix2_FFT
+#else
+#define _basic_radix2_FFT _basic_serial_radix2_FFT
+#endif
+
+/*
+ Below we make use of pseudocode from [CLRS 2nd Ed, pp. 864].
+ Also, note that it's the caller's responsibility to multiply by 1/N.
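+
+ After the initial bit-reversal permutation, stage s (with m = 2^{s-1}) combines each
+ pair (a[k+j], a[k+j+m]) into (a[k+j] + w*a[k+j+m], a[k+j] - w*a[k+j+m]), where w runs
+ over powers of w_m, a primitive 2^s-th root of unity.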
+ */ +template +void _basic_serial_radix2_FFT(std::vector &a, const FieldT &omega) +{ + const size_t n = a.size(), logn = log2(n); + assert(n == (1u << logn)); + + /* swapping in place (from Storer's book) */ + for (size_t k = 0; k < n; ++k) + { + const size_t rk = bitreverse(k, logn); + if (k < rk) + std::swap(a[k], a[rk]); + } + + size_t m = 1; // invariant: m = 2^{s-1} + for (size_t s = 1; s <= logn; ++s) + { + // w_m is 2^s-th root of unity now + const FieldT w_m = omega^(n/(2*m)); + + asm volatile ("/* pre-inner */"); + for (size_t k = 0; k < n; k += 2*m) + { + FieldT w = FieldT::one(); + for (size_t j = 0; j < m; ++j) + { + const FieldT t = w * a[k+j+m]; + a[k+j+m] = a[k+j] - t; + a[k+j] += t; + w *= w_m; + } + } + asm volatile ("/* post-inner */"); + m *= 2; + } +} + +template +void _basic_parallel_radix2_FFT_inner(std::vector &a, const FieldT &omega, const size_t log_cpus) +{ + const size_t num_cpus = 1ul< > tmp(num_cpus); + for (size_t j = 0; j < num_cpus; ++j) + { + tmp[j].resize(1ul<<(log_m-log_cpus), FieldT::zero()); + } + +#ifdef MULTICORE + #pragma omp parallel for +#endif + for (size_t j = 0; j < num_cpus; ++j) + { + const FieldT omega_j = omega^j; + const FieldT omega_step = omega^(j<<(log_m - log_cpus)); + + FieldT elt = FieldT::one(); + for (size_t i = 0; i < 1ul<<(log_m - log_cpus); ++i) + { + for (size_t s = 0; s < num_cpus; ++s) + { + // invariant: elt is omega^(j*idx) + const size_t idx = (i + (s<<(log_m - log_cpus))) % (1u << log_m); + tmp[j][i] += a[idx] * elt; + elt *= omega_step; + } + elt *= omega_j; + } + } + leave_block("Shuffle inputs"); + + enter_block("Execute sub-FFTs"); + const FieldT omega_num_cpus = omega^num_cpus; + +#ifdef MULTICORE + #pragma omp parallel for +#endif + for (size_t j = 0; j < num_cpus; ++j) + { + _basic_serial_radix2_FFT(tmp[j], omega_num_cpus); + } + leave_block("Execute sub-FFTs"); + + enter_block("Re-shuffle outputs"); + +#ifdef MULTICORE + #pragma omp parallel for +#endif + for (size_t i = 0; i < num_cpus; ++i) + { + for (size_t j = 0; j < 1ul<<(log_m - log_cpus); ++j) + { + // now: i = idx >> (log_m - log_cpus) and j = idx % (1u << (log_m - log_cpus)), for idx = ((i<<(log_m-log_cpus))+j) % (1u << log_m) + a[(j< +void _basic_parallel_radix2_FFT(std::vector &a, const FieldT &omega) +{ +#ifdef MULTICORE + const size_t num_cpus = omp_get_max_threads(); +#else + const size_t num_cpus = 1; +#endif + const size_t log_cpus = ((num_cpus & (num_cpus - 1)) == 0 ? 
log2(num_cpus) : log2(num_cpus) - 1); + +#ifdef DEBUG + print_indent(); printf("* Invoking parallel FFT on 2^%zu CPUs (omp_get_max_threads = %zu)\n", log_cpus, num_cpus); +#endif + + if (log_cpus == 0) + { + _basic_serial_radix2_FFT(a, omega); + } + else + { + _basic_parallel_radix2_FFT_inner(a, omega, log_cpus); + } +} + +template +void _multiply_by_coset(std::vector &a, const FieldT &g) +{ + //enter_block("Multiply by coset"); + FieldT u = g; + for (size_t i = 1; i < a.size(); ++i) + { + a[i] *= u; + u *= g; + } + //leave_block("Multiply by coset"); +} + +template +std::vector _basic_radix2_lagrange_coeffs(const size_t m, const FieldT &t) +{ + if (m == 1) + { + return std::vector(1, FieldT::one()); + } + + assert(m == (1u << log2(m))); + + const FieldT omega = get_root_of_unity(m); + + std::vector u(m, FieldT::zero()); + + /* + If t equals one of the roots of unity in S={omega^{0},...,omega^{m-1}} + then output 1 at the right place, and 0 elsewhere + */ + + if ((t^m) == (FieldT::one())) + { + FieldT omega_i = FieldT::one(); + for (size_t i = 0; i < m; ++i) + { + if (omega_i == t) // i.e., t equals omega^i + { + u[i] = FieldT::one(); + return u; + } + + omega_i *= omega; + } + } + + /* + Otherwise, if t does not equal any of the roots of unity in S, + then compute each L_{i,S}(t) as Z_{S}(t) * v_i / (t-\omega^i) + where: + - Z_{S}(t) = \prod_{j} (t-\omega^j) = (t^m-1), and + - v_{i} = 1 / \prod_{j \neq i} (\omega^i-\omega^j). + Below we use the fact that v_{0} = 1/m and v_{i+1} = \omega * v_{i}. + */ + + const FieldT Z = (t^m)-FieldT::one(); + FieldT l = Z * FieldT(m).inverse(); + FieldT r = FieldT::one(); + for (size_t i = 0; i < m; ++i) + { + u[i] = l * (t - r).inverse(); + l *= omega; + r *= omega; + } + + return u; +} + +} // libsnark + +#endif // BASIC_RADIX2_DOMAIN_AUX_TCC_ diff --git a/src/algebra/evaluation_domain/evaluation_domain.hpp b/src/algebra/evaluation_domain/evaluation_domain.hpp new file mode 100644 index 000000000..358db9798 --- /dev/null +++ b/src/algebra/evaluation_domain/evaluation_domain.hpp @@ -0,0 +1,125 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for evaluation domains. + + Roughly, given a desired size m for the domain, the constructor selects + a choice of domain S with size ~m that has been selected so to optimize + - computations of Lagrange polynomials, and + - FFT/iFFT computations. + An evaluation domain also provides other other functions, e.g., accessing + individual elements in S or evaluating its vanishing polynomial. + + The descriptions below make use of the definition of a *Lagrange polynomial*, + which we recall. Given a field F, a subset S=(a_i)_i of F, and an index idx + in {0,...,|S-1|}, the idx-th Lagrange polynomial (wrt to subset S) is defined to be + \f[ L_{idx,S}(z) := prod_{k \neq idx} (z - a_k) / prod_{k \neq idx} (a_{idx} - a_k) \f] + Note that, by construction: + \f[ \forall j \neq idx: L_{idx,S}(a_{idx}) = 1 \text{ and } L_{idx,S}(a_j) = 0 \f] + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef EVALUATION_DOMAIN_HPP_ +#define EVALUATION_DOMAIN_HPP_ + +#include + +namespace libsnark { + +/** + * An evaluation domain. 
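+ *
+ * A minimal usage sketch (hypothetical variable names; assumes the requested size is
+ * supported by one of the domains selected by get_evaluation_domain below):
+ *
+ *   std::shared_ptr<evaluation_domain<FieldT> > domain = get_evaluation_domain<FieldT>(values.size());
+ *   domain->iFFT(values);   // in place: evaluations over S -> coefficients
+ *   domain->FFT(values);    // in place: coefficients -> evaluations over S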
+ */ +template +class evaluation_domain { +public: + + const size_t m; + + /** + * Construct an evaluation domain S of size m, if possible. + * + * (See the function get_evaluation_domain below.) + */ + evaluation_domain(const size_t m) : m(m) {}; + + /** + * Get the idx-th element in S. + */ + virtual FieldT get_element(const size_t idx) = 0; + + /** + * Compute the FFT, over the domain S, of the vector a. + */ + virtual void FFT(std::vector &a) = 0; + + /** + * Compute the inverse FFT, over the domain S, of the vector a. + */ + virtual void iFFT(std::vector &a) = 0; + + /** + * Compute the FFT, over the domain g*S, of the vector a. + */ + virtual void cosetFFT(std::vector &a, const FieldT &g) = 0; + + /** + * Compute the inverse FFT, over the domain g*S, of the vector a. + */ + virtual void icosetFFT(std::vector &a, const FieldT &g) = 0; + + /** + * Evaluate all Lagrange polynomials. + * + * The inputs are: + * - an integer m + * - an element t + * The output is a vector (b_{0},...,b_{m-1}) + * where b_{i} is the evaluation of L_{i,S}(z) at z = t. + */ + virtual std::vector lagrange_coeffs(const FieldT &t) = 0; + + /** + * Evaluate the vanishing polynomial of S at the field element t. + */ + virtual FieldT compute_Z(const FieldT &t) = 0; + + /** + * Add the coefficients of the vanishing polynomial of S to the coefficients of the polynomial H. + */ + virtual void add_poly_Z(const FieldT &coeff, std::vector &H) = 0; + + /** + * Multiply by the evaluation, on a coset of S, of the inverse of the vanishing polynomial of S. + */ + virtual void divide_by_Z_on_coset(std::vector &P) = 0; +}; + +/** + * Return an evaluation domain object in which the domain S has size |S| >= min_size. + * The function chooses from different supported domains, depending on min_size. + */ +template +std::shared_ptr > get_evaluation_domain(const size_t min_size); + +/** + * Naive evaluation of a *single* Lagrange polynomial, used for testing purposes. + * + * The inputs are: + * - an integer m + * - a domain S = (a_{0},...,a_{m-1}) of size m + * - a field element element t + * - an index idx in {0,...,m-1} + * The output is the polynomial L_{idx,S}(z) evaluated at z = t. + */ +template +FieldT lagrange_eval(const size_t m, const std::vector &domain, const FieldT &t, const size_t idx); + +} // libsnark + +#include "algebra/evaluation_domain/evaluation_domain.tcc" + +#endif // EVALUATION_DOMAIN_HPP_ diff --git a/src/algebra/evaluation_domain/evaluation_domain.tcc b/src/algebra/evaluation_domain/evaluation_domain.tcc new file mode 100644 index 000000000..8e3ea7a62 --- /dev/null +++ b/src/algebra/evaluation_domain/evaluation_domain.tcc @@ -0,0 +1,117 @@ +/** @file + ***************************************************************************** + + Imeplementation of interfaces for evaluation domains. + + See evaluation_domain.hpp . + + We currently implement, and select among, three types of domains: + - "basic radix-2": the domain has size m = 2^k and consists of the m-th roots of unity + - "extended radix-2": the domain has size m = 2^{k+1} and consists of "the m-th roots of unity" union "a coset" + - "step radix-2": the domain has size m = 2^k + 2^r and consists of "the 2^k-th roots of unity" union "a coset of 2^r-th roots of unity" + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef EVALUATION_DOMAIN_TCC_ +#define EVALUATION_DOMAIN_TCC_ + +#include +#include "algebra/fields/field_utils.hpp" +#include "algebra/evaluation_domain/domains/basic_radix2_domain.hpp" + +namespace libsnark { + +template +std::shared_ptr > get_evaluation_domain(const size_t min_size) +{ + assert(min_size > 1); + const size_t log_min_size = log2(min_size); + assert(log_min_size <= (FieldT::s+1)); + + std::shared_ptr > result; + if (min_size == (1u << log_min_size)) + { + if (log_min_size == FieldT::s+1) + { + if (!inhibit_profiling_info) + { + print_indent(); printf("* Selected domain: extended_radix2\n"); + } + assert(0); + } + else + { + if (!inhibit_profiling_info) + { + print_indent(); printf("* Selected domain: basic_radix2\n"); + } + result.reset(new basic_radix2_domain(min_size)); + } + } + else + { + const size_t big = 1ul<<(log2(min_size)-1); + const size_t small = min_size - big; + const size_t rounded_small = (1ul<(big + rounded_small)); + } + else + { + if (!inhibit_profiling_info) + { + print_indent(); printf("* Selected domain: extended_radix2\n"); + } + assert(0); + } + } + else + { + if (!inhibit_profiling_info) + { + print_indent(); printf("* Selected domain: step_radix2\n"); + } + assert(0); + } + } + + return result; +} + +template +FieldT lagrange_eval(const size_t m, const std::vector &domain, const FieldT &t, const size_t idx) +{ + assert(m == domain.size()); + assert(idx < m); + + FieldT num = FieldT::one(); + FieldT denom = FieldT::one(); + + for (size_t k = 0; k < m; ++k) + { + if (k == idx) + { + continue; + } + + num *= t - domain[k]; + denom *= domain[idx] - domain[k]; + } + + return num * denom.inverse(); +} + +} // libsnark + +#endif // EVALUATION_DOMAIN_TCC_ diff --git a/src/algebra/exponentiation/exponentiation.hpp b/src/algebra/exponentiation/exponentiation.hpp new file mode 100644 index 000000000..a8a2c925c --- /dev/null +++ b/src/algebra/exponentiation/exponentiation.hpp @@ -0,0 +1,31 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for (square-and-multiply) exponentiation. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef EXPONENTIATION_HPP_ +#define EXPONENTIATION_HPP_ + +#include + +#include "algebra/fields/bigint.hpp" + +namespace libsnark { + +template +FieldT power(const FieldT &base, const bigint &exponent); + +template +FieldT power(const FieldT &base, const unsigned long exponent); + +} // libsnark + +#include "algebra/exponentiation/exponentiation.tcc" + +#endif // EXPONENTIATION_HPP_ diff --git a/src/algebra/exponentiation/exponentiation.tcc b/src/algebra/exponentiation/exponentiation.tcc new file mode 100644 index 000000000..dd557eb12 --- /dev/null +++ b/src/algebra/exponentiation/exponentiation.tcc @@ -0,0 +1,53 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for (square-and-multiply) exponentiation. + + See exponentiation.hpp . 
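+
+ For example, with exponent 13 = 0b1101 the loop in power() below scans the bits from
+ the most significant set bit downwards, squaring the accumulator for every later bit
+ and multiplying by the base whenever a bit is 1; the accumulator successively takes
+ the values base, base^3, base^6, base^13.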
+ + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef EXPONENTIATION_TCC_ +#define EXPONENTIATION_TCC_ + +#include "common/utils.hpp" + +namespace libsnark { + +template +FieldT power(const FieldT &base, const bigint &exponent) +{ + FieldT result = FieldT::one(); + + bool found_one = false; + + for (long i = exponent.max_bits() - 1; i >= 0; --i) + { + if (found_one) + { + result = result * result; + } + + if (exponent.test_bit(i)) + { + found_one = true; + result = result * base; + } + } + + return result; +} + +template +FieldT power(const FieldT &base, const unsigned long exponent) +{ + return power(base, bigint<1>(exponent)); +} + +} // libsnark + +#endif // EXPONENTIATION_TCC_ diff --git a/src/algebra/fields/bigint.hpp b/src/algebra/fields/bigint.hpp new file mode 100644 index 000000000..ff00dd5cf --- /dev/null +++ b/src/algebra/fields/bigint.hpp @@ -0,0 +1,70 @@ +/** @file + ***************************************************************************** + Declaration of bigint wrapper class around GMP's MPZ long integers. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BIGINT_HPP_ +#define BIGINT_HPP_ +#include +#include +#include +#include "common/serialization.hpp" + +namespace libsnark { + +template class bigint; +template std::ostream& operator<<(std::ostream &, const bigint&); +template std::istream& operator>>(std::istream &, bigint&); + +/** + * Wrapper class around GMP's MPZ long integers. It supports arithmetic operations, + * serialization and randomization. Serialization is fragile, see common/serialization.hpp. 
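 * A minimal usage sketch (illustrative only; the 4-limb width is an arbitrary choice):
 *
 *   bigint<4> a("123456789012345678901234567890");  // decimal-string constructor
 *   bigint<4> b(42ul);                               // small-integer constructor
 *   bigint<8> c = a * b;      // an n-limb times an m-limb value yields an (n+m)-limb result
 *   bool lsb = a.test_bit(0);
 *
 * For the mixed-width operators (+= and *) the first operand must be at least as wide
 * as the second; this is enforced with a static_assert.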
+ */ + +template +class bigint { +public: + static const mp_size_t N = n; + + mp_limb_t data[n] = {0}; + + bigint() = default; + bigint(const unsigned long x); /// Initalize from a small integer + bigint(const char* s); /// Initialize from a string containing an integer in decimal notation + bigint(const mpz_t r); /// Initialize from MPZ element + + void print() const; + void print_hex() const; + bool operator==(const bigint& other) const; + bool operator!=(const bigint& other) const; + void clear(); + bool is_zero() const; + size_t max_bits() const { return n * GMP_NUMB_BITS; } + size_t num_bits() const; + + unsigned long as_ulong() const; /* return the last limb of the integer */ + void to_mpz(mpz_t r) const; + bool test_bit(const std::size_t bitno) const; + + template inline void operator+=(const bigint& other); + template inline bigint operator*(const bigint& other) const; + template static inline void div_qr(bigint& quotient, bigint& remainder, + const bigint& dividend, const bigint& divisor); + template inline bigint shorten(const bigint& q, const char *msg) const; + + inline void limit(const bigint& q, const char *msg) const; + bool operator>(const bigint& other) const; + + bigint& randomize(); + + friend std::ostream& operator<< (std::ostream &out, const bigint &b); + friend std::istream& operator>> (std::istream &in, bigint &b); +}; + +} // libsnark +#include "algebra/fields/bigint.tcc" +#endif diff --git a/src/algebra/fields/bigint.tcc b/src/algebra/fields/bigint.tcc new file mode 100644 index 000000000..f81addf45 --- /dev/null +++ b/src/algebra/fields/bigint.tcc @@ -0,0 +1,278 @@ +/** @file + ***************************************************************************** + Implementation of bigint wrapper class around GMP's MPZ long integers. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BIGINT_TCC_ +#define BIGINT_TCC_ +#include +#include +#include +#include "sodium.h" + +namespace libsnark { + +template +bigint::bigint(const unsigned long x) /// Initalize from a small integer +{ + static_assert(ULONG_MAX <= GMP_NUMB_MAX, "unsigned long does not fit in a GMP limb"); + this->data[0] = x; +} + +template +bigint::bigint(const char* s) /// Initialize from a string containing an integer in decimal notation +{ + size_t l = strlen(s); + unsigned char* s_copy = new unsigned char[l]; + + for (size_t i = 0; i < l; ++i) + { + assert(s[i] >= '0' && s[i] <= '9'); + s_copy[i] = s[i] - '0'; + } + + mp_size_t limbs_written = mpn_set_str(this->data, s_copy, l, 10); + assert(limbs_written <= n); + + delete[] s_copy; +} + +template +bigint::bigint(const mpz_t r) /// Initialize from MPZ element +{ + mpz_t k; + mpz_init_set(k, r); + + for (size_t i = 0; i < n; ++i) + { + data[i] = mpz_get_ui(k); + mpz_fdiv_q_2exp(k, k, GMP_NUMB_BITS); + } + + assert(mpz_sgn(k) == 0); + mpz_clear(k); +} + +template +void bigint::print() const +{ + gmp_printf("%Nd\n", this->data, n); +} + +template +void bigint::print_hex() const +{ + gmp_printf("%Nx\n", this->data, n); +} + +template +bool bigint::operator==(const bigint& other) const +{ + return (mpn_cmp(this->data, other.data, n) == 0); +} + +template +bool bigint::operator!=(const bigint& other) const +{ + return !(operator==(other)); +} + +template +void bigint::clear() +{ + mpn_zero(this->data, n); +} + +template +bool bigint::is_zero() const +{ + for (mp_size_t i = 0; i < n; ++i) + { + if (this->data[i]) + { + return false; + } + } + + return true; +} + +template +size_t bigint::num_bits() const +{ +/* + for (long i = max_bits(); i >= 0; --i) + { + if (this->test_bit(i)) + { + return i+1; + } + } + + return 0; +*/ + for (long i = n-1; i >= 0; --i) + { + mp_limb_t x = this->data[i]; + if (x == 0) + { + continue; + } + else + { + return ((i+1) * GMP_NUMB_BITS) - __builtin_clzl(x); + } + } + return 0; +} + +template +unsigned long bigint::as_ulong() const +{ + return this->data[0]; +} + +template +void bigint::to_mpz(mpz_t r) const +{ + mpz_set_ui(r, 0); + + for (int i = n-1; i >= 0; --i) + { + mpz_mul_2exp(r, r, GMP_NUMB_BITS); + mpz_add_ui(r, r, this->data[i]); + } +} + +template +bool bigint::test_bit(const std::size_t bitno) const +{ + if (bitno >= n * GMP_NUMB_BITS) + { + return false; + } + else + { + const std::size_t part = bitno/GMP_NUMB_BITS; + const std::size_t bit = bitno - (GMP_NUMB_BITS*part); + const mp_limb_t one = 1; + return (this->data[part] & (one< template +inline void bigint::operator+=(const bigint& other) +{ + static_assert(n >= m, "first arg must not be smaller than second arg for bigint in-place add"); + mpn_add(data, data, n, other.data, m); +} + +template template +inline bigint bigint::operator*(const bigint& other) const +{ + static_assert(n >= m, "first arg must not be smaller than second arg for bigint mul"); + bigint res; + mpn_mul(res.data, data, n, other.data, m); + return res; +} + +template template +inline void bigint::div_qr(bigint& quotient, bigint& remainder, + const bigint& dividend, const bigint& divisor) +{ + static_assert(n >= d, "dividend must not be smaller than divisor for bigint::div_qr"); + assert(divisor.data[d-1] != 0); + mpn_tdiv_qr(quotient.data, remainder.data, 0, dividend.data, n, divisor.data, d); +} + +// Return a copy shortened to m limbs provided it is less than 
limit, throwing std::domain_error if not in range. +template template +inline bigint bigint::shorten(const bigint& q, const char *msg) const +{ + static_assert(m <= n, "number of limbs must not increase for bigint::shorten"); + for (mp_size_t i = m; i < n; i++) { // high-order limbs + if (data[i] != 0) { + throw std::domain_error(msg); + } + } + bigint res; + mpn_copyi(res.data, data, n); + res.limit(q, msg); + return res; +} + +template +inline void bigint::limit(const bigint& q, const char *msg) const +{ + if (!(q > *this)) { + throw std::domain_error(msg); + } +} + +template +inline bool bigint::operator>(const bigint& other) const +{ + return mpn_cmp(this->data, other.data, n) > 0; +} + +template +bigint& bigint::randomize() +{ + assert(GMP_NUMB_BITS == sizeof(mp_limb_t) * 8); + + randombytes_buf(this->data, sizeof(mp_limb_t) * n); + + return (*this); +} + + +template +std::ostream& operator<<(std::ostream &out, const bigint &b) +{ +#ifdef BINARY_OUTPUT + out.write((char*)b.data, sizeof(b.data[0]) * n); +#else + mpz_t t; + mpz_init(t); + b.to_mpz(t); + + out << t; + + mpz_clear(t); +#endif + return out; +} + +template +std::istream& operator>>(std::istream &in, bigint &b) +{ +#ifdef BINARY_OUTPUT + in.read((char*)b.data, sizeof(b.data[0]) * n); +#else + std::string s; + in >> s; + + size_t l = s.size(); + unsigned char* s_copy = new unsigned char[l]; + + for (size_t i = 0; i < l; ++i) + { + assert(s[i] >= '0' && s[i] <= '9'); + s_copy[i] = s[i] - '0'; + } + + mp_size_t limbs_written = mpn_set_str(b.data, s_copy, l, 10); + assert(limbs_written <= n); + + delete[] s_copy; +#endif + return in; +} + +} // libsnark +#endif // BIGINT_TCC_ diff --git a/src/algebra/fields/field_utils.hpp b/src/algebra/fields/field_utils.hpp new file mode 100644 index 000000000..a07ecfe28 --- /dev/null +++ b/src/algebra/fields/field_utils.hpp @@ -0,0 +1,51 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FIELD_UTILS_HPP_ +#define FIELD_UTILS_HPP_ +#include + +#include "common/utils.hpp" +#include "algebra/fields/bigint.hpp" + +namespace libsnark { + +// returns root of unity of order n (for n a power of 2), if one exists +template +FieldT get_root_of_unity(const size_t n); + +template +std::vector pack_int_vector_into_field_element_vector(const std::vector &v, const size_t w); + +template +std::vector pack_bit_vector_into_field_element_vector(const bit_vector &v, const size_t chunk_bits); + +template +std::vector pack_bit_vector_into_field_element_vector(const bit_vector &v); + +template +std::vector convert_bit_vector_to_field_element_vector(const bit_vector &v); + +template +bit_vector convert_field_element_vector_to_bit_vector(const std::vector &v); + +template +bit_vector convert_field_element_to_bit_vector(const FieldT &el); + +template +bit_vector convert_field_element_to_bit_vector(const FieldT &el, const size_t bitcount); + +template +FieldT convert_bit_vector_to_field_element(const bit_vector &v); + +template +void batch_invert(std::vector &vec); + +} // libsnark +#include "algebra/fields/field_utils.tcc" + +#endif // FIELD_UTILS_HPP_ diff --git a/src/algebra/fields/field_utils.tcc b/src/algebra/fields/field_utils.tcc new file mode 100644 index 000000000..13197b226 --- /dev/null +++ b/src/algebra/fields/field_utils.tcc @@ -0,0 +1,183 @@ +/** @file + ***************************************************************************** + Implementation of misc. math and serialization utility functions + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FIELD_UTILS_TCC_ +#define FIELD_UTILS_TCC_ + +#include "common/utils.hpp" + +namespace libsnark { + +template +FieldT coset_shift() +{ + return FieldT::multiplicative_generator.squared(); +} + +template +FieldT get_root_of_unity(const size_t n) +{ + const size_t logn = log2(n); + assert(n == (1u << logn)); + assert(logn <= FieldT::s); + + FieldT omega = FieldT::root_of_unity; + for (size_t i = FieldT::s; i > logn; --i) + { + omega *= omega; + } + + return omega; +} + +template +std::vector pack_int_vector_into_field_element_vector(const std::vector &v, const size_t w) +{ + const size_t chunk_bits = FieldT::capacity(); + const size_t repacked_size = div_ceil(v.size() * w, chunk_bits); + std::vector result(repacked_size); + + for (size_t i = 0; i < repacked_size; ++i) + { + bigint b; + for (size_t j = 0; j < chunk_bits; ++j) + { + const size_t word_index = (i * chunk_bits + j) / w; + const size_t pos_in_word = (i * chunk_bits + j) % w; + const size_t word_or_0 = (word_index < v.size() ? 
v[word_index] : 0); + const size_t bit = (word_or_0 >> pos_in_word) & 1; + + b.data[j / GMP_NUMB_BITS] |= bit << (j % GMP_NUMB_BITS); + } + result[i] = FieldT(b); + } + + return result; +} + +template +std::vector pack_bit_vector_into_field_element_vector(const bit_vector &v, const size_t chunk_bits) +{ + assert(chunk_bits <= FieldT::capacity()); + + const size_t repacked_size = div_ceil(v.size(), chunk_bits); + std::vector result(repacked_size); + + for (size_t i = 0; i < repacked_size; ++i) + { + bigint b; + for (size_t j = 0; j < chunk_bits; ++j) + { + b.data[j / GMP_NUMB_BITS] |= ((i * chunk_bits + j) < v.size() && v[i * chunk_bits + j] ? 1ll : 0ll) << (j % GMP_NUMB_BITS); + } + result[i] = FieldT(b); + } + + return result; +} + +template +std::vector pack_bit_vector_into_field_element_vector(const bit_vector &v) +{ + return pack_bit_vector_into_field_element_vector(v, FieldT::capacity()); +} + +template +std::vector convert_bit_vector_to_field_element_vector(const bit_vector &v) +{ + std::vector result; + result.reserve(v.size()); + + for (const bool b : v) + { + result.emplace_back(b ? FieldT::one() : FieldT::zero()); + } + + return result; +} + +template +bit_vector convert_field_element_vector_to_bit_vector(const std::vector &v) +{ + bit_vector result; + + for (const FieldT &el : v) + { + const bit_vector el_bits = convert_field_element_to_bit_vector(el); + result.insert(result.end(), el_bits.begin(), el_bits.end()); + } + + return result; +} + +template +bit_vector convert_field_element_to_bit_vector(const FieldT &el) +{ + bit_vector result; + + bigint b = el.as_bigint(); + for (size_t i = 0; i < FieldT::size_in_bits(); ++i) + { + result.push_back(b.test_bit(i)); + } + + return result; +} + +template +bit_vector convert_field_element_to_bit_vector(const FieldT &el, const size_t bitcount) +{ + bit_vector result = convert_field_element_to_bit_vector(el); + result.resize(bitcount); + + return result; +} + +template +FieldT convert_bit_vector_to_field_element(const bit_vector &v) +{ + assert(v.size() <= FieldT::size_in_bits()); + + FieldT res = FieldT::zero(); + FieldT c = FieldT::one(); + for (bool b : v) + { + res += b ? c : FieldT::zero(); + c += c; + } + return res; +} + +template +void batch_invert(std::vector &vec) +{ + std::vector prod; + prod.reserve(vec.size()); + + FieldT acc = FieldT::one(); + + for (auto el : vec) + { + assert(!el.is_zero()); + prod.emplace_back(acc); + acc = acc * el; + } + + FieldT acc_inverse = acc.inverse(); + + for (long i = vec.size()-1; i >= 0; --i) + { + const FieldT old_el = vec[i]; + vec[i] = acc_inverse * prod[i]; + acc_inverse = acc_inverse * old_el; + } +} + +} // libsnark +#endif // FIELD_UTILS_TCC_ diff --git a/src/algebra/fields/fp.hpp b/src/algebra/fields/fp.hpp new file mode 100644 index 000000000..a4986833c --- /dev/null +++ b/src/algebra/fields/fp.hpp @@ -0,0 +1,182 @@ +/** @file + ***************************************************************************** + Declaration of arithmetic in the finite field F[p], for prime p of fixed length. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP_HPP_ +#define FP_HPP_ + +#include "algebra/fields/bigint.hpp" +#include "algebra/exponentiation/exponentiation.hpp" + +namespace libsnark { + +template& modulus> +class Fp_model; + +template& modulus> +std::ostream& operator<<(std::ostream &, const Fp_model&); + +template& modulus> +std::istream& operator>>(std::istream &, Fp_model &); + +/** + * Arithmetic in the finite field F[p], for prime p of fixed length. + * + * This class implements Fp-arithmetic, for a large prime p, using a fixed number + * of words. It is optimized for tight memory consumption, so the modulus p is + * passed as a template parameter, to avoid per-element overheads. + * + * The implementation is mostly a wrapper around GMP's MPN (constant-size integers). + * But for the integer sizes of interest for libsnark (3 to 5 limbs of 64 bits each), + * we implement performance-critical routines, like addition and multiplication, + * using hand-optimzied assembly code. +*/ +template& modulus> +class Fp_model { +public: + bigint mont_repr; +public: + static const mp_size_t num_limbs = n; + static const constexpr bigint& mod = modulus; +#ifdef PROFILE_OP_COUNTS + static long long add_cnt; + static long long sub_cnt; + static long long mul_cnt; + static long long sqr_cnt; + static long long inv_cnt; +#endif + static size_t num_bits; + static bigint euler; // (modulus-1)/2 + static size_t s; // modulus = 2^s * t + 1 + static bigint t; // with t odd + static bigint t_minus_1_over_2; // (t-1)/2 + static Fp_model nqr; // a quadratic nonresidue + static Fp_model nqr_to_t; // nqr^t + static Fp_model multiplicative_generator; // generator of Fp^* + static Fp_model root_of_unity; // generator^((modulus-1)/2^s) + static mp_limb_t inv; // modulus^(-1) mod W, where W = 2^(word size) + static bigint Rsquared; // R^2, where R = W^k, where k = ?? + static bigint Rcubed; // R^3 + + static bool modulus_is_valid() { return modulus.data[n-1] != 0; } // mpn inverse assumes that highest limb is non-zero + + Fp_model() {}; + Fp_model(const bigint &b); + Fp_model(const long x, const bool is_unsigned=false); + + void set_ulong(const unsigned long x); + + void mul_reduce(const bigint &other); + + void clear(); + + /* Return the standard (not Montgomery) representation of the + Field element's requivalence class. I.e. Fp(2).as_bigint() + would return bigint(2) */ + bigint as_bigint() const; + /* Return the last limb of the standard representation of the + field element. E.g. on 64-bit architectures Fp(123).as_ulong() + and Fp(2^64+123).as_ulong() would both return 123. 
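       (Equivalently, as_ulong() == as_bigint().as_ulong(): both first convert out of
       the internal Montgomery form, so the value returned is the low limb of the
       standard representation, not a limb of mont_repr.)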
*/ + unsigned long as_ulong() const; + + bool operator==(const Fp_model& other) const; + bool operator!=(const Fp_model& other) const; + bool is_zero() const; + + void print() const; + + Fp_model& operator+=(const Fp_model& other); + Fp_model& operator-=(const Fp_model& other); + Fp_model& operator*=(const Fp_model& other); + Fp_model& operator^=(const unsigned long pow); + + template + Fp_model& operator^=(const bigint &pow); + + Fp_model operator+(const Fp_model& other) const; + Fp_model operator-(const Fp_model& other) const; + Fp_model operator*(const Fp_model& other) const; + Fp_model operator-() const; + Fp_model squared() const; + Fp_model& invert(); + Fp_model inverse() const; + Fp_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate) + + Fp_model operator^(const unsigned long pow) const; + template + Fp_model operator^(const bigint &pow) const; + + static size_t size_in_bits() { return num_bits; } + static size_t capacity() { return num_bits - 1; } + static bigint field_char() { return modulus; } + + static Fp_model zero(); + static Fp_model one(); + static Fp_model random_element(); + + friend std::ostream& operator<< (std::ostream &out, const Fp_model &p); + friend std::istream& operator>> (std::istream &in, Fp_model &p); +}; + +#ifdef PROFILE_OP_COUNTS +template& modulus> +long long Fp_model::add_cnt = 0; + +template& modulus> +long long Fp_model::sub_cnt = 0; + +template& modulus> +long long Fp_model::mul_cnt = 0; + +template& modulus> +long long Fp_model::sqr_cnt = 0; + +template& modulus> +long long Fp_model::inv_cnt = 0; +#endif + +template& modulus> +size_t Fp_model::num_bits; + +template& modulus> +bigint Fp_model::euler; + +template& modulus> +size_t Fp_model::s; + +template& modulus> +bigint Fp_model::t; + +template& modulus> +bigint Fp_model::t_minus_1_over_2; + +template& modulus> +Fp_model Fp_model::nqr; + +template& modulus> +Fp_model Fp_model::nqr_to_t; + +template& modulus> +Fp_model Fp_model::multiplicative_generator; + +template& modulus> +Fp_model Fp_model::root_of_unity; + +template& modulus> +mp_limb_t Fp_model::inv; + +template& modulus> +bigint Fp_model::Rsquared; + +template& modulus> +bigint Fp_model::Rcubed; + +} // libsnark +#include "algebra/fields/fp.tcc" + +#endif // FP_HPP_ diff --git a/src/algebra/fields/fp.tcc b/src/algebra/fields/fp.tcc new file mode 100644 index 000000000..566e99324 --- /dev/null +++ b/src/algebra/fields/fp.tcc @@ -0,0 +1,790 @@ +/** @file + ***************************************************************************** + Implementation of arithmetic in the finite field F[p], for prime p of fixed length. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP_TCC_ +#define FP_TCC_ +#include +#include +#include + +#include "algebra/fields/fp_aux.tcc" +#include "algebra/fields/field_utils.hpp" +#include "common/assert_except.hpp" + +namespace libsnark { + +template& modulus> +void Fp_model::mul_reduce(const bigint &other) +{ + /* stupid pre-processor tricks; beware */ +#if defined(__x86_64__) && defined(USE_ASM) + if (n == 3) + { // Use asm-optimized Comba multiplication and reduction + mp_limb_t res[2*n]; + mp_limb_t c0, c1, c2; + COMBA_3_BY_3_MUL(c0, c1, c2, res, this->mont_repr.data, other.data); + + mp_limb_t k; + mp_limb_t tmp1, tmp2, tmp3; + REDUCE_6_LIMB_PRODUCT(k, tmp1, tmp2, tmp3, inv, res, modulus.data); + + /* subtract t > mod */ + __asm__ + ("/* check for overflow */ \n\t" + MONT_CMP(16) + MONT_CMP(8) + MONT_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + MONT_FIRSTSUB + MONT_NEXTSUB(8) + MONT_NEXTSUB(16) + "done%=: \n\t" + : + : [tmp] "r" (res+n), [M] "r" (modulus.data) + : "cc", "memory", "%rax"); + mpn_copyi(this->mont_repr.data, res+n, n); + } + else if (n == 4) + { // use asm-optimized "CIOS method" + + mp_limb_t tmp[n+1]; + mp_limb_t T0=0, T1=1, cy=2, u=3; // TODO: fix this + + __asm__ (MONT_PRECOMPUTE + MONT_FIRSTITER(1) + MONT_FIRSTITER(2) + MONT_FIRSTITER(3) + MONT_FINALIZE(3) + MONT_ITERFIRST(1) + MONT_ITERITER(1, 1) + MONT_ITERITER(1, 2) + MONT_ITERITER(1, 3) + MONT_FINALIZE(3) + MONT_ITERFIRST(2) + MONT_ITERITER(2, 1) + MONT_ITERITER(2, 2) + MONT_ITERITER(2, 3) + MONT_FINALIZE(3) + MONT_ITERFIRST(3) + MONT_ITERITER(3, 1) + MONT_ITERITER(3, 2) + MONT_ITERITER(3, 3) + MONT_FINALIZE(3) + "/* check for overflow */ \n\t" + MONT_CMP(24) + MONT_CMP(16) + MONT_CMP(8) + MONT_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + MONT_FIRSTSUB + MONT_NEXTSUB(8) + MONT_NEXTSUB(16) + MONT_NEXTSUB(24) + "done%=: \n\t" + : + : [tmp] "r" (tmp), [A] "r" (this->mont_repr.data), [B] "r" (other.data), [inv] "r" (inv), [M] "r" (modulus.data), + [T0] "r" (T0), [T1] "r" (T1), [cy] "r" (cy), [u] "r" (u) + : "cc", "memory", "%rax", "%rdx" + ); + mpn_copyi(this->mont_repr.data, tmp, n); + } + else if (n == 5) + { // use asm-optimized "CIOS method" + + mp_limb_t tmp[n+1]; + mp_limb_t T0=0, T1=1, cy=2, u=3; // TODO: fix this + + __asm__ (MONT_PRECOMPUTE + MONT_FIRSTITER(1) + MONT_FIRSTITER(2) + MONT_FIRSTITER(3) + MONT_FIRSTITER(4) + MONT_FINALIZE(4) + MONT_ITERFIRST(1) + MONT_ITERITER(1, 1) + MONT_ITERITER(1, 2) + MONT_ITERITER(1, 3) + MONT_ITERITER(1, 4) + MONT_FINALIZE(4) + MONT_ITERFIRST(2) + MONT_ITERITER(2, 1) + MONT_ITERITER(2, 2) + MONT_ITERITER(2, 3) + MONT_ITERITER(2, 4) + MONT_FINALIZE(4) + MONT_ITERFIRST(3) + MONT_ITERITER(3, 1) + MONT_ITERITER(3, 2) + MONT_ITERITER(3, 3) + MONT_ITERITER(3, 4) + MONT_FINALIZE(4) + MONT_ITERFIRST(4) + MONT_ITERITER(4, 1) + MONT_ITERITER(4, 2) + MONT_ITERITER(4, 3) + MONT_ITERITER(4, 4) + MONT_FINALIZE(4) + "/* check for overflow */ \n\t" + MONT_CMP(32) + MONT_CMP(24) + MONT_CMP(16) + MONT_CMP(8) + MONT_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + MONT_FIRSTSUB + MONT_NEXTSUB(8) + MONT_NEXTSUB(16) + MONT_NEXTSUB(24) + MONT_NEXTSUB(32) + "done%=: \n\t" + : + : [tmp] "r" (tmp), [A] "r" (this->mont_repr.data), [B] "r" (other.data), [inv] "r" (inv), [M] "r" (modulus.data), + [T0] "r" (T0), [T1] "r" (T1), [cy] "r" (cy), [u] "r" (u) + : "cc", "memory", "%rax", "%rdx" + ); + mpn_copyi(this->mont_repr.data, 
tmp, n); + } + else +#endif + { + mp_limb_t res[2*n]; + mpn_mul_n(res, this->mont_repr.data, other.data, n); + + /* + The Montgomery reduction here is based on Algorithm 14.32 in + Handbook of Applied Cryptography + . + */ + for (size_t i = 0; i < n; ++i) + { + mp_limb_t k = inv * res[i]; + /* calculate res = res + k * mod * b^i */ + mp_limb_t carryout = mpn_addmul_1(res+i, modulus.data, n, k); + carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout); + assert(carryout == 0); + } + + if (mpn_cmp(res+n, modulus.data, n) >= 0) + { + const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus.data, n); + assert(borrow == 0); + } + + mpn_copyi(this->mont_repr.data, res+n, n); + } +} + +template& modulus> +Fp_model::Fp_model(const bigint &b) +{ + mpn_copyi(this->mont_repr.data, Rsquared.data, n); + mul_reduce(b); +} + +template& modulus> +Fp_model::Fp_model(const long x, const bool is_unsigned) +{ + if (is_unsigned || x >= 0) + { + this->mont_repr.data[0] = x; + } + else + { + const mp_limb_t borrow = mpn_sub_1(this->mont_repr.data, modulus.data, n, -x); + assert(borrow == 0); + } + + mul_reduce(Rsquared); +} + +template& modulus> +void Fp_model::set_ulong(const unsigned long x) +{ + this->mont_repr.clear(); + this->mont_repr.data[0] = x; + mul_reduce(Rsquared); +} + +template& modulus> +void Fp_model::clear() +{ + this->mont_repr.clear(); +} + +template& modulus> +bigint Fp_model::as_bigint() const +{ + bigint one; + one.clear(); + one.data[0] = 1; + + Fp_model res(*this); + res.mul_reduce(one); + + return (res.mont_repr); +} + +template& modulus> +unsigned long Fp_model::as_ulong() const +{ + return this->as_bigint().as_ulong(); +} + +template& modulus> +bool Fp_model::operator==(const Fp_model& other) const +{ + return (this->mont_repr == other.mont_repr); +} + +template& modulus> +bool Fp_model::operator!=(const Fp_model& other) const +{ + return (this->mont_repr != other.mont_repr); +} + +template& modulus> +bool Fp_model::is_zero() const +{ + return (this->mont_repr.is_zero()); // zero maps to zero +} + +template& modulus> +void Fp_model::print() const +{ + Fp_model tmp; + tmp.mont_repr.data[0] = 1; + tmp.mul_reduce(this->mont_repr); + + tmp.mont_repr.print(); +} + +template& modulus> +Fp_model Fp_model::zero() +{ + Fp_model res; + res.mont_repr.clear(); + return res; +} + +template& modulus> +Fp_model Fp_model::one() +{ + Fp_model res; + res.mont_repr.data[0] = 1; + res.mul_reduce(Rsquared); + return res; +} + +template& modulus> +Fp_model& Fp_model::operator+=(const Fp_model& other) +{ +#ifdef PROFILE_OP_COUNTS + this->add_cnt++; +#endif +#if defined(__x86_64__) && defined(USE_ASM) + if (n == 3) + { + __asm__ + ("/* perform bignum addition */ \n\t" + ADD_FIRSTADD + ADD_NEXTADD(8) + ADD_NEXTADD(16) + "/* if overflow: subtract */ \n\t" + "/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t" + "jc subtract%= \n\t" + + "/* check for overflow */ \n\t" + ADD_CMP(16) + ADD_CMP(8) + ADD_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + ADD_FIRSTSUB + ADD_NEXTSUB(8) + ADD_NEXTSUB(16) + "done%=: \n\t" + : + : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data) + : "cc", "memory", "%rax"); + } + else if (n == 4) + { + __asm__ + ("/* perform bignum addition */ \n\t" + ADD_FIRSTADD + ADD_NEXTADD(8) + ADD_NEXTADD(16) + ADD_NEXTADD(24) + "/* if overflow: subtract */ \n\t" + "/* (tricky point: if A and B are in the range we do not need to do anything special for the possible 
carry flag) */ \n\t" + "jc subtract%= \n\t" + + "/* check for overflow */ \n\t" + ADD_CMP(24) + ADD_CMP(16) + ADD_CMP(8) + ADD_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + ADD_FIRSTSUB + ADD_NEXTSUB(8) + ADD_NEXTSUB(16) + ADD_NEXTSUB(24) + "done%=: \n\t" + : + : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data) + : "cc", "memory", "%rax"); + } + else if (n == 5) + { + __asm__ + ("/* perform bignum addition */ \n\t" + ADD_FIRSTADD + ADD_NEXTADD(8) + ADD_NEXTADD(16) + ADD_NEXTADD(24) + ADD_NEXTADD(32) + "/* if overflow: subtract */ \n\t" + "/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t" + "jc subtract%= \n\t" + + "/* check for overflow */ \n\t" + ADD_CMP(32) + ADD_CMP(24) + ADD_CMP(16) + ADD_CMP(8) + ADD_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + ADD_FIRSTSUB + ADD_NEXTSUB(8) + ADD_NEXTSUB(16) + ADD_NEXTSUB(24) + ADD_NEXTSUB(32) + "done%=: \n\t" + : + : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data) + : "cc", "memory", "%rax"); + } + else +#endif + { + mp_limb_t scratch[n+1]; + const mp_limb_t carry = mpn_add_n(scratch, this->mont_repr.data, other.mont_repr.data, n); + scratch[n] = carry; + + if (carry || mpn_cmp(scratch, modulus.data, n) >= 0) + { + const mp_limb_t borrow = mpn_sub(scratch, scratch, n+1, modulus.data, n); + assert(borrow == 0); + } + + mpn_copyi(this->mont_repr.data, scratch, n); + } + + return *this; +} + +template& modulus> +Fp_model& Fp_model::operator-=(const Fp_model& other) +{ +#ifdef PROFILE_OP_COUNTS + this->sub_cnt++; +#endif +#if defined(__x86_64__) && defined(USE_ASM) + if (n == 3) + { + __asm__ + (SUB_FIRSTSUB + SUB_NEXTSUB(8) + SUB_NEXTSUB(16) + + "jnc done%=\n\t" + + SUB_FIRSTADD + SUB_NEXTADD(8) + SUB_NEXTADD(16) + + "done%=:\n\t" + : + : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data) + : "cc", "memory", "%rax"); + } + else if (n == 4) + { + __asm__ + (SUB_FIRSTSUB + SUB_NEXTSUB(8) + SUB_NEXTSUB(16) + SUB_NEXTSUB(24) + + "jnc done%=\n\t" + + SUB_FIRSTADD + SUB_NEXTADD(8) + SUB_NEXTADD(16) + SUB_NEXTADD(24) + + "done%=:\n\t" + : + : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data) + : "cc", "memory", "%rax"); + } + else if (n == 5) + { + __asm__ + (SUB_FIRSTSUB + SUB_NEXTSUB(8) + SUB_NEXTSUB(16) + SUB_NEXTSUB(24) + SUB_NEXTSUB(32) + + "jnc done%=\n\t" + + SUB_FIRSTADD + SUB_NEXTADD(8) + SUB_NEXTADD(16) + SUB_NEXTADD(24) + SUB_NEXTADD(32) + + "done%=:\n\t" + : + : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data) + : "cc", "memory", "%rax"); + } + else +#endif + { + mp_limb_t scratch[n+1]; + if (mpn_cmp(this->mont_repr.data, other.mont_repr.data, n) < 0) + { + const mp_limb_t carry = mpn_add_n(scratch, this->mont_repr.data, modulus.data, n); + scratch[n] = carry; + } + else + { + mpn_copyi(scratch, this->mont_repr.data, n); + scratch[n] = 0; + } + + const mp_limb_t borrow = mpn_sub(scratch, scratch, n+1, other.mont_repr.data, n); + assert(borrow == 0); + + mpn_copyi(this->mont_repr.data, scratch, n); + } + return *this; +} + +template& modulus> +Fp_model& Fp_model::operator*=(const Fp_model& other) +{ +#ifdef PROFILE_OP_COUNTS + this->mul_cnt++; +#endif + + mul_reduce(other.mont_repr); + return *this; +} + +template& modulus> +Fp_model& Fp_model::operator^=(const unsigned long pow) +{ + (*this) = power >(*this, pow); + return (*this); 
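    /* Note: both operator^= overloads (word-sized exponent here, bigint exponent below)
       delegate to the generic square-and-multiply routine power() declared in
       algebra/exponentiation/exponentiation.hpp. */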
+} + +template& modulus> +template +Fp_model& Fp_model::operator^=(const bigint &pow) +{ + (*this) = power, m>(*this, pow); + return (*this); +} + +template& modulus> +Fp_model Fp_model::operator+(const Fp_model& other) const +{ + Fp_model r(*this); + return (r += other); +} + +template& modulus> +Fp_model Fp_model::operator-(const Fp_model& other) const +{ + Fp_model r(*this); + return (r -= other); +} + +template& modulus> +Fp_model Fp_model::operator*(const Fp_model& other) const +{ + Fp_model r(*this); + return (r *= other); +} + +template& modulus> +Fp_model Fp_model::operator^(const unsigned long pow) const +{ + Fp_model r(*this); + return (r ^= pow); +} + +template& modulus> +template +Fp_model Fp_model::operator^(const bigint &pow) const +{ + Fp_model r(*this); + return (r ^= pow); +} + +template& modulus> +Fp_model Fp_model::operator-() const +{ +#ifdef PROFILE_OP_COUNTS + this->sub_cnt++; +#endif + + if (this->is_zero()) + { + return (*this); + } + else + { + Fp_model r; + mpn_sub_n(r.mont_repr.data, modulus.data, this->mont_repr.data, n); + return r; + } +} + +template& modulus> +Fp_model Fp_model::squared() const +{ +#ifdef PROFILE_OP_COUNTS + this->sqr_cnt++; + this->mul_cnt--; // zero out the upcoming mul +#endif + /* stupid pre-processor tricks; beware */ +#if defined(__x86_64__) && defined(USE_ASM) + if (n == 3) + { // use asm-optimized Comba squaring + mp_limb_t res[2*n]; + mp_limb_t c0, c1, c2; + COMBA_3_BY_3_SQR(c0, c1, c2, res, this->mont_repr.data); + + mp_limb_t k; + mp_limb_t tmp1, tmp2, tmp3; + REDUCE_6_LIMB_PRODUCT(k, tmp1, tmp2, tmp3, inv, res, modulus.data); + + /* subtract t > mod */ + __asm__ volatile + ("/* check for overflow */ \n\t" + MONT_CMP(16) + MONT_CMP(8) + MONT_CMP(0) + + "/* subtract mod if overflow */ \n\t" + "subtract%=: \n\t" + MONT_FIRSTSUB + MONT_NEXTSUB(8) + MONT_NEXTSUB(16) + "done%=: \n\t" + : + : [tmp] "r" (res+n), [M] "r" (modulus.data) + : "cc", "memory", "%rax"); + + Fp_model r; + mpn_copyi(r.mont_repr.data, res+n, n); + return r; + } + else +#endif + { + Fp_model r(*this); + return (r *= r); + } +} + +template& modulus> +Fp_model& Fp_model::invert() +{ +#ifdef PROFILE_OP_COUNTS + this->inv_cnt++; +#endif + + assert(!this->is_zero()); + + bigint g; /* gp should have room for vn = n limbs */ + + mp_limb_t s[n+1]; /* sp should have room for vn+1 limbs */ + mp_size_t sn; + + bigint v = modulus; // both source operands are destroyed by mpn_gcdext + + /* computes gcd(u, v) = g = u*s + v*t, so s*u will be 1 (mod v) */ + const mp_size_t gn = mpn_gcdext(g.data, s, &sn, this->mont_repr.data, n, v.data, n); + assert(gn == 1 && g.data[0] == 1); /* inverse exists */ + + mp_limb_t q; /* division result fits into q, as sn <= n+1 */ + /* sn < 0 indicates negative sn; will fix up later */ + + if (std::abs(sn) >= n) + { + /* if sn could require modulus reduction, do it here */ + mpn_tdiv_qr(&q, this->mont_repr.data, 0, s, std::abs(sn), modulus.data, n); + } + else + { + /* otherwise just copy it over */ + mpn_zero(this->mont_repr.data, n); + mpn_copyi(this->mont_repr.data, s, std::abs(sn)); + } + + /* fix up the negative sn */ + if (sn < 0) + { + const mp_limb_t borrow = mpn_sub_n(this->mont_repr.data, modulus.data, this->mont_repr.data, n); + assert(borrow == 0); + } + + mul_reduce(Rcubed); + return *this; +} + +template& modulus> +Fp_model Fp_model::inverse() const +{ + Fp_model r(*this); + return (r.invert()); +} + +template& modulus> +Fp_model Fp_model::random_element() /// returns random element of Fp_model +{ + /* note that as Montgomery 
representation is a bijection then + selecting a random element of {xR} is the same as selecting a + random element of {x} */ + Fp_model r; + do + { + r.mont_repr.randomize(); + + /* clear all bits higher than MSB of modulus */ + size_t bitno = GMP_NUMB_BITS * n - 1; + while (modulus.test_bit(bitno) == false) + { + const std::size_t part = bitno/GMP_NUMB_BITS; + const std::size_t bit = bitno - (GMP_NUMB_BITS*part); + + r.mont_repr.data[part] &= ~(1ul<= modulus -- repeat (rejection sampling) */ + while (mpn_cmp(r.mont_repr.data, modulus.data, n) >= 0); + + return r; +} + +template& modulus> +Fp_model Fp_model::sqrt() const +{ + if (is_zero()) { + return *this; + } + + Fp_model one = Fp_model::one(); + + size_t v = Fp_model::s; + Fp_model z = Fp_model::nqr_to_t; + Fp_model w = (*this)^Fp_model::t_minus_1_over_2; + Fp_model x = (*this) * w; + Fp_model b = x * w; // b = (*this)^t + + + // check if square with euler's criterion + Fp_model check = b; + for (size_t i = 0; i < v-1; ++i) + { + check = check.squared(); + } + if (check != one) + { + assert_except(0); + } + + + // compute square root with Tonelli--Shanks + // (does not terminate if not a square!) + + while (b != one) + { + size_t m = 0; + Fp_model b2m = b; + while (b2m != one) + { + /* invariant: b2m = b^(2^m) after entering this loop */ + b2m = b2m.squared(); + m += 1; + } + + int j = v-m-1; + w = z; + while (j > 0) + { + w = w.squared(); + --j; + } // w = z^2^(v-m-1) + + z = w.squared(); + b = b * z; + x = x * w; + v = m; + } + + return x; +} + +template& modulus> +std::ostream& operator<<(std::ostream &out, const Fp_model &p) +{ +#ifndef MONTGOMERY_OUTPUT + Fp_model tmp; + tmp.mont_repr.data[0] = 1; + tmp.mul_reduce(p.mont_repr); + out << tmp.mont_repr; +#else + out << p.mont_repr; +#endif + return out; +} + +template& modulus> +std::istream& operator>>(std::istream &in, Fp_model &p) +{ +#ifndef MONTGOMERY_OUTPUT + in >> p.mont_repr; + p.mul_reduce(Fp_model::Rsquared); +#else + in >> p.mont_repr; +#endif + return in; +} + +} // libsnark +#endif // FP_TCC_ diff --git a/src/algebra/fields/fp12_2over3over2.hpp b/src/algebra/fields/fp12_2over3over2.hpp new file mode 100644 index 000000000..1de9d88b4 --- /dev/null +++ b/src/algebra/fields/fp12_2over3over2.hpp @@ -0,0 +1,116 @@ +/** @file + ***************************************************************************** + Declaration of arithmetic in the finite field F[((p^2)^3)^2]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP12_2OVER3OVER2_HPP_ +#define FP12_2OVER3OVER2_HPP_ +#include "algebra/fields/fp.hpp" +#include "algebra/fields/fp2.hpp" +#include "algebra/fields/fp6_3over2.hpp" +#include + +namespace libsnark { + +template& modulus> +class Fp12_2over3over2_model; + +template& modulus> +std::ostream& operator<<(std::ostream &, const Fp12_2over3over2_model &); + +template& modulus> +std::istream& operator>>(std::istream &, Fp12_2over3over2_model &); + +/** + * Arithmetic in the finite field F[((p^2)^3)^2]. + * + * Let p := modulus. 
This interface provides arithmetic for the extension field + * Fp12 = Fp6[W]/(W^2-V) where Fp6 = Fp2[V]/(V^3-non_residue) and non_residue is in Fp2 + * + * ASSUMPTION: p = 1 (mod 6) + */ +template& modulus> +class Fp12_2over3over2_model { +public: + typedef Fp_model my_Fp; + typedef Fp2_model my_Fp2; + typedef Fp6_3over2_model my_Fp6; + + static Fp2_model non_residue; + static Fp2_model Frobenius_coeffs_c1[12]; // non_residue^((modulus^i-1)/6) for i=0,...,11 + + my_Fp6 c0, c1; + Fp12_2over3over2_model() {}; + Fp12_2over3over2_model(const my_Fp6& c0, const my_Fp6& c1) : c0(c0), c1(c1) {}; + + void clear() { c0.clear(); c1.clear(); } + void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); } + + static Fp12_2over3over2_model zero(); + static Fp12_2over3over2_model one(); + static Fp12_2over3over2_model random_element(); + + bool is_zero() const { return c0.is_zero() && c1.is_zero(); } + bool operator==(const Fp12_2over3over2_model &other) const; + bool operator!=(const Fp12_2over3over2_model &other) const; + + Fp12_2over3over2_model operator+(const Fp12_2over3over2_model &other) const; + Fp12_2over3over2_model operator-(const Fp12_2over3over2_model &other) const; + Fp12_2over3over2_model operator*(const Fp12_2over3over2_model &other) const; + Fp12_2over3over2_model operator-() const; + Fp12_2over3over2_model squared() const; // default is squared_complex + Fp12_2over3over2_model squared_karatsuba() const; + Fp12_2over3over2_model squared_complex() const; + Fp12_2over3over2_model inverse() const; + Fp12_2over3over2_model Frobenius_map(unsigned long power) const; + Fp12_2over3over2_model unitary_inverse() const; + Fp12_2over3over2_model cyclotomic_squared() const; + + Fp12_2over3over2_model mul_by_024(const my_Fp2 &ell_0, const my_Fp2 &ell_VW, const my_Fp2 &ell_VV) const; + + static my_Fp6 mul_by_non_residue(const my_Fp6 &elt); + + template + Fp12_2over3over2_model cyclotomic_exp(const bigint &exponent) const; + + static bigint base_field_char() { return modulus; } + static size_t extension_degree() { return 12; } + + friend std::ostream& operator<< (std::ostream &out, const Fp12_2over3over2_model &el); + friend std::istream& operator>> (std::istream &in, Fp12_2over3over2_model &el); +}; + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v); + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v); + +template& modulus> +Fp12_2over3over2_model operator*(const Fp_model &lhs, const Fp12_2over3over2_model &rhs); + +template& modulus> +Fp12_2over3over2_model operator*(const Fp2_model &lhs, const Fp12_2over3over2_model &rhs); + +template& modulus> +Fp12_2over3over2_model operator*(const Fp6_3over2_model &lhs, const Fp12_2over3over2_model &rhs); + +template& modulus, mp_size_t m> +Fp12_2over3over2_model operator^(const Fp12_2over3over2_model &self, const bigint &exponent); + +template& modulus, mp_size_t m, const bigint& exp_modulus> +Fp12_2over3over2_model operator^(const Fp12_2over3over2_model &self, const Fp_model &exponent); + +template& modulus> +Fp2_model Fp12_2over3over2_model::non_residue; + +template& modulus> +Fp2_model Fp12_2over3over2_model::Frobenius_coeffs_c1[12]; + +} // libsnark +#include "algebra/fields/fp12_2over3over2.tcc" +#endif // FP12_2OVER3OVER2_HPP_ diff --git a/src/algebra/fields/fp12_2over3over2.tcc b/src/algebra/fields/fp12_2over3over2.tcc new file mode 100644 index 000000000..2fbc0b649 --- /dev/null +++ b/src/algebra/fields/fp12_2over3over2.tcc @@ -0,0 +1,412 @@ +/** @file + 
***************************************************************************** + Implementation of arithmetic in the finite field F[((p^2)^3)^2]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP12_2OVER3OVER2_TCC_ +#define FP12_2OVER3OVER2_TCC_ + +namespace libsnark { + +template& modulus> +Fp6_3over2_model Fp12_2over3over2_model::mul_by_non_residue(const Fp6_3over2_model &elt) +{ + return Fp6_3over2_model(non_residue * elt.c2, elt.c0, elt.c1); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::zero() +{ + return Fp12_2over3over2_model(my_Fp6::zero(), my_Fp6::zero()); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::one() +{ + return Fp12_2over3over2_model(my_Fp6::one(), my_Fp6::zero()); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::random_element() +{ + Fp12_2over3over2_model r; + r.c0 = my_Fp6::random_element(); + r.c1 = my_Fp6::random_element(); + + return r; +} + +template& modulus> +bool Fp12_2over3over2_model::operator==(const Fp12_2over3over2_model &other) const +{ + return (this->c0 == other.c0 && this->c1 == other.c1); +} + +template& modulus> +bool Fp12_2over3over2_model::operator!=(const Fp12_2over3over2_model &other) const +{ + return !(operator==(other)); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::operator+(const Fp12_2over3over2_model &other) const +{ + return Fp12_2over3over2_model(this->c0 + other.c0, + this->c1 + other.c1); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::operator-(const Fp12_2over3over2_model &other) const +{ + return Fp12_2over3over2_model(this->c0 - other.c0, + this->c1 - other.c1); +} + +template& modulus> +Fp12_2over3over2_model operator*(const Fp_model &lhs, const Fp12_2over3over2_model &rhs) +{ + return Fp12_2over3over2_model(lhs*rhs.c0, + lhs*rhs.c1); +} + +template& modulus> +Fp12_2over3over2_model operator*(const Fp2_model &lhs, const Fp12_2over3over2_model &rhs) +{ + return Fp12_2over3over2_model(lhs*rhs.c0, + lhs*rhs.c1); +} + +template& modulus> +Fp12_2over3over2_model operator*(const Fp6_3over2_model &lhs, const Fp12_2over3over2_model &rhs) +{ + return Fp12_2over3over2_model(lhs*rhs.c0, + lhs*rhs.c1); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::operator*(const Fp12_2over3over2_model &other) const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba) */ + + const my_Fp6 &A = other.c0, &B = other.c1, + &a = this->c0, &b = this->c1; + const my_Fp6 aA = a * A; + const my_Fp6 bB = b * B; + + return Fp12_2over3over2_model(aA + Fp12_2over3over2_model::mul_by_non_residue(bB), + (a + b)*(A+B) - aA - bB); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::operator-() const +{ + return Fp12_2over3over2_model(-this->c0, + -this->c1); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::squared() const +{ + return squared_complex(); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::squared_karatsuba() const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba squaring) */ + + const my_Fp6 &a = this->c0, &b = this->c1; + const my_Fp6 asq = 
a.squared(); + const my_Fp6 bsq = b.squared(); + + return Fp12_2over3over2_model(asq + Fp12_2over3over2_model::mul_by_non_residue(bsq), + (a + b).squared() - asq - bsq); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::squared_complex() const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Complex squaring) */ + + const my_Fp6 &a = this->c0, &b = this->c1; + const my_Fp6 ab = a * b; + + return Fp12_2over3over2_model((a + b) * (a + Fp12_2over3over2_model::mul_by_non_residue(b)) - ab - Fp12_2over3over2_model::mul_by_non_residue(ab), + ab + ab); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::inverse() const +{ + /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 8 */ + + const my_Fp6 &a = this->c0, &b = this->c1; + const my_Fp6 t0 = a.squared(); + const my_Fp6 t1 = b.squared(); + const my_Fp6 t2 = t0 - Fp12_2over3over2_model::mul_by_non_residue(t1); + const my_Fp6 t3 = t2.inverse(); + const my_Fp6 c0 = a * t3; + const my_Fp6 c1 = - (b * t3); + + return Fp12_2over3over2_model(c0, c1); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::Frobenius_map(unsigned long power) const +{ + return Fp12_2over3over2_model(c0.Frobenius_map(power), + Frobenius_coeffs_c1[power % 12] * c1.Frobenius_map(power)); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::unitary_inverse() const +{ + return Fp12_2over3over2_model(this->c0, + -this->c1); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::cyclotomic_squared() const +{ + /* OLD: naive implementation + return (*this).squared(); + */ + my_Fp2 z0 = this->c0.c0; + my_Fp2 z4 = this->c0.c1; + my_Fp2 z3 = this->c0.c2; + my_Fp2 z2 = this->c1.c0; + my_Fp2 z1 = this->c1.c1; + my_Fp2 z5 = this->c1.c2; + + my_Fp2 t0, t1, t2, t3, t4, t5, tmp; + + // t0 + t1*y = (z0 + z1*y)^2 = a^2 + tmp = z0 * z1; + t0 = (z0 + z1) * (z0 + my_Fp6::non_residue * z1) - tmp - my_Fp6::non_residue * tmp; + t1 = tmp + tmp; + // t2 + t3*y = (z2 + z3*y)^2 = b^2 + tmp = z2 * z3; + t2 = (z2 + z3) * (z2 + my_Fp6::non_residue * z3) - tmp - my_Fp6::non_residue * tmp; + t3 = tmp + tmp; + // t4 + t5*y = (z4 + z5*y)^2 = c^2 + tmp = z4 * z5; + t4 = (z4 + z5) * (z4 + my_Fp6::non_residue * z5) - tmp - my_Fp6::non_residue * tmp; + t5 = tmp + tmp; + + // for A + + // z0 = 3 * t0 - 2 * z0 + z0 = t0 - z0; + z0 = z0 + z0; + z0 = z0 + t0; + // z1 = 3 * t1 + 2 * z1 + z1 = t1 + z1; + z1 = z1 + z1; + z1 = z1 + t1; + + // for B + + // z2 = 3 * (xi * t5) + 2 * z2 + tmp = my_Fp6::non_residue * t5; + z2 = tmp + z2; + z2 = z2 + z2; + z2 = z2 + tmp; + + // z3 = 3 * t4 - 2 * z3 + z3 = t4 - z3; + z3 = z3 + z3; + z3 = z3 + t4; + + // for C + + // z4 = 3 * t2 - 2 * z4 + z4 = t2 - z4; + z4 = z4 + z4; + z4 = z4 + t2; + + // z5 = 3 * t3 + 2 * z5 + z5 = t3 + z5; + z5 = z5 + z5; + z5 = z5 + t3; + + return Fp12_2over3over2_model(my_Fp6(z0,z4,z3),my_Fp6(z2,z1,z5)); +} + +template& modulus> +Fp12_2over3over2_model Fp12_2over3over2_model::mul_by_024(const Fp2_model &ell_0, + const Fp2_model &ell_VW, + const Fp2_model &ell_VV) const +{ + /* OLD: naive implementation + Fp12_2over3over2_model a(my_Fp6(ell_0, my_Fp2::zero(), ell_VV), + my_Fp6(my_Fp2::zero(), ell_VW, my_Fp2::zero())); + + return (*this) * a; + */ + my_Fp2 z0 = this->c0.c0; + my_Fp2 z1 = this->c0.c1; + my_Fp2 z2 = this->c0.c2; + my_Fp2 z3 = this->c1.c0; + my_Fp2 z4 = this->c1.c1; + my_Fp2 z5 = this->c1.c2; + + my_Fp2 x0 = ell_0; + my_Fp2 
x2 = ell_VV; + my_Fp2 x4 = ell_VW; + + my_Fp2 t0, t1, t2, s0, T3, T4, D0, D2, D4, S1; + + D0 = z0 * x0; + D2 = z2 * x2; + D4 = z4 * x4; + t2 = z0 + z4; + t1 = z0 + z2; + s0 = z1 + z3 + z5; + + // For z.a_.a_ = z0. + S1 = z1 * x2; + T3 = S1 + D4; + T4 = my_Fp6::non_residue * T3 + D0; + z0 = T4; + + // For z.a_.b_ = z1 + T3 = z5 * x4; + S1 = S1 + T3; + T3 = T3 + D2; + T4 = my_Fp6::non_residue * T3; + T3 = z1 * x0; + S1 = S1 + T3; + T4 = T4 + T3; + z1 = T4; + + // For z.a_.c_ = z2 + t0 = x0 + x2; + T3 = t1 * t0 - D0 - D2; + T4 = z3 * x4; + S1 = S1 + T4; + T3 = T3 + T4; + + // For z.b_.a_ = z3 (z3 needs z2) + t0 = z2 + z4; + z2 = T3; + t1 = x2 + x4; + T3 = t0 * t1 - D2 - D4; + T4 = my_Fp6::non_residue * T3; + T3 = z3 * x0; + S1 = S1 + T3; + T4 = T4 + T3; + z3 = T4; + + // For z.b_.b_ = z4 + T3 = z5 * x2; + S1 = S1 + T3; + T4 = my_Fp6::non_residue * T3; + t0 = x0 + x4; + T3 = t2 * t0 - D0 - D4; + T4 = T4 + T3; + z4 = T4; + + // For z.b_.c_ = z5. + t0 = x0 + x2 + x4; + T3 = s0 * t0 - S1; + z5 = T3; + + return Fp12_2over3over2_model(my_Fp6(z0,z1,z2),my_Fp6(z3,z4,z5)); + +} + +template& modulus, mp_size_t m> +Fp12_2over3over2_model operator^(const Fp12_2over3over2_model &self, const bigint &exponent) +{ + return power >(self, exponent); +} + +template& modulus, mp_size_t m, const bigint& exp_modulus> +Fp12_2over3over2_model operator^(const Fp12_2over3over2_model &self, const Fp_model &exponent) +{ + return self^(exponent.as_bigint()); +} + + +template& modulus> +template +Fp12_2over3over2_model Fp12_2over3over2_model::cyclotomic_exp(const bigint &exponent) const +{ + Fp12_2over3over2_model res = Fp12_2over3over2_model::one(); + + bool found_one = false; + for (long i = m-1; i >= 0; --i) + { + for (long j = GMP_NUMB_BITS - 1; j >= 0; --j) + { + if (found_one) + { + res = res.cyclotomic_squared(); + } + + if (exponent.data[i] & (1ul<& modulus> +std::ostream& operator<<(std::ostream &out, const Fp12_2over3over2_model &el) +{ + out << el.c0 << OUTPUT_SEPARATOR << el.c1; + return out; +} + +template& modulus> +std::istream& operator>>(std::istream &in, Fp12_2over3over2_model &el) +{ + in >> el.c0 >> el.c1; + return in; +} + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v) +{ + out << v.size() << "\n"; + for (const Fp12_2over3over2_model& t : v) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v) +{ + v.clear(); + + size_t s; + in >> s; + + char b; + in.read(&b, 1); + + v.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + Fp12_2over3over2_model el; + in >> el; + v.emplace_back(el); + } + + return in; +} + +} // libsnark +#endif // FP12_2OVER3OVER2_TCC_ diff --git a/src/algebra/fields/fp2.hpp b/src/algebra/fields/fp2.hpp new file mode 100644 index 000000000..f07726918 --- /dev/null +++ b/src/algebra/fields/fp2.hpp @@ -0,0 +1,120 @@ +/** @file + ***************************************************************************** + Implementation of arithmetic in the finite field F[p^2]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP2_HPP_ +#define FP2_HPP_ +#include "algebra/fields/fp.hpp" +#include + +namespace libsnark { + +template& modulus> +class Fp2_model; + +template& modulus> +std::ostream& operator<<(std::ostream &, const Fp2_model &); + +template& modulus> +std::istream& operator>>(std::istream &, Fp2_model &); + +/** + * Arithmetic in the field F[p^3]. + * + * Let p := modulus. This interface provides arithmetic for the extension field + * Fp2 = Fp[U]/(U^2-non_residue), where non_residue is in Fp. + * + * ASSUMPTION: p = 1 (mod 6) + */ +template& modulus> +class Fp2_model { +public: + typedef Fp_model my_Fp; + + static bigint<2*n> euler; // (modulus^2-1)/2 + static size_t s; // modulus^2 = 2^s * t + 1 + static bigint<2*n> t; // with t odd + static bigint<2*n> t_minus_1_over_2; // (t-1)/2 + static my_Fp non_residue; // X^4-non_residue irreducible over Fp; used for constructing Fp2 = Fp[X] / (X^2 - non_residue) + static Fp2_model nqr; // a quadratic nonresidue in Fp2 + static Fp2_model nqr_to_t; // nqr^t + static my_Fp Frobenius_coeffs_c1[2]; // non_residue^((modulus^i-1)/2) for i=0,1 + + my_Fp c0, c1; + Fp2_model() {}; + Fp2_model(const my_Fp& c0, const my_Fp& c1) : c0(c0), c1(c1) {}; + + void clear() { c0.clear(); c1.clear(); } + void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); } + + static Fp2_model zero(); + static Fp2_model one(); + static Fp2_model random_element(); + + bool is_zero() const { return c0.is_zero() && c1.is_zero(); } + bool operator==(const Fp2_model &other) const; + bool operator!=(const Fp2_model &other) const; + + Fp2_model operator+(const Fp2_model &other) const; + Fp2_model operator-(const Fp2_model &other) const; + Fp2_model operator*(const Fp2_model &other) const; + Fp2_model operator-() const; + Fp2_model squared() const; // default is squared_complex + Fp2_model inverse() const; + Fp2_model Frobenius_map(unsigned long power) const; + Fp2_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate) + Fp2_model squared_karatsuba() const; + Fp2_model squared_complex() const; + + template + Fp2_model operator^(const bigint &other) const; + + static size_t size_in_bits() { return 2*my_Fp::size_in_bits(); } + static bigint base_field_char() { return modulus; } + + friend std::ostream& operator<< (std::ostream &out, const Fp2_model &el); + friend std::istream& operator>> (std::istream &in, Fp2_model &el); +}; + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v); + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v); + +template& modulus> +Fp2_model operator*(const Fp_model &lhs, const Fp2_model &rhs); + +template& modulus> +bigint<2*n> Fp2_model::euler; + +template& modulus> +size_t Fp2_model::s; + +template& modulus> +bigint<2*n> Fp2_model::t; + +template& modulus> +bigint<2*n> Fp2_model::t_minus_1_over_2; + +template& modulus> +Fp_model Fp2_model::non_residue; + +template& modulus> +Fp2_model Fp2_model::nqr; + +template& modulus> +Fp2_model Fp2_model::nqr_to_t; + +template& modulus> +Fp_model Fp2_model::Frobenius_coeffs_c1[2]; + +} // libsnark +#include "algebra/fields/fp2.tcc" + +#endif // FP2_HPP_ diff --git a/src/algebra/fields/fp2.tcc b/src/algebra/fields/fp2.tcc new file mode 100644 index 000000000..1632a04c7 --- /dev/null +++ b/src/algebra/fields/fp2.tcc @@ -0,0 +1,261 @@ +/** @file + 
***************************************************************************** + Implementation of arithmetic in the finite field F[p^2]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP2_TCC_ +#define FP2_TCC_ + +#include "algebra/fields/field_utils.hpp" + +namespace libsnark { + +template& modulus> +Fp2_model Fp2_model::zero() +{ + return Fp2_model(my_Fp::zero(), my_Fp::zero()); +} + +template& modulus> +Fp2_model Fp2_model::one() +{ + return Fp2_model(my_Fp::one(), my_Fp::zero()); +} + +template& modulus> +Fp2_model Fp2_model::random_element() +{ + Fp2_model r; + r.c0 = my_Fp::random_element(); + r.c1 = my_Fp::random_element(); + + return r; +} + +template& modulus> +bool Fp2_model::operator==(const Fp2_model &other) const +{ + return (this->c0 == other.c0 && this->c1 == other.c1); +} + +template& modulus> +bool Fp2_model::operator!=(const Fp2_model &other) const +{ + return !(operator==(other)); +} + +template& modulus> +Fp2_model Fp2_model::operator+(const Fp2_model &other) const +{ + return Fp2_model(this->c0 + other.c0, + this->c1 + other.c1); +} + +template& modulus> +Fp2_model Fp2_model::operator-(const Fp2_model &other) const +{ + return Fp2_model(this->c0 - other.c0, + this->c1 - other.c1); +} + +template& modulus> +Fp2_model operator*(const Fp_model &lhs, const Fp2_model &rhs) +{ + return Fp2_model(lhs*rhs.c0, + lhs*rhs.c1); +} + +template& modulus> +Fp2_model Fp2_model::operator*(const Fp2_model &other) const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba) */ + const my_Fp + &A = other.c0, &B = other.c1, + &a = this->c0, &b = this->c1; + const my_Fp aA = a * A; + const my_Fp bB = b * B; + + return Fp2_model(aA + non_residue * bB, + (a + b)*(A+B) - aA - bB); +} + +template& modulus> +Fp2_model Fp2_model::operator-() const +{ + return Fp2_model(-this->c0, + -this->c1); +} + +template& modulus> +Fp2_model Fp2_model::squared() const +{ + return squared_complex(); +} + +template& modulus> +Fp2_model Fp2_model::squared_karatsuba() const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba squaring) */ + const my_Fp &a = this->c0, &b = this->c1; + const my_Fp asq = a.squared(); + const my_Fp bsq = b.squared(); + + return Fp2_model(asq + non_residue * bsq, + (a + b).squared() - asq - bsq); +} + +template& modulus> +Fp2_model Fp2_model::squared_complex() const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Complex squaring) */ + const my_Fp &a = this->c0, &b = this->c1; + const my_Fp ab = a * b; + + return Fp2_model((a + b) * (a + non_residue * b) - ab - non_residue * ab, + ab + ab); +} + +template& modulus> +Fp2_model Fp2_model::inverse() const +{ + const my_Fp &a = this->c0, &b = this->c1; + + /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 8 */ + const my_Fp t0 = a.squared(); + const my_Fp t1 = b.squared(); + const my_Fp t2 = t0 - non_residue * t1; + const my_Fp t3 = t2.inverse(); + const my_Fp c0 = a * t3; + const my_Fp c1 = - (b * t3); + + return Fp2_model(c0, c1); +} + +template& modulus> +Fp2_model Fp2_model::Frobenius_map(unsigned long power) const 
+{ + return Fp2_model(c0, + Frobenius_coeffs_c1[power % 2] * c1); +} + +template& modulus> +Fp2_model Fp2_model::sqrt() const +{ + if (is_zero()) { + return *this; + } + + Fp2_model one = Fp2_model::one(); + + size_t v = Fp2_model::s; + Fp2_model z = Fp2_model::nqr_to_t; + Fp2_model w = (*this)^Fp2_model::t_minus_1_over_2; + Fp2_model x = (*this) * w; + Fp2_model b = x * w; // b = (*this)^t + + + // check if square with euler's criterion + Fp2_model check = b; + for (size_t i = 0; i < v-1; ++i) + { + check = check.squared(); + } + if (check != one) + { + assert_except(0); + } + + + // compute square root with Tonelli--Shanks + // (does not terminate if not a square!) + + while (b != one) + { + size_t m = 0; + Fp2_model b2m = b; + while (b2m != one) + { + /* invariant: b2m = b^(2^m) after entering this loop */ + b2m = b2m.squared(); + m += 1; + } + + int j = v-m-1; + w = z; + while (j > 0) + { + w = w.squared(); + --j; + } // w = z^2^(v-m-1) + + z = w.squared(); + b = b * z; + x = x * w; + v = m; + } + + return x; +} + +template& modulus> +template +Fp2_model Fp2_model::operator^(const bigint &pow) const +{ + return power, m>(*this, pow); +} + +template& modulus> +std::ostream& operator<<(std::ostream &out, const Fp2_model &el) +{ + out << el.c0 << OUTPUT_SEPARATOR << el.c1; + return out; +} + +template& modulus> +std::istream& operator>>(std::istream &in, Fp2_model &el) +{ + in >> el.c0 >> el.c1; + return in; +} + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v) +{ + out << v.size() << "\n"; + for (const Fp2_model& t : v) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v) +{ + v.clear(); + + size_t s; + in >> s; + + char b; + in.read(&b, 1); + + v.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + Fp2_model el; + in >> el; + v.emplace_back(el); + } + + return in; +} + +} // libsnark +#endif // FP2_TCC_ diff --git a/src/algebra/fields/fp3.hpp b/src/algebra/fields/fp3.hpp new file mode 100644 index 000000000..53b178a27 --- /dev/null +++ b/src/algebra/fields/fp3.hpp @@ -0,0 +1,122 @@ +/** @file + ***************************************************************************** + Declaration of arithmetic in the finite field F[p^3]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP3_HPP_ +#define FP3_HPP_ +#include "algebra/fields/fp.hpp" +#include + +namespace libsnark { + +template& modulus> +class Fp3_model; + +template& modulus> +std::ostream& operator<<(std::ostream &, const Fp3_model &); + +template& modulus> +std::istream& operator>>(std::istream &, Fp3_model &); + +/** + * Arithmetic in the field F[p^3]. + * + * Let p := modulus. This interface provides arithmetic for the extension field + * Fp3 = Fp[U]/(U^3-non_residue), where non_residue is in Fp. 
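+ *
+ * An element is stored as a coefficient triple (c0, c1, c2) over Fp,
+ * representing c0 + c1*U + c2*U^2.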
+ * + * ASSUMPTION: p = 1 (mod 6) + */ +template& modulus> +class Fp3_model { +public: + typedef Fp_model my_Fp; + + static bigint<3*n> euler; // (modulus^3-1)/2 + static size_t s; // modulus^3 = 2^s * t + 1 + static bigint<3*n> t; // with t odd + static bigint<3*n> t_minus_1_over_2; // (t-1)/2 + static my_Fp non_residue; // X^6-non_residue irreducible over Fp; used for constructing Fp3 = Fp[X] / (X^3 - non_residue) + static Fp3_model nqr; // a quadratic nonresidue in Fp3 + static Fp3_model nqr_to_t; // nqr^t + static my_Fp Frobenius_coeffs_c1[3]; // non_residue^((modulus^i-1)/3) for i=0,1,2 + static my_Fp Frobenius_coeffs_c2[3]; // non_residue^((2*modulus^i-2)/3) for i=0,1,2 + + my_Fp c0, c1, c2; + Fp3_model() {}; + Fp3_model(const my_Fp& c0, const my_Fp& c1, const my_Fp& c2) : c0(c0), c1(c1), c2(c2) {}; + + void clear() { c0.clear(); c1.clear(); c2.clear(); } + void print() const { printf("c0/c1/c2:\n"); c0.print(); c1.print(); c2.print(); } + + static Fp3_model zero(); + static Fp3_model one(); + static Fp3_model random_element(); + + bool is_zero() const { return c0.is_zero() && c1.is_zero() && c2.is_zero(); } + bool operator==(const Fp3_model &other) const; + bool operator!=(const Fp3_model &other) const; + + Fp3_model operator+(const Fp3_model &other) const; + Fp3_model operator-(const Fp3_model &other) const; + Fp3_model operator*(const Fp3_model &other) const; + Fp3_model operator-() const; + Fp3_model squared() const; + Fp3_model inverse() const; + Fp3_model Frobenius_map(unsigned long power) const; + Fp3_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate) + + template + Fp3_model operator^(const bigint &other) const; + + static size_t size_in_bits() { return 3*my_Fp::size_in_bits(); } + static bigint base_field_char() { return modulus; } + + friend std::ostream& operator<< (std::ostream &out, const Fp3_model &el); + friend std::istream& operator>> (std::istream &in, Fp3_model &el); +}; + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v); + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v); + +template& modulus> +Fp3_model operator*(const Fp_model &lhs, const Fp3_model &rhs); + +template& modulus> +bigint<3*n> Fp3_model::euler; + +template& modulus> +size_t Fp3_model::s; + +template& modulus> +bigint<3*n> Fp3_model::t; + +template& modulus> +bigint<3*n> Fp3_model::t_minus_1_over_2; + +template& modulus> +Fp_model Fp3_model::non_residue; + +template& modulus> +Fp3_model Fp3_model::nqr; + +template& modulus> +Fp3_model Fp3_model::nqr_to_t; + +template& modulus> +Fp_model Fp3_model::Frobenius_coeffs_c1[3]; + +template& modulus> +Fp_model Fp3_model::Frobenius_coeffs_c2[3]; + +} // libsnark +#include "algebra/fields/fp3.tcc" + +#endif // FP3_HPP_ diff --git a/src/algebra/fields/fp3.tcc b/src/algebra/fields/fp3.tcc new file mode 100644 index 000000000..590a2a987 --- /dev/null +++ b/src/algebra/fields/fp3.tcc @@ -0,0 +1,259 @@ +/** @file + ***************************************************************************** + Implementation of arithmetic in the finite field F[p^3]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP3_TCC_ +#define FP3_TCC_ + +#include "algebra/fields/field_utils.hpp" + +namespace libsnark { + +template& modulus> +Fp3_model Fp3_model::zero() +{ + return Fp3_model(my_Fp::zero(), my_Fp::zero(), my_Fp::zero()); +} + +template& modulus> +Fp3_model Fp3_model::one() +{ + return Fp3_model(my_Fp::one(), my_Fp::zero(), my_Fp::zero()); +} + +template& modulus> +Fp3_model Fp3_model::random_element() +{ + Fp3_model r; + r.c0 = my_Fp::random_element(); + r.c1 = my_Fp::random_element(); + r.c2 = my_Fp::random_element(); + + return r; +} + +template& modulus> +bool Fp3_model::operator==(const Fp3_model &other) const +{ + return (this->c0 == other.c0 && this->c1 == other.c1 && this->c2 == other.c2); +} + +template& modulus> +bool Fp3_model::operator!=(const Fp3_model &other) const +{ + return !(operator==(other)); +} + +template& modulus> +Fp3_model Fp3_model::operator+(const Fp3_model &other) const +{ + return Fp3_model(this->c0 + other.c0, + this->c1 + other.c1, + this->c2 + other.c2); +} + +template& modulus> +Fp3_model Fp3_model::operator-(const Fp3_model &other) const +{ + return Fp3_model(this->c0 - other.c0, + this->c1 - other.c1, + this->c2 - other.c2); +} + +template& modulus> +Fp3_model operator*(const Fp_model &lhs, const Fp3_model &rhs) +{ + return Fp3_model(lhs*rhs.c0, + lhs*rhs.c1, + lhs*rhs.c2); +} + +template& modulus> +Fp3_model Fp3_model::operator*(const Fp3_model &other) const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (Karatsuba) */ + const my_Fp + &A = other.c0, &B = other.c1, &C = other.c2, + &a = this->c0, &b = this->c1, &c = this->c2; + const my_Fp aA = a*A; + const my_Fp bB = b*B; + const my_Fp cC = c*C; + + return Fp3_model(aA + non_residue*((b+c)*(B+C)-bB-cC), + (a+b)*(A+B)-aA-bB+non_residue*cC, + (a+c)*(A+C)-aA+bB-cC); +} + +template& modulus> +Fp3_model Fp3_model::operator-() const +{ + return Fp3_model(-this->c0, + -this->c1, + -this->c2); +} + +template& modulus> +Fp3_model Fp3_model::squared() const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (CH-SQR2) */ + const my_Fp + &a = this->c0, &b = this->c1, &c = this->c2; + const my_Fp s0 = a.squared(); + const my_Fp ab = a*b; + const my_Fp s1 = ab + ab; + const my_Fp s2 = (a - b + c).squared(); + const my_Fp bc = b*c; + const my_Fp s3 = bc + bc; + const my_Fp s4 = c.squared(); + + return Fp3_model(s0 + non_residue * s3, + s1 + non_residue * s4, + s1 + s2 + s3 - s0 - s4); +} + +template& modulus> +Fp3_model Fp3_model::inverse() const +{ + const my_Fp + &a = this->c0, &b = this->c1, &c = this->c2; + + /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 17 */ + const my_Fp t0 = a.squared(); + const my_Fp t1 = b.squared(); + const my_Fp t2 = c.squared(); + const my_Fp t3 = a*b; + const my_Fp t4 = a*c; + const my_Fp t5 = b*c; + const my_Fp c0 = t0 - non_residue * t5; + const my_Fp c1 = non_residue * t2 - t3; + const my_Fp c2 = t1 - t4; // typo in paper referenced above. 
should be "-" as per Scott, but is "*" + const my_Fp t6 = (a * c0 + non_residue * (c * c1 + b * c2)).inverse(); + return Fp3_model(t6 * c0, t6 * c1, t6 * c2); +} + +template& modulus> +Fp3_model Fp3_model::Frobenius_map(unsigned long power) const +{ + return Fp3_model(c0, + Frobenius_coeffs_c1[power % 3] * c1, + Frobenius_coeffs_c2[power % 3] * c2); +} + +template& modulus> +Fp3_model Fp3_model::sqrt() const +{ + Fp3_model one = Fp3_model::one(); + + size_t v = Fp3_model::s; + Fp3_model z = Fp3_model::nqr_to_t; + Fp3_model w = (*this)^Fp3_model::t_minus_1_over_2; + Fp3_model x = (*this) * w; + Fp3_model b = x * w; // b = (*this)^t + +#if DEBUG + // check if square with euler's criterion + Fp3_model check = b; + for (size_t i = 0; i < v-1; ++i) + { + check = check.squared(); + } + if (check != one) + { + assert(0); + } +#endif + + // compute square root with Tonelli--Shanks + // (does not terminate if not a square!) + + while (b != one) + { + size_t m = 0; + Fp3_model b2m = b; + while (b2m != one) + { + /* invariant: b2m = b^(2^m) after entering this loop */ + b2m = b2m.squared(); + m += 1; + } + + int j = v-m-1; + w = z; + while (j > 0) + { + w = w.squared(); + --j; + } // w = z^2^(v-m-1) + + z = w.squared(); + b = b * z; + x = x * w; + v = m; + } + + return x; +} + +template& modulus> +template +Fp3_model Fp3_model::operator^(const bigint &pow) const +{ + return power >(*this, pow); +} + +template& modulus> +std::ostream& operator<<(std::ostream &out, const Fp3_model &el) +{ + out << el.c0 << OUTPUT_SEPARATOR << el.c1 << OUTPUT_SEPARATOR << el.c2; + return out; +} + +template& modulus> +std::istream& operator>>(std::istream &in, Fp3_model &el) +{ + in >> el.c0 >> el.c1 >> el.c2; + return in; +} + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v) +{ + out << v.size() << "\n"; + for (const Fp3_model& t : v) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v) +{ + v.clear(); + + size_t s; + in >> s; + + char b; + in.read(&b, 1); + + v.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + Fp3_model el; + in >> el; + v.emplace_back(el); + } + + return in; +} + +} // libsnark +#endif // FP3_TCC_ diff --git a/src/algebra/fields/fp6_3over2.hpp b/src/algebra/fields/fp6_3over2.hpp new file mode 100644 index 000000000..335d61c53 --- /dev/null +++ b/src/algebra/fields/fp6_3over2.hpp @@ -0,0 +1,104 @@ +/** @file + ***************************************************************************** + Declaration of arithmetic in the finite field F[(p^2)^3] + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP6_3OVER2_HPP_ +#define FP6_3OVER2_HPP_ +#include "algebra/fields/fp.hpp" +#include "algebra/fields/fp2.hpp" +#include + +namespace libsnark { + +template& modulus> +class Fp6_3over2_model; + +template& modulus> +std::ostream& operator<<(std::ostream &, const Fp6_3over2_model &); + +template& modulus> +std::istream& operator>>(std::istream &, Fp6_3over2_model &); + +/** + * Arithmetic in the finite field F[(p^2)^3]. + * + * Let p := modulus. This interface provides arithmetic for the extension field + * Fp6 = Fp2[V]/(V^3-non_residue) where non_residue is in Fp. 
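+ *
+ * An element is stored as a coefficient triple (c0, c1, c2) over Fp2,
+ * representing c0 + c1*V + c2*V^2. Note that non_residue is itself an
+ * Fp2 element here (see the my_Fp2 non_residue member below).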
+ * + * ASSUMPTION: p = 1 (mod 6) + */ +template& modulus> +class Fp6_3over2_model { +public: + typedef Fp_model my_Fp; + typedef Fp2_model my_Fp2; + + static my_Fp2 non_residue; + static my_Fp2 Frobenius_coeffs_c1[6]; // non_residue^((modulus^i-1)/3) for i=0,1,2,3,4,5 + static my_Fp2 Frobenius_coeffs_c2[6]; // non_residue^((2*modulus^i-2)/3) for i=0,1,2,3,4,5 + + my_Fp2 c0, c1, c2; + Fp6_3over2_model() {}; + Fp6_3over2_model(const my_Fp2& c0, const my_Fp2& c1, const my_Fp2& c2) : c0(c0), c1(c1), c2(c2) {}; + + void clear() { c0.clear(); c1.clear(); c2.clear(); } + void print() const { printf("c0/c1/c2:\n"); c0.print(); c1.print(); c2.print(); } + + static Fp6_3over2_model zero(); + static Fp6_3over2_model one(); + static Fp6_3over2_model random_element(); + + bool is_zero() const { return c0.is_zero() && c1.is_zero() && c2.is_zero(); } + bool operator==(const Fp6_3over2_model &other) const; + bool operator!=(const Fp6_3over2_model &other) const; + + Fp6_3over2_model operator+(const Fp6_3over2_model &other) const; + Fp6_3over2_model operator-(const Fp6_3over2_model &other) const; + Fp6_3over2_model operator*(const Fp6_3over2_model &other) const; + Fp6_3over2_model operator-() const; + Fp6_3over2_model squared() const; + Fp6_3over2_model inverse() const; + Fp6_3over2_model Frobenius_map(unsigned long power) const; + + static my_Fp2 mul_by_non_residue(const my_Fp2 &elt); + + template + Fp6_3over2_model operator^(const bigint &other) const; + + static bigint base_field_char() { return modulus; } + static size_t extension_degree() { return 6; } + + friend std::ostream& operator<< (std::ostream &out, const Fp6_3over2_model &el); + friend std::istream& operator>> (std::istream &in, Fp6_3over2_model &el); +}; + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v); + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v); + +template& modulus> +Fp6_3over2_model operator*(const Fp_model &lhs, const Fp6_3over2_model &rhs); + +template& modulus> +Fp6_3over2_model operator*(const Fp2_model &lhs, const Fp6_3over2_model &rhs); + +template& modulus> +Fp2_model Fp6_3over2_model::non_residue; + +template& modulus> +Fp2_model Fp6_3over2_model::Frobenius_coeffs_c1[6]; + +template& modulus> +Fp2_model Fp6_3over2_model::Frobenius_coeffs_c2[6]; + +} // libsnark +#include "algebra/fields/fp6_3over2.tcc" + +#endif // FP6_3OVER2_HPP_ diff --git a/src/algebra/fields/fp6_3over2.tcc b/src/algebra/fields/fp6_3over2.tcc new file mode 100644 index 000000000..f4fffde04 --- /dev/null +++ b/src/algebra/fields/fp6_3over2.tcc @@ -0,0 +1,216 @@ +/** @file + ***************************************************************************** + Implementation of arithmetic in the finite field F[(p^2)^3]. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP6_3OVER2_TCC_ +#define FP6_3OVER2_TCC_ +#include "algebra/fields/field_utils.hpp" + +namespace libsnark { + +template& modulus> +Fp2_model Fp6_3over2_model::mul_by_non_residue(const Fp2_model &elt) +{ + return Fp2_model(non_residue * elt); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::zero() +{ + return Fp6_3over2_model(my_Fp2::zero(), my_Fp2::zero(), my_Fp2::zero()); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::one() +{ + return Fp6_3over2_model(my_Fp2::one(), my_Fp2::zero(), my_Fp2::zero()); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::random_element() +{ + Fp6_3over2_model r; + r.c0 = my_Fp2::random_element(); + r.c1 = my_Fp2::random_element(); + r.c2 = my_Fp2::random_element(); + + return r; +} + +template& modulus> +bool Fp6_3over2_model::operator==(const Fp6_3over2_model &other) const +{ + return (this->c0 == other.c0 && this->c1 == other.c1 && this->c2 == other.c2); +} + +template& modulus> +bool Fp6_3over2_model::operator!=(const Fp6_3over2_model &other) const +{ + return !(operator==(other)); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::operator+(const Fp6_3over2_model &other) const +{ + return Fp6_3over2_model(this->c0 + other.c0, + this->c1 + other.c1, + this->c2 + other.c2); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::operator-(const Fp6_3over2_model &other) const +{ + return Fp6_3over2_model(this->c0 - other.c0, + this->c1 - other.c1, + this->c2 - other.c2); +} + +template& modulus> +Fp6_3over2_model operator*(const Fp_model &lhs, const Fp6_3over2_model &rhs) +{ + return Fp6_3over2_model(lhs*rhs.c0, + lhs*rhs.c1, + lhs*rhs.c2); +} + +template& modulus> +Fp6_3over2_model operator*(const Fp2_model &lhs, const Fp6_3over2_model &rhs) +{ + return Fp6_3over2_model(lhs*rhs.c0, + lhs*rhs.c1, + lhs*rhs.c2); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::operator*(const Fp6_3over2_model &other) const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (Karatsuba) */ + + const my_Fp2 &A = other.c0, &B = other.c1, &C = other.c2, + &a = this->c0, &b = this->c1, &c = this->c2; + const my_Fp2 aA = a*A; + const my_Fp2 bB = b*B; + const my_Fp2 cC = c*C; + + return Fp6_3over2_model(aA + Fp6_3over2_model::mul_by_non_residue((b+c)*(B+C)-bB-cC), + (a+b)*(A+B)-aA-bB+Fp6_3over2_model::mul_by_non_residue(cC), + (a+c)*(A+C)-aA+bB-cC); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::operator-() const +{ + return Fp6_3over2_model(-this->c0, + -this->c1, + -this->c2); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::squared() const +{ + /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (CH-SQR2) */ + + const my_Fp2 &a = this->c0, &b = this->c1, &c = this->c2; + const my_Fp2 s0 = a.squared(); + const my_Fp2 ab = a*b; + const my_Fp2 s1 = ab + ab; + const my_Fp2 s2 = (a - b + c).squared(); + const my_Fp2 bc = b*c; + const my_Fp2 s3 = bc + bc; + const my_Fp2 s4 = c.squared(); + + return Fp6_3over2_model(s0 + Fp6_3over2_model::mul_by_non_residue(s3), + s1 + Fp6_3over2_model::mul_by_non_residue(s4), + s1 + s2 + s3 - s0 - s4); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::inverse() const +{ + /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 17 */ + + const my_Fp2 &a = 
this->c0, &b = this->c1, &c = this->c2; + const my_Fp2 t0 = a.squared(); + const my_Fp2 t1 = b.squared(); + const my_Fp2 t2 = c.squared(); + const my_Fp2 t3 = a*b; + const my_Fp2 t4 = a*c; + const my_Fp2 t5 = b*c; + const my_Fp2 c0 = t0 - Fp6_3over2_model::mul_by_non_residue(t5); + const my_Fp2 c1 = Fp6_3over2_model::mul_by_non_residue(t2) - t3; + const my_Fp2 c2 = t1 - t4; // typo in paper referenced above. should be "-" as per Scott, but is "*" + const my_Fp2 t6 = (a * c0 + Fp6_3over2_model::mul_by_non_residue((c * c1 + b * c2))).inverse(); + return Fp6_3over2_model(t6 * c0, t6 * c1, t6 * c2); +} + +template& modulus> +Fp6_3over2_model Fp6_3over2_model::Frobenius_map(unsigned long power) const +{ + return Fp6_3over2_model(c0.Frobenius_map(power), + Frobenius_coeffs_c1[power % 6] * c1.Frobenius_map(power), + Frobenius_coeffs_c2[power % 6] * c2.Frobenius_map(power)); +} + +template& modulus> +template +Fp6_3over2_model Fp6_3over2_model::operator^(const bigint &pow) const +{ + return power, m>(*this, pow); +} + +template& modulus> +std::ostream& operator<<(std::ostream &out, const Fp6_3over2_model &el) +{ + out << el.c0 << OUTPUT_SEPARATOR << el.c1 << OUTPUT_SEPARATOR << el.c2; + return out; +} + +template& modulus> +std::istream& operator>>(std::istream &in, Fp6_3over2_model &el) +{ + in >> el.c0 >> el.c1 >> el.c2; + return in; +} + +template& modulus> +std::ostream& operator<<(std::ostream& out, const std::vector > &v) +{ + out << v.size() << "\n"; + for (const Fp6_3over2_model& t : v) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +template& modulus> +std::istream& operator>>(std::istream& in, std::vector > &v) +{ + v.clear(); + + size_t s; + in >> s; + + char b; + in.read(&b, 1); + + v.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + Fp6_3over2_model el; + in >> el; + v.emplace_back(el); + } + + return in; +} + +} // libsnark +#endif // FP6_3_OVER_2_TCC_ diff --git a/src/algebra/fields/fp_aux.tcc b/src/algebra/fields/fp_aux.tcc new file mode 100644 index 000000000..7f8a3eadf --- /dev/null +++ b/src/algebra/fields/fp_aux.tcc @@ -0,0 +1,389 @@ +/** @file + ***************************************************************************** + Assembly code snippets for F[p] finite field arithmetic, used by fp.tcc . + Specific to x86-64, and used only if USE_ASM is defined. + On other architectures or without USE_ASM, fp.tcc uses a portable + C++ implementation instead. + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef FP_AUX_TCC_ +#define FP_AUX_TCC_ + +namespace libsnark { + +#define STR_HELPER(x) #x +#define STR(x) STR_HELPER(x) + +/* addq is faster than adcq, even if preceded by clc */ +#define ADD_FIRSTADD \ + "movq (%[B]), %%rax \n\t" \ + "addq %%rax, (%[A]) \n\t" + +#define ADD_NEXTADD(ofs) \ + "movq " STR(ofs) "(%[B]), %%rax \n\t" \ + "adcq %%rax, " STR(ofs) "(%[A]) \n\t" + +#define ADD_CMP(ofs) \ + "movq " STR(ofs) "(%[mod]), %%rax \n\t" \ + "cmpq %%rax, " STR(ofs) "(%[A]) \n\t" \ + "jb done%= \n\t" \ + "ja subtract%= \n\t" + +#define ADD_FIRSTSUB \ + "movq (%[mod]), %%rax \n\t" \ + "subq %%rax, (%[A]) \n\t" + +#define ADD_FIRSTSUB \ + "movq (%[mod]), %%rax \n\t" \ + "subq %%rax, (%[A]) \n\t" + +#define ADD_NEXTSUB(ofs) \ + "movq " STR(ofs) "(%[mod]), %%rax \n\t" \ + "sbbq %%rax, " STR(ofs) "(%[A]) \n\t" + +#define SUB_FIRSTSUB \ + "movq (%[B]), %%rax\n\t" \ + "subq %%rax, (%[A])\n\t" + +#define SUB_NEXTSUB(ofs) \ + "movq " STR(ofs) "(%[B]), %%rax\n\t" \ + "sbbq %%rax, " STR(ofs) "(%[A])\n\t" + +#define SUB_FIRSTADD \ + "movq (%[mod]), %%rax\n\t" \ + "addq %%rax, (%[A])\n\t" + +#define SUB_NEXTADD(ofs) \ + "movq " STR(ofs) "(%[mod]), %%rax\n\t" \ + "adcq %%rax, " STR(ofs) "(%[A])\n\t" + +#define MONT_CMP(ofs) \ + "movq " STR(ofs) "(%[M]), %%rax \n\t" \ + "cmpq %%rax, " STR(ofs) "(%[tmp]) \n\t" \ + "jb done%= \n\t" \ + "ja subtract%= \n\t" + +#define MONT_FIRSTSUB \ + "movq (%[M]), %%rax \n\t" \ + "subq %%rax, (%[tmp]) \n\t" + +#define MONT_NEXTSUB(ofs) \ + "movq " STR(ofs) "(%[M]), %%rax \n\t" \ + "sbbq %%rax, " STR(ofs) "(%[tmp]) \n\t" + +/* + The x86-64 Montgomery multiplication here is similar + to Algorithm 2 (CIOS method) in http://eprint.iacr.org/2012/140.pdf + and the PowerPC pseudocode of gmp-ecm library (c) Paul Zimmermann and Alexander Kruppa + (see comments on top of powerpc64/mulredc.m4). 
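+
+ For readers less familiar with x86-64 asm, an equivalent portable sketch of
+ one outer CIOS iteration follows (illustrative only: it assumes 64-bit limbs,
+ a __uint128_t type, and inv = -M[0]^{-1} mod 2^64; none of these names are
+ part of this file):
+
+   // 1) accumulate A[i] * B into the (n+2)-limb scratch t
+   uint64_t carry = 0;
+   for (size_t j = 0; j < n; j++) {
+       __uint128_t s = (__uint128_t)A[i] * B[j] + t[j] + carry;
+       t[j] = (uint64_t)s;
+       carry = (uint64_t)(s >> 64);
+   }
+   __uint128_t s = (__uint128_t)t[n] + carry;
+   t[n] = (uint64_t)s; t[n+1] = (uint64_t)(s >> 64);
+
+   // 2) add u*M so the low limb becomes zero, then shift down by one limb
+   uint64_t u = t[0] * inv;
+   s = (__uint128_t)u * M[0] + t[0];
+   carry = (uint64_t)(s >> 64);
+   for (size_t j = 1; j < n; j++) {
+       s = (__uint128_t)u * M[j] + t[j] + carry;
+       t[j-1] = (uint64_t)s;
+       carry = (uint64_t)(s >> 64);
+   }
+   s = (__uint128_t)t[n] + carry;
+   t[n-1] = (uint64_t)s; t[n] = t[n+1] + (uint64_t)(s >> 64);
+
+ After the last iteration the result sits in t[0..n] and a single conditional
+ subtraction of M brings it below the modulus.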
+*/ + +#define MONT_PRECOMPUTE \ + "xorq %[cy], %[cy] \n\t" \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq 0(%[B]) \n\t" \ + "movq %%rax, %[T0] \n\t" \ + "movq %%rdx, %[T1] # T1:T0 <- A[0] * B[0] \n\t" \ + "mulq %[inv] \n\t" \ + "movq %%rax, %[u] # u <- T0 * inv \n\t" \ + "mulq 0(%[M]) \n\t" \ + "addq %[T0], %%rax \n\t" \ + "adcq %%rdx, %[T1] \n\t" \ + "adcq $0, %[cy] # cy:T1 <- (M[0]*u + T1 * b + T0) / b\n\t" + +#define MONT_FIRSTITER(j) \ + "xorq %[T0], %[T0] \n\t" \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq " STR((j*8)) "(%[B]) \n\t" \ + "addq %[T1], %%rax \n\t" \ + "movq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \ + "adcq $0, %%rdx \n\t" \ + "movq %%rdx, %[T1] # now T1:tmp[j-1] <-- X[0] * Y[j] + T1\n\t" \ + "movq " STR((j*8)) "(%[M]), %%rax \n\t" \ + "mulq %[u] \n\t" \ + "addq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \ + "adcq %[cy], %%rdx \n\t" \ + "adcq $0, %[T0] \n\t" \ + "xorq %[cy], %[cy] \n\t" \ + "addq %%rdx, %[T1] \n\t" \ + "adcq %[T0], %[cy] # cy:T1:tmp[j-1] <---- (X[0] * Y[j] + T1) + (M[j] * u + cy * b) \n\t" + +#define MONT_ITERFIRST(i) \ + "xorq %[cy], %[cy] \n\t" \ + "movq " STR((i*8)) "(%[A]), %%rax \n\t" \ + "mulq 0(%[B]) \n\t" \ + "addq 0(%[tmp]), %%rax \n\t" \ + "adcq 8(%[tmp]), %%rdx \n\t" \ + "adcq $0, %[cy] \n\t" \ + "movq %%rax, %[T0] \n\t" \ + "movq %%rdx, %[T1] # cy:T1:T0 <- A[i] * B[0] + tmp[1] * b + tmp[0]\n\t" \ + "mulq %[inv] \n\t" \ + "movq %%rax, %[u] # u <- T0 * inv\n\t" \ + "mulq 0(%[M]) \n\t" \ + "addq %[T0], %%rax \n\t" \ + "adcq %%rdx, %[T1] \n\t" \ + "adcq $0, %[cy] # cy:T1 <- (M[0]*u + cy * b * b + T1 * b + T0) / b\n\t" + +#define MONT_ITERITER(i, j) \ + "xorq %[T0], %[T0] \n\t" \ + "movq " STR((i*8)) "(%[A]), %%rax \n\t" \ + "mulq " STR((j*8)) "(%[B]) \n\t" \ + "addq %[T1], %%rax \n\t" \ + "movq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \ + "adcq $0, %%rdx \n\t" \ + "movq %%rdx, %[T1] # now T1:tmp[j-1] <-- X[i] * Y[j] + T1 \n\t" \ + "movq " STR((j*8)) "(%[M]), %%rax \n\t" \ + "mulq %[u] \n\t" \ + "addq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \ + "adcq %[cy], %%rdx \n\t" \ + "adcq $0, %[T0] \n\t" \ + "xorq %[cy], %[cy] \n\t" \ + "addq %%rdx, %[T1] \n\t" \ + "adcq %[T0], %[cy] # cy:T1:tmp[j-1] <-- (X[i] * Y[j] + T1) + M[j] * u + cy * b \n\t" \ + "addq " STR(((j+1)*8)) "(%[tmp]), %[T1] \n\t" \ + "adcq $0, %[cy] # cy:T1:tmp[j-1] <-- (X[i] * Y[j] + T1) + M[j] * u + (tmp[j+1] + cy) * b \n\t" + +#define MONT_FINALIZE(j) \ + "movq %[T1], " STR((j*8)) "(%[tmp]) \n\t" \ + "movq %[cy], " STR(((j+1)*8)) "(%[tmp]) \n\t" + +/* + Comba multiplication and squaring routines are based on the + public-domain tomsfastmath library by Tom St Denis + + + + Compared to the above, we save 5-20% of cycles by using careful register + renaming to implement Comba forward operation. 
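+
+ (Background: Comba, or "product scanning", multiplication produces the result
+ one output limb at a time: column k accumulates every partial product
+ a[i]*b[j] with i + j == k into a three-limb accumulator before that limb is
+ stored. The macros below keep the accumulator in the registers bound to
+ c0/c1/c2 and rotate their roles between columns instead of moving data, which
+ is what the "register renaming" comments refer to.)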
+ */ + +#define COMBA_3_BY_3_MUL(c0_, c1_, c2_, res_, A_, B_) \ + asm volatile ( \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq 0(%[B]) \n\t" \ + "movq %%rax, 0(%[res]) \n\t" \ + "movq %%rdx, %[c0] \n\t" \ + \ + "xorq %[c1], %[c1] \n\t" \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq 8(%[B]) \n\t" \ + "addq %%rax, %[c0] \n\t" \ + "adcq %%rdx, %[c1] \n\t" \ + \ + "xorq %[c2], %[c2] \n\t" \ + "movq 8(%[A]), %%rax \n\t" \ + "mulq 0(%[B]) \n\t" \ + "addq %%rax, %[c0] \n\t" \ + "movq %[c0], 8(%[res]) \n\t" \ + "adcq %%rdx, %[c1] \n\t" \ + "adcq $0, %[c2] \n\t" \ + \ + "// register renaming (c1, c2, c0)\n\t" \ + "xorq %[c0], %[c0] \n\t" \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq 16(%[B]) \n\t" \ + "addq %%rax, %[c1] \n\t" \ + "adcq %%rdx, %[c2] \n\t" \ + "adcq $0, %[c0] \n\t" \ + \ + "movq 8(%[A]), %%rax \n\t" \ + "mulq 8(%[B]) \n\t" \ + "addq %%rax, %[c1] \n\t" \ + "adcq %%rdx, %[c2] \n\t" \ + "adcq $0, %[c0] \n\t" \ + \ + "movq 16(%[A]), %%rax \n\t" \ + "mulq 0(%[B]) \n\t" \ + "addq %%rax, %[c1] \n\t" \ + "movq %[c1], 16(%[res]) \n\t" \ + "adcq %%rdx, %[c2] \n\t" \ + "adcq $0, %[c0] \n\t" \ + \ + "// register renaming (c2, c0, c1)\n\t" \ + "xorq %[c1], %[c1] \n\t" \ + "movq 8(%[A]), %%rax \n\t" \ + "mulq 16(%[B]) \n\t" \ + "addq %%rax, %[c2] \n\t" \ + "adcq %%rdx, %[c0] \n\t" \ + "adcq $0, %[c1] \n\t" \ + \ + "movq 16(%[A]), %%rax \n\t" \ + "mulq 8(%[B]) \n\t" \ + "addq %%rax, %[c2] \n\t" \ + "movq %[c2], 24(%[res]) \n\t" \ + "adcq %%rdx, %[c0] \n\t" \ + "adcq $0, %[c1] \n\t" \ + \ + "// register renaming (c0, c1, c2)\n\t" \ + "xorq %[c2], %[c2] \n\t" \ + "movq 16(%[A]), %%rax \n\t" \ + "mulq 16(%[B]) \n\t" \ + "addq %%rax, %[c0] \n\t" \ + "movq %[c0], 32(%[res]) \n\t" \ + "adcq %%rdx, %[c1] \n\t" \ + "movq %[c1], 40(%[res]) \n\t" \ + : [c0] "=&r" (c0_), [c1] "=&r" (c1_), [c2] "=&r" (c2_) \ + : [res] "r" (res_), [A] "r" (A_), [B] "r" (B_) \ + : "%rax", "%rdx", "cc", "memory") + +#define COMBA_3_BY_3_SQR(c0_, c1_, c2_, res_, A_) \ + asm volatile ( \ + "xorq %[c1], %[c1] \n\t" \ + "xorq %[c2], %[c2] \n\t" \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq %%rax \n\t" \ + "movq %%rax, 0(%[res]) \n\t" \ + "movq %%rdx, %[c0] \n\t" \ + \ + "movq 0(%[A]), %%rax \n\t" \ + "mulq 8(%[A]) \n\t" \ + "addq %%rax, %[c0] \n\t" \ + "adcq %%rdx, %[c1] \n\t" \ + "addq %%rax, %[c0] \n\t" \ + "movq %[c0], 8(%[res]) \n\t" \ + "adcq %%rdx, %[c1] \n\t" \ + "adcq $0, %[c2] \n\t" \ + \ + "// register renaming (c1, c2, c0)\n\t" \ + "movq 0(%[A]), %%rax \n\t" \ + "xorq %[c0], %[c0] \n\t" \ + "mulq 16(%[A]) \n\t" \ + "addq %%rax, %[c1] \n\t" \ + "adcq %%rdx, %[c2] \n\t" \ + "adcq $0, %[c0] \n\t" \ + "addq %%rax, %[c1] \n\t" \ + "adcq %%rdx, %[c2] \n\t" \ + "adcq $0, %[c0] \n\t" \ + \ + "movq 8(%[A]), %%rax \n\t" \ + "mulq %%rax \n\t" \ + "addq %%rax, %[c1] \n\t" \ + "movq %[c1], 16(%[res]) \n\t" \ + "adcq %%rdx, %[c2] \n\t" \ + "adcq $0, %[c0] \n\t" \ + \ + "// register renaming (c2, c0, c1)\n\t" \ + "movq 8(%[A]), %%rax \n\t" \ + "xorq %[c1], %[c1] \n\t" \ + "mulq 16(%[A]) \n\t" \ + "addq %%rax, %[c2] \n\t" \ + "adcq %%rdx, %[c0] \n\t" \ + "adcq $0, %[c1] \n\t" \ + "addq %%rax, %[c2] \n\t" \ + "movq %[c2], 24(%[res]) \n\t" \ + "adcq %%rdx, %[c0] \n\t" \ + "adcq $0, %[c1] \n\t" \ + \ + "// register renaming (c0, c1, c2)\n\t" \ + "movq 16(%[A]), %%rax \n\t" \ + "mulq %%rax \n\t" \ + "addq %%rax, %[c0] \n\t" \ + "movq %[c0], 32(%[res]) \n\t" \ + "adcq %%rdx, %[c1] \n\t" \ + "movq %[c1], 40(%[res]) \n\t" \ + \ + : [c0] "=&r" (c0_), [c1] "=&r" (c1_), [c2] "=&r" (c2_) \ + : [res] "r" (res_), [A] "r" (A_) \ + : "%rax", "%rdx", "cc", "memory") + +/* + 
The Montgomery reduction here is based on Algorithm 14.32 in + Handbook of Applied Cryptography + . + */ +#define REDUCE_6_LIMB_PRODUCT(k_, tmp1_, tmp2_, tmp3_, inv_, res_, mod_) \ + __asm__ volatile \ + ("///////////////////////////////////\n\t" \ + "movq 0(%[res]), %%rax \n\t" \ + "mulq %[modprime] \n\t" \ + "movq %%rax, %[k] \n\t" \ + \ + "movq (%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "movq %%rax, %[tmp1] \n\t" \ + "movq %%rdx, %[tmp2] \n\t" \ + \ + "xorq %[tmp3], %[tmp3] \n\t" \ + "movq 8(%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "addq %[tmp1], 0(%[res]) \n\t" \ + "adcq %%rax, %[tmp2] \n\t" \ + "adcq %%rdx, %[tmp3] \n\t" \ + \ + "xorq %[tmp1], %[tmp1] \n\t" \ + "movq 16(%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "addq %[tmp2], 8(%[res]) \n\t" \ + "adcq %%rax, %[tmp3] \n\t" \ + "adcq %%rdx, %[tmp1] \n\t" \ + \ + "addq %[tmp3], 16(%[res]) \n\t" \ + "adcq %[tmp1], 24(%[res]) \n\t" \ + "adcq $0, 32(%[res]) \n\t" \ + "adcq $0, 40(%[res]) \n\t" \ + \ + "///////////////////////////////////\n\t" \ + "movq 8(%[res]), %%rax \n\t" \ + "mulq %[modprime] \n\t" \ + "movq %%rax, %[k] \n\t" \ + \ + "movq (%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "movq %%rax, %[tmp1] \n\t" \ + "movq %%rdx, %[tmp2] \n\t" \ + \ + "xorq %[tmp3], %[tmp3] \n\t" \ + "movq 8(%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "addq %[tmp1], 8(%[res]) \n\t" \ + "adcq %%rax, %[tmp2] \n\t" \ + "adcq %%rdx, %[tmp3] \n\t" \ + \ + "xorq %[tmp1], %[tmp1] \n\t" \ + "movq 16(%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "addq %[tmp2], 16(%[res]) \n\t" \ + "adcq %%rax, %[tmp3] \n\t" \ + "adcq %%rdx, %[tmp1] \n\t" \ + \ + "addq %[tmp3], 24(%[res]) \n\t" \ + "adcq %[tmp1], 32(%[res]) \n\t" \ + "adcq $0, 40(%[res]) \n\t" \ + \ + "///////////////////////////////////\n\t" \ + "movq 16(%[res]), %%rax \n\t" \ + "mulq %[modprime] \n\t" \ + "movq %%rax, %[k] \n\t" \ + \ + "movq (%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "movq %%rax, %[tmp1] \n\t" \ + "movq %%rdx, %[tmp2] \n\t" \ + \ + "xorq %[tmp3], %[tmp3] \n\t" \ + "movq 8(%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "addq %[tmp1], 16(%[res]) \n\t" \ + "adcq %%rax, %[tmp2] \n\t" \ + "adcq %%rdx, %[tmp3] \n\t" \ + \ + "xorq %[tmp1], %[tmp1] \n\t" \ + "movq 16(%[mod]), %%rax \n\t" \ + "mulq %[k] \n\t" \ + "addq %[tmp2], 24(%[res]) \n\t" \ + "adcq %%rax, %[tmp3] \n\t" \ + "adcq %%rdx, %[tmp1] \n\t" \ + \ + "addq %[tmp3], 32(%[res]) \n\t" \ + "adcq %[tmp1], 40(%[res]) \n\t" \ + : [k] "=&r" (k_), [tmp1] "=&r" (tmp1_), [tmp2] "=&r" (tmp2_), [tmp3] "=&r" (tmp3_) \ + : [modprime] "r" (inv_), [res] "r" (res_), [mod] "r" (mod_) \ + : "%rax", "%rdx", "cc", "memory") + +} // libsnark +#endif // FP_AUX_TCC_ diff --git a/src/algebra/fields/tests/test_bigint.cpp b/src/algebra/fields/tests/test_bigint.cpp new file mode 100644 index 000000000..b66aae0a3 --- /dev/null +++ b/src/algebra/fields/tests/test_bigint.cpp @@ -0,0 +1,107 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "algebra/fields/bigint.hpp" + +using namespace libsnark; + +void test_bigint() +{ + static_assert(ULONG_MAX == 0xFFFFFFFFFFFFFFFFul, "unsigned long not 64-bit"); + static_assert(GMP_NUMB_BITS == 64, "GMP limb not 64-bit"); + + const char *b1_decimal = "76749407"; + const char *b2_decimal = "435020359732196472065729437602"; + const char *b3_decimal = "33387554642372758038536799358397002014"; + const char *b2_binary = "0000000000000000000000000000010101111101101000000110100001011010" + "1101101010001001000001101000101000100110011001110001111110100010"; + + bigint<1> b0 = bigint<1>(0ul); + bigint<1> b1 = bigint<1>(b1_decimal); + bigint<2> b2 = bigint<2>(b2_decimal); + + assert(b0.as_ulong() == 0ul); + assert(b0.is_zero()); + assert(b1.as_ulong() == 76749407ul); + assert(!(b1.is_zero())); + assert(b2.as_ulong() == 15747124762497195938ul); + assert(!(b2.is_zero())); + assert(b0 != b1); + assert(!(b0 == b1)); + + assert(b2.max_bits() == 128); + assert(b2.num_bits() == 99); + for (size_t i = 0; i < 128; i++) { + assert(b2.test_bit(i) == (b2_binary[127-i] == '1')); + } + + bigint<3> b3 = b2 * b1; + + assert(b3 == bigint<3>(b3_decimal)); + assert(!(b3.is_zero())); + + bigint<3> b3a { b3 }; + assert(b3a == bigint<3>(b3_decimal)); + assert(b3a == b3); + assert(!(b3a.is_zero())); + + mpz_t m3; + mpz_init(m3); + b3.to_mpz(m3); + bigint<3> b3b { m3 }; + assert(b3b == b3); + + bigint<2> quotient; + bigint<2> remainder; + bigint<3>::div_qr(quotient, remainder, b3, b2); + assert(quotient.num_bits() < GMP_NUMB_BITS); + assert(quotient.as_ulong() == b1.as_ulong()); + bigint<1> b1inc = bigint<1>("76749408"); + bigint<1> b1a = quotient.shorten(b1inc, "test"); + assert(b1a == b1); + assert(remainder.is_zero()); + remainder.limit(b2, "test"); + + try { + (void)(quotient.shorten(b1, "test")); + assert(false); + } catch (std::domain_error) {} + try { + remainder.limit(remainder, "test"); + assert(false); + } catch (std::domain_error) {} + + bigint<1> br = bigint<1>("42"); + b3 += br; + assert(b3 != b3a); + assert(b3 > b3a); + assert(!(b3a > b3)); + + bigint<3>::div_qr(quotient, remainder, b3, b2); + assert(quotient.num_bits() < GMP_NUMB_BITS); + assert(quotient.as_ulong() == b1.as_ulong()); + assert(remainder.num_bits() < GMP_NUMB_BITS); + assert(remainder.as_ulong() == 42); + + b3a.clear(); + assert(b3a.is_zero()); + assert(b3a.num_bits() == 0); + assert(!(b3.is_zero())); + + bigint<4> bx = bigint<4>().randomize(); + bigint<4> by = bigint<4>().randomize(); + assert(!(bx == by)); + + // TODO: test serialization +} + +int main(void) +{ + test_bigint(); + return 0; +} + diff --git a/src/algebra/fields/tests/test_fields.cpp b/src/algebra/fields/tests/test_fields.cpp new file mode 100644 index 000000000..a05f601e6 --- /dev/null +++ b/src/algebra/fields/tests/test_fields.cpp @@ -0,0 +1,245 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#include "common/profiling.hpp" +#include "algebra/curves/edwards/edwards_pp.hpp" +#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp" +#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp" +#ifdef CURVE_BN128 +#include "algebra/curves/bn128/bn128_pp.hpp" +#endif +#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp" +#include "algebra/fields/fp6_3over2.hpp" +#include "algebra/fields/fp12_2over3over2.hpp" + +using namespace libsnark; + +template +void test_field() +{ + bigint<1> rand1 = bigint<1>("76749407"); + bigint<1> rand2 = bigint<1>("44410867"); + bigint<1> randsum = bigint<1>("121160274"); + + FieldT zero = FieldT::zero(); + FieldT one = FieldT::one(); + FieldT a = FieldT::random_element(); + FieldT a_ser; + a_ser = reserialize(a); + assert(a_ser == a); + + FieldT b = FieldT::random_element(); + FieldT c = FieldT::random_element(); + FieldT d = FieldT::random_element(); + + assert(a != zero); + assert(a != one); + + assert(a * a == a.squared()); + assert((a + b).squared() == a.squared() + a*b + b*a + b.squared()); + assert((a + b)*(c + d) == a*c + a*d + b*c + b*d); + assert(a - b == a + (-b)); + assert(a - b == (-b) + a); + + assert((a ^ rand1) * (a ^ rand2) == (a^randsum)); + + assert(a * a.inverse() == one); + assert((a + b) * c.inverse() == a * c.inverse() + (b.inverse() * c).inverse()); + +} + +template +void test_sqrt() +{ + for (size_t i = 0; i < 100; ++i) + { + FieldT a = FieldT::random_element(); + FieldT asq = a.squared(); + assert(asq.sqrt() == a || asq.sqrt() == -a); + } +} + +template +void test_two_squarings() +{ + FieldT a = FieldT::random_element(); + assert(a.squared() == a * a); + assert(a.squared() == a.squared_complex()); + assert(a.squared() == a.squared_karatsuba()); +} + +template +void test_Frobenius() +{ + FieldT a = FieldT::random_element(); + assert(a.Frobenius_map(0) == a); + FieldT a_q = a ^ FieldT::base_field_char(); + for (size_t power = 1; power < 10; ++power) + { + const FieldT a_qi = a.Frobenius_map(power); + assert(a_qi == a_q); + + a_q = a_q ^ FieldT::base_field_char(); + } +} + +template +void test_unitary_inverse() +{ + assert(FieldT::extension_degree() % 2 == 0); + FieldT a = FieldT::random_element(); + FieldT aqcubed_minus1 = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse(); + assert(aqcubed_minus1.inverse() == aqcubed_minus1.unitary_inverse()); +} + +template +void test_cyclotomic_squaring(); + +template<> +void test_cyclotomic_squaring >() +{ + typedef Fqk FieldT; + assert(FieldT::extension_degree() % 2 == 0); + FieldT a = FieldT::random_element(); + FieldT a_unitary = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse(); + // beta = a^((q^(k/2)-1)*(q+1)) + FieldT beta = a_unitary.Frobenius_map(1) * a_unitary; + assert(beta.cyclotomic_squared() == beta.squared()); +} + +template<> +void test_cyclotomic_squaring >() +{ + typedef Fqk FieldT; + assert(FieldT::extension_degree() % 2 == 0); + FieldT a = FieldT::random_element(); + FieldT a_unitary = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse(); + // beta = a^(q^(k/2)-1) + FieldT beta = a_unitary; + assert(beta.cyclotomic_squared() == beta.squared()); +} + +template<> +void test_cyclotomic_squaring >() +{ + typedef Fqk FieldT; + assert(FieldT::extension_degree() % 2 == 0); + FieldT a = FieldT::random_element(); + FieldT a_unitary = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse(); + // beta = a^((q^(k/2)-1)*(q+1)) + FieldT beta = 
a_unitary.Frobenius_map(1) * a_unitary; + assert(beta.cyclotomic_squared() == beta.squared()); +} + +template +void test_all_fields() +{ + test_field >(); + test_field >(); + test_field >(); + test_field >(); + + test_sqrt >(); + test_sqrt >(); + test_sqrt >(); + + test_Frobenius >(); + test_Frobenius >(); + + test_unitary_inverse >(); +} + +template +void test_Fp4_tom_cook() +{ + typedef typename Fp4T::my_Fp FieldT; + for (size_t i = 0; i < 100; ++i) + { + const Fp4T a = Fp4T::random_element(); + const Fp4T b = Fp4T::random_element(); + const Fp4T correct_res = a * b; + + Fp4T res; + + const FieldT + &a0 = a.c0.c0, + &a1 = a.c1.c0, + &a2 = a.c0.c1, + &a3 = a.c1.c1; + + const FieldT + &b0 = b.c0.c0, + &b1 = b.c1.c0, + &b2 = b.c0.c1, + &b3 = b.c1.c1; + + FieldT + &c0 = res.c0.c0, + &c1 = res.c1.c0, + &c2 = res.c0.c1, + &c3 = res.c1.c1; + + const FieldT v0 = a0 * b0; + const FieldT v1 = (a0 + a1 + a2 + a3) * (b0 + b1 + b2 + b3); + const FieldT v2 = (a0 - a1 + a2 - a3) * (b0 - b1 + b2 - b3); + const FieldT v3 = (a0 + FieldT(2)*a1 + FieldT(4)*a2 + FieldT(8)*a3) * (b0 + FieldT(2)*b1 + FieldT(4)*b2 + FieldT(8)*b3); + const FieldT v4 = (a0 - FieldT(2)*a1 + FieldT(4)*a2 - FieldT(8)*a3) * (b0 - FieldT(2)*b1 + FieldT(4)*b2 - FieldT(8)*b3); + const FieldT v5 = (a0 + FieldT(3)*a1 + FieldT(9)*a2 + FieldT(27)*a3) * (b0 + FieldT(3)*b1 + FieldT(9)*b2 + FieldT(27)*b3); + const FieldT v6 = a3 * b3; + + const FieldT beta = Fp4T::non_residue; + + c0 = v0 + beta*(FieldT(4).inverse()*v0 - FieldT(6).inverse()*(v1 + v2) + FieldT(24).inverse() * (v3 + v4) - FieldT(5) * v6); + c1 = - FieldT(3).inverse()*v0 + v1 - FieldT(2).inverse()*v2 - FieldT(4).inverse()*v3 + FieldT(20).inverse() * v4 + FieldT(30).inverse() * v5 - FieldT(12) * v6 + beta * ( - FieldT(12).inverse() * (v0 - v1) + FieldT(24).inverse()*(v2 - v3) - FieldT(120).inverse() * (v4 - v5) - FieldT(3) * v6); + c2 = - (FieldT(5)*(FieldT(4).inverse()))* v0 + (FieldT(2)*(FieldT(3).inverse()))*(v1 + v2) - FieldT(24).inverse()*(v3 + v4) + FieldT(4)*v6 + beta*v6; + c3 = FieldT(12).inverse() * (FieldT(5)*v0 - FieldT(7)*v1) - FieldT(24).inverse()*(v2 - FieldT(7)*v3 + v4 + v5) + FieldT(15)*v6; + + assert(res == correct_res); + + // {v0, v3, v4, v5} + const FieldT u = (FieldT::one() - beta).inverse(); + assert(v0 == u * c0 + beta * u * c2 - beta * u * FieldT(2).inverse() * v1 - beta * u * FieldT(2).inverse() * v2 + beta * v6); + assert(v3 == - FieldT(15) * u * c0 - FieldT(30) * u * c1 - FieldT(3) * (FieldT(4) + beta) * u * c2 - FieldT(6) * (FieldT(4) + beta) * u * c3 + (FieldT(24) - FieldT(3) * beta * FieldT(2).inverse()) * u * v1 + (-FieldT(8) + beta * FieldT(2).inverse()) * u * v2 + - FieldT(3) * (-FieldT(16) + beta) * v6); + assert(v4 == - FieldT(15) * u * c0 + FieldT(30) * u * c1 - FieldT(3) * (FieldT(4) + beta) * u * c2 + FieldT(6) * (FieldT(4) + beta) * u * c3 + (FieldT(24) - FieldT(3) * beta * FieldT(2).inverse()) * u * v2 + (-FieldT(8) + beta * FieldT(2).inverse()) * u * v1 + - FieldT(3) * (-FieldT(16) + beta) * v6); + assert(v5 == - FieldT(80) * u * c0 - FieldT(240) * u * c1 - FieldT(8) * (FieldT(9) + beta) * u * c2 - FieldT(24) * (FieldT(9) + beta) * u * c3 - FieldT(2) * (-FieldT(81) + beta) * u * v1 + (-FieldT(81) + beta) * u * v2 + - FieldT(8) * (-FieldT(81) + beta) * v6); + + // c0 + beta c2 - (beta v1)/2 - (beta v2)/ 2 - (-1 + beta) beta v6, + // -15 c0 - 30 c1 - 3 (4 + beta) c2 - 6 (4 + beta) c3 + (24 - (3 beta)/2) v1 + (-8 + beta/2) v2 + 3 (-16 + beta) (-1 + beta) v6, + // -15 c0 + 30 c1 - 3 (4 + beta) c2 + 6 (4 + beta) c3 + (-8 + beta/2) v1 + (24 - (3 
beta)/2) v2 + 3 (-16 + beta) (-1 + beta) v6, + // -80 c0 - 240 c1 - 8 (9 + beta) c2 - 24 (9 + beta) c3 - 2 (-81 + beta) v1 + (-81 + beta) v2 + 8 (-81 + beta) (-1 + beta) v6 + } +} + +int main(void) +{ + edwards_pp::init_public_params(); + test_all_fields(); + test_cyclotomic_squaring >(); + + mnt4_pp::init_public_params(); + test_all_fields(); + test_Fp4_tom_cook(); + test_two_squarings >(); + test_cyclotomic_squaring >(); + + mnt6_pp::init_public_params(); + test_all_fields(); + test_cyclotomic_squaring >(); + + alt_bn128_pp::init_public_params(); + test_field(); + test_Frobenius(); + test_all_fields(); + +#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled + bn128_pp::init_public_params(); + test_field >(); + test_field >(); +#endif +} diff --git a/src/algebra/knowledge_commitment/knowledge_commitment.hpp b/src/algebra/knowledge_commitment/knowledge_commitment.hpp new file mode 100644 index 000000000..902423134 --- /dev/null +++ b/src/algebra/knowledge_commitment/knowledge_commitment.hpp @@ -0,0 +1,84 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for: + - a knowledge commitment, and + - a knowledge commitment vector. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef KNOWLEDGE_COMMITMENT_HPP_ +#define KNOWLEDGE_COMMITMENT_HPP_ + +#include "algebra/fields/fp.hpp" +#include "common/data_structures/sparse_vector.hpp" + +namespace libsnark { + +/********************** Knowledge commitment *********************************/ + +/** + * A knowledge commitment is a pair (g,h) where g is in T1 and h in T2, + * and T1 and T2 are groups (written additively). + * + * Such pairs form a group by defining: + * - "zero" = (0,0) + * - "one" = (1,1) + * - a * (g,h) + b * (g',h') := ( a * g + b * g', a * h + b * h'). + */ +template +struct knowledge_commitment { + + T1 g; + T2 h; + + knowledge_commitment() = default; + knowledge_commitment(const knowledge_commitment &other) = default; + knowledge_commitment(knowledge_commitment &&other) = default; + knowledge_commitment(const T1 &g, const T2 &h); + + knowledge_commitment& operator=(const knowledge_commitment &other) = default; + knowledge_commitment& operator=(knowledge_commitment &&other) = default; + knowledge_commitment operator+(const knowledge_commitment &other) const; + + bool is_zero() const; + bool operator==(const knowledge_commitment &other) const; + bool operator!=(const knowledge_commitment &other) const; + + static knowledge_commitment zero(); + static knowledge_commitment one(); + + void print() const; + + static size_t size_in_bits(); +}; + +template +knowledge_commitment operator*(const bigint &lhs, const knowledge_commitment &rhs); + +template &modulus_p> +knowledge_commitment operator*(const Fp_model &lhs, const knowledge_commitment &rhs); + +template +std::ostream& operator<<(std::ostream& out, const knowledge_commitment &kc); + +template +std::istream& operator>>(std::istream& in, knowledge_commitment &kc); + +/******************** Knowledge commitment vector ****************************/ + +/** + * A knowledge commitment vector is a sparse vector of knowledge commitments. 
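+ *
+ * Only the non-zero coordinates are stored, as the parallel indices/values
+ * arrays of the underlying sparse_vector (plus domain_size_, the logical
+ * length); the kc_* routines in algebra/scalar_multiplication/kc_multiexp.hpp
+ * walk exactly these pairs, so zero coordinates cost nothing. A minimal
+ * usage sketch (types and values are illustrative only):
+ *
+ *   knowledge_commitment_vector<G1<ppT>, G2<ppT> > kcv;
+ *   kcv.indices.push_back(7);            // coordinate position in the domain
+ *   kcv.values.emplace_back(kc_at_7);    // a knowledge_commitment<G1<ppT>, G2<ppT> >
+ *   kcv.domain_size_ = 16;               // logical vector length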
+ */ +template +using knowledge_commitment_vector = sparse_vector >; + +} // libsnark + +#include "algebra/knowledge_commitment/knowledge_commitment.tcc" + +#endif // KNOWLEDGE_COMMITMENT_HPP_ diff --git a/src/algebra/knowledge_commitment/knowledge_commitment.tcc b/src/algebra/knowledge_commitment/knowledge_commitment.tcc new file mode 100644 index 000000000..15b2926c8 --- /dev/null +++ b/src/algebra/knowledge_commitment/knowledge_commitment.tcc @@ -0,0 +1,111 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for: + - a knowledge commitment, and + - a knowledge commitment vector. + + See knowledge_commitment.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef KNOWLEDGE_COMMITMENT_TCC_ +#define KNOWLEDGE_COMMITMENT_TCC_ + +namespace libsnark { + +template +knowledge_commitment::knowledge_commitment(const T1 &g, const T2 &h) : + g(g), h(h) +{ +} + +template +knowledge_commitment knowledge_commitment::zero() +{ + return knowledge_commitment(T1::zero(), T2::zero()); +} + +template +knowledge_commitment knowledge_commitment::one() +{ + return knowledge_commitment(T1::one(), T2::one()); +} + +template +knowledge_commitment knowledge_commitment::operator+(const knowledge_commitment &other) const +{ + return knowledge_commitment(this->g + other.g, + this->h + other.h); +} + +template +bool knowledge_commitment::is_zero() const +{ + return (g.is_zero() && h.is_zero()); +} + +template +bool knowledge_commitment::operator==(const knowledge_commitment &other) const +{ + return (this->g == other.g && + this->h == other.h); +} + +template +bool knowledge_commitment::operator!=(const knowledge_commitment &other) const +{ + return !((*this) == other); +} + +template +knowledge_commitment operator*(const bigint &lhs, const knowledge_commitment &rhs) +{ + return knowledge_commitment(lhs * rhs.g, + lhs * rhs.h); +} + +template &modulus_p> +knowledge_commitment operator*(const Fp_model &lhs, const knowledge_commitment &rhs) +{ + return (lhs.as_bigint()) * rhs; +} + +template +void knowledge_commitment::print() const +{ + printf("knowledge_commitment.g:\n"); + g.print(); + printf("knowledge_commitment.h:\n"); + h.print(); +} + +template +size_t knowledge_commitment::size_in_bits() +{ + return T1::size_in_bits() + T2::size_in_bits(); +} + +template +std::ostream& operator<<(std::ostream& out, const knowledge_commitment &kc) +{ + out << kc.g << OUTPUT_SEPARATOR << kc.h; + return out; +} + +template +std::istream& operator>>(std::istream& in, knowledge_commitment &kc) +{ + in >> kc.g; + consume_OUTPUT_SEPARATOR(in); + in >> kc.h; + return in; +} + +} // libsnark + +#endif // KNOWLEDGE_COMMITMENT_TCC_ diff --git a/src/algebra/scalar_multiplication/kc_multiexp.hpp b/src/algebra/scalar_multiplication/kc_multiexp.hpp new file mode 100644 index 000000000..4e8b55667 --- /dev/null +++ b/src/algebra/scalar_multiplication/kc_multiexp.hpp @@ -0,0 +1,55 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef KC_MULTIEXP_HPP_ +#define KC_MULTIEXP_HPP_ + +/* + Split out from multiexp to prevent cyclical + dependencies. I.e. previously multiexp dependend on + knowledge_commitment, which dependend on sparse_vector, which + dependend on multiexp (to do accumulate). + + Will probably go away in more general exp refactoring. +*/ + +#include "algebra/knowledge_commitment/knowledge_commitment.hpp" + +namespace libsnark { + +template +knowledge_commitment opt_window_wnaf_exp(const knowledge_commitment &base, + const bigint &scalar, const size_t scalar_bits); + +template +knowledge_commitment kc_multi_exp_with_mixed_addition(const knowledge_commitment_vector &vec, + const size_t min_idx, + const size_t max_idx, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end, + const size_t chunks, + const bool use_multiexp=false); + +template +void kc_batch_to_special(std::vector > &vec); + +template +knowledge_commitment_vector kc_batch_exp(const size_t scalar_size, + const size_t T1_window, + const size_t T2_window, + const window_table &T1_table, + const window_table &T2_table, + const FieldT &T1_coeff, + const FieldT &T2_coeff, + const std::vector &v, + const size_t suggested_num_chunks); + +} // libsnark + +#include "algebra/scalar_multiplication/kc_multiexp.tcc" + +#endif // KC_MULTIEXP_HPP_ diff --git a/src/algebra/scalar_multiplication/kc_multiexp.tcc b/src/algebra/scalar_multiplication/kc_multiexp.tcc new file mode 100644 index 000000000..e9c08d4bc --- /dev/null +++ b/src/algebra/scalar_multiplication/kc_multiexp.tcc @@ -0,0 +1,274 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef KC_MULTIEXP_TCC_ +#define KC_MULTIEXP_TCC_ + +namespace libsnark { + +template +knowledge_commitment opt_window_wnaf_exp(const knowledge_commitment &base, + const bigint &scalar, const size_t scalar_bits) +{ + return knowledge_commitment(opt_window_wnaf_exp(base.g, scalar, scalar_bits), + opt_window_wnaf_exp(base.h, scalar, scalar_bits)); +} + +template +knowledge_commitment kc_multi_exp_with_mixed_addition(const knowledge_commitment_vector &vec, + const size_t min_idx, + const size_t max_idx, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end, + const size_t chunks, + const bool use_multiexp) +{ + enter_block("Process scalar vector"); + auto index_it = std::lower_bound(vec.indices.begin(), vec.indices.end(), min_idx); + const size_t offset = index_it - vec.indices.begin(); + + auto value_it = vec.values.begin() + offset; + + const FieldT zero = FieldT::zero(); + const FieldT one = FieldT::one(); + + std::vector p; + std::vector > g; + + knowledge_commitment acc = knowledge_commitment::zero(); + + size_t num_skip = 0; + size_t num_add = 0; + size_t num_other = 0; + + const size_t scalar_length = std::distance(scalar_start, scalar_end); + + while (index_it != vec.indices.end() && *index_it < max_idx) + { + const size_t scalar_position = (*index_it) - min_idx; + assert(scalar_position < scalar_length); + + const FieldT scalar = *(scalar_start + scalar_position); + + if (scalar == zero) + { + // do nothing + ++num_skip; + } + else if (scalar == one) + { +#ifdef USE_MIXED_ADDITION + acc.g = acc.g.mixed_add(value_it->g); + acc.h = acc.h.mixed_add(value_it->h); +#else + acc.g = acc.g + value_it->g; + acc.h = acc.h + value_it->h; +#endif + ++num_add; + } + else + { + p.emplace_back(scalar); + g.emplace_back(*value_it); + ++num_other; + } + + ++index_it; + ++value_it; + } + + //print_indent(); printf("* Elements of w skipped: %zu (%0.2f%%)\n", num_skip, 100.*num_skip/(num_skip+num_add+num_other)); + //print_indent(); printf("* Elements of w processed with special addition: %zu (%0.2f%%)\n", num_add, 100.*num_add/(num_skip+num_add+num_other)); + //print_indent(); printf("* Elements of w remaining: %zu (%0.2f%%)\n", num_other, 100.*num_other/(num_skip+num_add+num_other)); + leave_block("Process scalar vector"); + + return acc + multi_exp, FieldT>(g.begin(), g.end(), p.begin(), p.end(), chunks, use_multiexp); +} + +template +void kc_batch_to_special(std::vector > &vec) +{ + enter_block("Batch-convert knowledge-commitments to special form"); + + std::vector g_vec; + g_vec.reserve(vec.size()); + + for (size_t i = 0; i < vec.size(); ++i) + { + if (!vec[i].g.is_zero()) + { + g_vec.emplace_back(vec[i].g); + } + } + + batch_to_special_all_non_zeros(g_vec); + auto g_it = g_vec.begin(); + T1 T1_zero_special = T1::zero(); + T1_zero_special.to_special(); + + for (size_t i = 0; i < vec.size(); ++i) + { + if (!vec[i].g.is_zero()) + { + vec[i].g = *g_it; + ++g_it; + } + else + { + vec[i].g = T1_zero_special; + } + } + + g_vec.clear(); + + std::vector h_vec; + h_vec.reserve(vec.size()); + + for (size_t i = 0; i < vec.size(); ++i) + { + if (!vec[i].h.is_zero()) + { + h_vec.emplace_back(vec[i].h); + } + } + + batch_to_special_all_non_zeros(h_vec); + auto h_it = h_vec.begin(); + T2 T2_zero_special = T2::zero(); + T2_zero_special.to_special(); + + for (size_t i = 0; i < vec.size(); ++i) + { + if (!vec[i].h.is_zero()) + { + vec[i].h 
= *h_it; + ++h_it; + } + else + { + vec[i].h = T2_zero_special; + } + } + + g_vec.clear(); + + leave_block("Batch-convert knowledge-commitments to special form"); +} + +template +knowledge_commitment_vector kc_batch_exp_internal(const size_t scalar_size, + const size_t T1_window, + const size_t T2_window, + const window_table &T1_table, + const window_table &T2_table, + const FieldT &T1_coeff, + const FieldT &T2_coeff, + const std::vector &v, + const size_t start_pos, + const size_t end_pos, + const size_t expected_size) +{ + knowledge_commitment_vector res; + + res.values.reserve(expected_size); + res.indices.reserve(expected_size); + + for (size_t pos = start_pos; pos != end_pos; ++pos) + { + if (!v[pos].is_zero()) + { + res.values.emplace_back(knowledge_commitment(windowed_exp(scalar_size, T1_window, T1_table, T1_coeff * v[pos]), + windowed_exp(scalar_size, T2_window, T2_table, T2_coeff * v[pos]))); + res.indices.emplace_back(pos); + } + } + + return res; +} + +template +knowledge_commitment_vector kc_batch_exp(const size_t scalar_size, + const size_t T1_window, + const size_t T2_window, + const window_table &T1_table, + const window_table &T2_table, + const FieldT &T1_coeff, + const FieldT &T2_coeff, + const std::vector &v, + const size_t suggested_num_chunks) +{ + knowledge_commitment_vector res; + res.domain_size_ = v.size(); + + size_t nonzero = 0; + for (size_t i = 0; i < v.size(); ++i) + { + nonzero += (v[i].is_zero() ? 0 : 1); + } + + const size_t num_chunks = std::max((size_t)1, std::min(nonzero, suggested_num_chunks)); + + if (!inhibit_profiling_info) + { + print_indent(); printf("Non-zero coordinate count: %zu/%zu (%0.2f%%)\n", nonzero, v.size(), 100.*nonzero/v.size()); + } + + std::vector > tmp(num_chunks); + std::vector chunk_pos(num_chunks+1); + + const size_t chunk_size = nonzero / num_chunks; + const size_t last_chunk = nonzero - chunk_size * (num_chunks - 1); + + chunk_pos[0] = 0; + + size_t cnt = 0; + size_t chunkno = 1; + + for (size_t i = 0; i < v.size(); ++i) + { + cnt += (v[i].is_zero() ? 0 : 1); + if (cnt == chunk_size && chunkno < num_chunks) + { + chunk_pos[chunkno] = i; + cnt = 0; + ++chunkno; + } + } + + chunk_pos[num_chunks] = v.size(); + +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < num_chunks; ++i) + { + tmp[i] = kc_batch_exp_internal(scalar_size, T1_window, T2_window, T1_table, T2_table, T1_coeff, T2_coeff, v, + chunk_pos[i], chunk_pos[i+1], i == num_chunks - 1 ? last_chunk : chunk_size); +#ifdef USE_MIXED_ADDITION + kc_batch_to_special(tmp[i].values); +#endif + } + + if (num_chunks == 1) + { + tmp[0].domain_size_ = v.size(); + return tmp[0]; + } + else + { + for (size_t i = 0; i < num_chunks; ++i) + { + res.values.insert(res.values.end(), tmp[i].values.begin(), tmp[i].values.end()); + res.indices.insert(res.indices.end(), tmp[i].indices.begin(), tmp[i].indices.end()); + } + return res; + } +} + +} // libsnark + +#endif // KC_MULTIEXP_TCC_ diff --git a/src/algebra/scalar_multiplication/multiexp.hpp b/src/algebra/scalar_multiplication/multiexp.hpp new file mode 100644 index 000000000..eaf72d61f --- /dev/null +++ b/src/algebra/scalar_multiplication/multiexp.hpp @@ -0,0 +1,110 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for multi-exponentiation routines. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MULTIEXP_HPP_ +#define MULTIEXP_HPP_ + +namespace libsnark { + +/** + * Naive multi-exponentiation individually multiplies each base by the + * corresponding scalar and adds up the results. + */ +template +T naive_exp(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end); + +template +T naive_plain_exp(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end); + +/** + * Naive multi-exponentiation uses a variant of the Bos-Coster algorithm [1], + * and implementation suggestions from [2]. + * + * [1] = Bos and Coster, "Addition chain heuristics", CRYPTO '89 + * [2] = Bernstein, Duif, Lange, Schwabe, and Yang, "High-speed high-security signatures", CHES '11 + */ +template +T multi_exp(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end, + const size_t chunks, + const bool use_multiexp=false); + + +/** + * A variant of multi_exp that takes advantage of the method mixed_add (instead of the operator '+'). + */ +template +T multi_exp_with_mixed_addition(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end, + const size_t chunks, + const bool use_multiexp); + +/** + * A window table stores window sizes for different instance sizes for fixed-base multi-scalar multiplications. + */ +template +using window_table = std::vector >; + +/** + * Compute window size for the given number of scalars. + */ +template +size_t get_exp_window_size(const size_t num_scalars); + +/** + * Compute table of window sizes. + */ +template +window_table get_window_table(const size_t scalar_size, + const size_t window, + const T &g); + +template +T windowed_exp(const size_t scalar_size, + const size_t window, + const window_table &powers_of_g, + const FieldT &pow); + +template +std::vector batch_exp(const size_t scalar_size, + const size_t window, + const window_table &table, + const std::vector &v); + +template +std::vector batch_exp_with_coeff(const size_t scalar_size, + const size_t window, + const window_table &table, + const FieldT &coeff, + const std::vector &v); + +// defined in every curve +template +void batch_to_special_all_non_zeros(std::vector &vec); + +template +void batch_to_special(std::vector &vec); + +} // libsnark + +#include "algebra/scalar_multiplication/multiexp.tcc" + +#endif // MULTIEXP_HPP_ diff --git a/src/algebra/scalar_multiplication/multiexp.tcc b/src/algebra/scalar_multiplication/multiexp.tcc new file mode 100644 index 000000000..a6b14c4df --- /dev/null +++ b/src/algebra/scalar_multiplication/multiexp.tcc @@ -0,0 +1,590 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for multi-exponentiation routines. + + See multiexp.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MULTIEXP_TCC_ +#define MULTIEXP_TCC_ + +#include "algebra/fields/fp_aux.tcc" + +#include +#include +#include + +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "algebra/scalar_multiplication/wnaf.hpp" + +namespace libsnark { + +template +class ordered_exponent { +// to use std::push_heap and friends later +public: + size_t idx; + bigint r; + + ordered_exponent(const size_t idx, const bigint &r) : idx(idx), r(r) {}; + + bool operator<(const ordered_exponent &other) const + { +#if defined(__x86_64__) && defined(USE_ASM) + if (n == 3) + { + long res; + __asm__ + ("// check for overflow \n\t" + "mov $0, %[res] \n\t" + ADD_CMP(16) + ADD_CMP(8) + ADD_CMP(0) + "jmp done%= \n\t" + "subtract%=: \n\t" + "mov $1, %[res] \n\t" + "done%=: \n\t" + : [res] "=&r" (res) + : [A] "r" (other.r.data), [mod] "r" (this->r.data) + : "cc", "%rax"); + return res; + } + else if (n == 4) + { + long res; + __asm__ + ("// check for overflow \n\t" + "mov $0, %[res] \n\t" + ADD_CMP(24) + ADD_CMP(16) + ADD_CMP(8) + ADD_CMP(0) + "jmp done%= \n\t" + "subtract%=: \n\t" + "mov $1, %[res] \n\t" + "done%=: \n\t" + : [res] "=&r" (res) + : [A] "r" (other.r.data), [mod] "r" (this->r.data) + : "cc", "%rax"); + return res; + } + else if (n == 5) + { + long res; + __asm__ + ("// check for overflow \n\t" + "mov $0, %[res] \n\t" + ADD_CMP(32) + ADD_CMP(24) + ADD_CMP(16) + ADD_CMP(8) + ADD_CMP(0) + "jmp done%= \n\t" + "subtract%=: \n\t" + "mov $1, %[res] \n\t" + "done%=: \n\t" + : [res] "=&r" (res) + : [A] "r" (other.r.data), [mod] "r" (this->r.data) + : "cc", "%rax"); + return res; + } + else +#endif + { + return (mpn_cmp(this->r.data, other.r.data, n) < 0); + } + } +}; + +template +T naive_exp(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end) +{ + T result(T::zero()); + + typename std::vector::const_iterator vec_it; + typename std::vector::const_iterator scalar_it; + + for (vec_it = vec_start, scalar_it = scalar_start; vec_it != vec_end; ++vec_it, ++scalar_it) + { + bigint scalar_bigint = scalar_it->as_bigint(); + result = result + opt_window_wnaf_exp(*vec_it, scalar_bigint, scalar_bigint.num_bits()); + } + assert(scalar_it == scalar_end); + + return result; +} + +template +T naive_plain_exp(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end) +{ + T result(T::zero()); + + typename std::vector::const_iterator vec_it; + typename std::vector::const_iterator scalar_it; + + for (vec_it = vec_start, scalar_it = scalar_start; vec_it != vec_end; ++vec_it, ++scalar_it) + { + result = result + (*scalar_it) * (*vec_it); + } + assert(scalar_it == scalar_end); + + return result; +} + +/* + The multi-exponentiation algorithm below is a variant of the Bos-Coster algorithm + [Bos and Coster, "Addition chain heuristics", CRYPTO '89]. + The implementation uses suggestions from + [Bernstein, Duif, Lange, Schwabe, and Yang, "High-speed high-security signatures", CHES '11]. 
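+
+ Rough idea (informal summary of the loop below): the scalars are kept in a max-heap.
+ At each step the two largest scalars x (attached to base A) and y (attached to base B)
+ are combined via the identity x*A + y*B = (x-y)*A + y*(A+B), i.e. x is replaced by x-y
+ and the base of y absorbs the base of x. When the gap between x and y is large, x*A is
+ instead computed directly with a wNAF multiplication and x is cleared, avoiding a long
+ chain of subtractions. The loop ends once the second-largest scalar is zero; the final
+ x*A is then folded in with a wNAF multiplication as well.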
+*/ +template +T multi_exp_inner(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end) +{ + const mp_size_t n = std::remove_reference::type::num_limbs; + + if (vec_start == vec_end) + { + return T::zero(); + } + + if (vec_start + 1 == vec_end) + { + return (*scalar_start)*(*vec_start); + } + + std::vector > opt_q; + const size_t vec_len = scalar_end - scalar_start; + const size_t odd_vec_len = (vec_len % 2 == 1 ? vec_len : vec_len + 1); + opt_q.reserve(odd_vec_len); + std::vector g; + g.reserve(odd_vec_len); + + typename std::vector::const_iterator vec_it; + typename std::vector::const_iterator scalar_it; + size_t i; + for (i=0, vec_it = vec_start, scalar_it = scalar_start; vec_it != vec_end; ++vec_it, ++scalar_it, ++i) + { + g.emplace_back(*vec_it); + + opt_q.emplace_back(ordered_exponent(i, scalar_it->as_bigint())); + } + std::make_heap(opt_q.begin(),opt_q.end()); + assert(scalar_it == scalar_end); + + if (vec_len != odd_vec_len) + { + g.emplace_back(T::zero()); + opt_q.emplace_back(ordered_exponent(odd_vec_len - 1, bigint(0ul))); + } + assert(g.size() % 2 == 1); + assert(opt_q.size() == g.size()); + + T opt_result = T::zero(); + + while (true) + { + ordered_exponent &a = opt_q[0]; + ordered_exponent &b = (opt_q[1] < opt_q[2] ? opt_q[2] : opt_q[1]); + + const size_t abits = a.r.num_bits(); + + if (b.r.is_zero()) + { + // opt_result = opt_result + (a.r * g[a.idx]); + opt_result = opt_result + opt_window_wnaf_exp(g[a.idx], a.r, abits); + break; + } + + const size_t bbits = b.r.num_bits(); + const size_t limit = (abits-bbits >= 20 ? 20 : abits-bbits); + + if (bbits < 1ul< (x-y) A + y (B+A) + mpn_sub_n(a.r.data, a.r.data, b.r.data, n); + g[b.idx] = g[b.idx] + g[a.idx]; + } + + // regardless of whether a was cleared or subtracted from we push it down, then take back up + + /* heapify A down */ + size_t a_pos = 0; + while (2*a_pos + 2< odd_vec_len) + { + // this is a max-heap so to maintain a heap property we swap with the largest of the two + if (opt_q[2*a_pos+1] < opt_q[2*a_pos+2]) + { + std::swap(opt_q[a_pos], opt_q[2*a_pos+2]); + a_pos = 2*a_pos+2; + } + else + { + std::swap(opt_q[a_pos], opt_q[2*a_pos+1]); + a_pos = 2*a_pos+1; + } + } + + /* now heapify A up appropriate amount of times */ + while (a_pos > 0 && opt_q[(a_pos-1)/2] < opt_q[a_pos]) + { + std::swap(opt_q[a_pos], opt_q[(a_pos-1)/2]); + a_pos = (a_pos-1) / 2; + } + } + + return opt_result; +} + +template +T multi_exp(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end, + const size_t chunks, + const bool use_multiexp) +{ + const size_t total = vec_end - vec_start; + if (total < chunks) + { + return naive_exp(vec_start, vec_end, scalar_start, scalar_end); + } + + const size_t one = total/chunks; + + std::vector partial(chunks, T::zero()); + + if (use_multiexp) + { +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < chunks; ++i) + { + partial[i] = multi_exp_inner(vec_start + i*one, + (i == chunks-1 ? vec_end : vec_start + (i+1)*one), + scalar_start + i*one, + (i == chunks-1 ? scalar_end : scalar_start + (i+1)*one)); + } + } + else + { +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < chunks; ++i) + { + partial[i] = naive_exp(vec_start + i*one, + (i == chunks-1 ? 
vec_end : vec_start + (i+1)*one), + scalar_start + i*one, + (i == chunks-1 ? scalar_end : scalar_start + (i+1)*one)); + } + } + + T final = T::zero(); + + for (size_t i = 0; i < chunks; ++i) + { + final = final + partial[i]; + } + + return final; +} + +template +T multi_exp_with_mixed_addition(typename std::vector::const_iterator vec_start, + typename std::vector::const_iterator vec_end, + typename std::vector::const_iterator scalar_start, + typename std::vector::const_iterator scalar_end, + const size_t chunks, + const bool use_multiexp) +{ + assert(std::distance(vec_start, vec_end) == std::distance(scalar_start, scalar_end)); + enter_block("Process scalar vector"); + auto value_it = vec_start; + auto scalar_it = scalar_start; + + const FieldT zero = FieldT::zero(); + const FieldT one = FieldT::one(); + std::vector p; + std::vector g; + + T acc = T::zero(); + + size_t num_skip = 0; + size_t num_add = 0; + size_t num_other = 0; + + for (; scalar_it != scalar_end; ++scalar_it, ++value_it) + { + if (*scalar_it == zero) + { + // do nothing + ++num_skip; + } + else if (*scalar_it == one) + { +#ifdef USE_MIXED_ADDITION + acc = acc.mixed_add(*value_it); +#else + acc = acc + (*value_it); +#endif + ++num_add; + } + else + { + p.emplace_back(*scalar_it); + g.emplace_back(*value_it); + ++num_other; + } + } + //print_indent(); printf("* Elements of w skipped: %zu (%0.2f%%)\n", num_skip, 100.*num_skip/(num_skip+num_add+num_other)); + //print_indent(); printf("* Elements of w processed with special addition: %zu (%0.2f%%)\n", num_add, 100.*num_add/(num_skip+num_add+num_other)); + //print_indent(); printf("* Elements of w remaining: %zu (%0.2f%%)\n", num_other, 100.*num_other/(num_skip+num_add+num_other)); + + leave_block("Process scalar vector"); + + return acc + multi_exp(g.begin(), g.end(), p.begin(), p.end(), chunks, use_multiexp); +} + +template +size_t get_exp_window_size(const size_t num_scalars) +{ + if (T::fixed_base_exp_window_table.empty()) + { +#ifdef LOWMEM + return 14; +#else + return 17; +#endif + } + size_t window = 1; + for (long i = T::fixed_base_exp_window_table.size()-1; i >= 0; --i) + { +#ifdef DEBUG + if (!inhibit_profiling_info) + { + printf("%ld %zu %zu\n", i, num_scalars, T::fixed_base_exp_window_table[i]); + } +#endif + if (T::fixed_base_exp_window_table[i] != 0 && num_scalars >= T::fixed_base_exp_window_table[i]) + { + window = i+1; + break; + } + } + + if (!inhibit_profiling_info) + { + print_indent(); printf("Choosing window size %zu for %zu elements\n", window, num_scalars); + } + +#ifdef LOWMEM + window = std::min((size_t)14, window); +#endif + return window; +} + +template +window_table get_window_table(const size_t scalar_size, + const size_t window, + const T &g) +{ + const size_t in_window = 1ul< powers_of_g(outerc, std::vector(in_window, T::zero())); + + T gouter = g; + + for (size_t outer = 0; outer < outerc; ++outer) + { + T ginner = T::zero(); + size_t cur_in_window = outer == outerc-1 ? 
last_in_window : in_window; + for (size_t inner = 0; inner < cur_in_window; ++inner) + { + powers_of_g[outer][inner] = ginner; + ginner = ginner + gouter; + } + + for (size_t i = 0; i < window; ++i) + { + gouter = gouter + gouter; + } + } + + return powers_of_g; +} + +template +T windowed_exp(const size_t scalar_size, + const size_t window, + const window_table &powers_of_g, + const FieldT &pow) +{ + const size_t outerc = (scalar_size+window-1)/window; + const bigint pow_val = pow.as_bigint(); + + /* exp */ + T res = powers_of_g[0][0]; + + for (size_t outer = 0; outer < outerc; ++outer) + { + size_t inner = 0; + for (size_t i = 0; i < window; ++i) + { + if (pow_val.test_bit(outer*window + i)) + { + inner |= 1u << i; + } + } + + res = res + powers_of_g[outer][inner]; + } + + return res; +} + +template +std::vector batch_exp(const size_t scalar_size, + const size_t window, + const window_table &table, + const std::vector &v) +{ + if (!inhibit_profiling_info) + { + print_indent(); + } + std::vector res(v.size(), table[0][0]); + +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < v.size(); ++i) + { + res[i] = windowed_exp(scalar_size, window, table, v[i]); + + if (!inhibit_profiling_info && (i % 10000 == 0)) + { + printf("."); + fflush(stdout); + } + } + + if (!inhibit_profiling_info) + { + printf(" DONE!\n"); + } + + return res; +} + +template +std::vector batch_exp_with_coeff(const size_t scalar_size, + const size_t window, + const window_table &table, + const FieldT &coeff, + const std::vector &v) +{ + if (!inhibit_profiling_info) + { + print_indent(); + } + std::vector res(v.size(), table[0][0]); + +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < v.size(); ++i) + { + res[i] = windowed_exp(scalar_size, window, table, coeff * v[i]); + + if (!inhibit_profiling_info && (i % 10000 == 0)) + { + printf("."); + fflush(stdout); + } + } + + if (!inhibit_profiling_info) + { + printf(" DONE!\n"); + } + + return res; +} + +template +void batch_to_special(std::vector &vec) +{ + enter_block("Batch-convert elements to special form"); + + std::vector non_zero_vec; + for (size_t i = 0; i < vec.size(); ++i) + { + if (!vec[i].is_zero()) + { + non_zero_vec.emplace_back(vec[i]); + } + } + + batch_to_special_all_non_zeros(non_zero_vec); + auto it = non_zero_vec.begin(); + T zero_special = T::zero(); + zero_special.to_special(); + + for (size_t i = 0; i < vec.size(); ++i) + { + if (!vec[i].is_zero()) + { + vec[i] = *it; + ++it; + } + else + { + vec[i] = zero_special; + } + } + leave_block("Batch-convert elements to special form"); +} + +} // libsnark + +#endif // MULTIEXP_TCC_ diff --git a/src/algebra/scalar_multiplication/wnaf.hpp b/src/algebra/scalar_multiplication/wnaf.hpp new file mode 100644 index 000000000..a7ecd598e --- /dev/null +++ b/src/algebra/scalar_multiplication/wnaf.hpp @@ -0,0 +1,39 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for wNAF ("width-w Non-Adjacent Form") exponentiation routines. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef WNAF_HPP_ +#define WNAF_HPP_ + +namespace libsnark { + +/** + * Find the wNAF representation of the given scalar relative to the given window size. 
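+ *
+ * Illustrative example: with window_size = 2 the scalar 7 is returned as the digit
+ * string (-1, 0, 0, 1), read least-significant digit first, i.e. 7 = -1*2^0 + 1*2^3.
+ * Every non-zero digit is odd and its absolute value is below 2^window_size.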
+ */ +template +std::vector find_wnaf(const size_t window_size, const bigint &scalar); + +/** + * In additive notation, use wNAF exponentiation (with the given window size) to compute scalar * base. + */ +template +T fixed_window_wnaf_exp(const size_t window_size, const T &base, const bigint &scalar); + +/** + * In additive notation, use wNAF exponentiation (with the window size determined by T) to compute scalar * base. + */ +template +T opt_window_wnaf_exp(const T &base, const bigint &scalar, const size_t scalar_bits); + +} // libsnark + +#include "algebra/scalar_multiplication/wnaf.tcc" + +#endif // WNAF_HPP_ diff --git a/src/algebra/scalar_multiplication/wnaf.tcc b/src/algebra/scalar_multiplication/wnaf.tcc new file mode 100644 index 000000000..a5e47e8e2 --- /dev/null +++ b/src/algebra/scalar_multiplication/wnaf.tcc @@ -0,0 +1,123 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for wNAF ("weighted Non-Adjacent Form") exponentiation routines. + + See wnaf.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef WNAF_TCC_ +#define WNAF_TCC_ + +namespace libsnark { + +template +std::vector find_wnaf(const size_t window_size, const bigint &scalar) +{ + const size_t length = scalar.max_bits(); // upper bound + std::vector res(length+1); + bigint c = scalar; + long j = 0; + while (!c.is_zero()) + { + long u; + if ((c.data[0] & 1) == 1) + { + u = c.data[0] % (1u << (window_size+1)); + if (u > (1 << window_size)) + { + u = u - (1 << (window_size+1)); + } + + if (u > 0) + { + mpn_sub_1(c.data, c.data, n, u); + } + else + { + mpn_add_1(c.data, c.data, n, -u); + } + } + else + { + u = 0; + } + res[j] = u; + ++j; + + mpn_rshift(c.data, c.data, n, 1); // c = c/2 + } + + return res; +} + +template +T fixed_window_wnaf_exp(const size_t window_size, const T &base, const bigint &scalar) +{ + std::vector naf = find_wnaf(window_size, scalar); + std::vector table(1ul<<(window_size-1)); + T tmp = base; + T dbl = base.dbl(); + for (size_t i = 0; i < 1ul<<(window_size-1); ++i) + { + table[i] = tmp; + tmp = tmp + dbl; + } + + T res = T::zero(); + bool found_nonzero = false; + for (long i = naf.size()-1; i >= 0; --i) + { + if (found_nonzero) + { + res = res.dbl(); + } + + if (naf[i] != 0) + { + found_nonzero = true; + if (naf[i] > 0) + { + res = res + table[naf[i]/2]; + } + else + { + res = res - table[(-naf[i])/2]; + } + } + } + + return res; +} + +template +T opt_window_wnaf_exp(const T &base, const bigint &scalar, const size_t scalar_bits) +{ + size_t best = 0; + for (long i = T::wnaf_window_table.size() - 1; i >= 0; --i) + { + if (scalar_bits >= T::wnaf_window_table[i]) + { + best = i+1; + break; + } + } + + if (best > 0) + { + return fixed_window_wnaf_exp(best, base, scalar); + } + else + { + return scalar * base; + } +} + +} // libsnark + +#endif // WNAF_TCC_ diff --git a/src/common/assert_except.hpp b/src/common/assert_except.hpp new file mode 100644 index 000000000..781923044 --- /dev/null +++ b/src/common/assert_except.hpp @@ -0,0 +1,12 @@ +#ifndef ASSERT_except_H +#define ASSERT_except_H + +#include + +inline void assert_except(bool condition) { + if (!condition) { + throw std::runtime_error("Assertion failed."); + } +} + +#endif diff --git 
a/src/common/data_structures/accumulation_vector.hpp b/src/common/data_structures/accumulation_vector.hpp new file mode 100644 index 000000000..37e0c9841 --- /dev/null +++ b/src/common/data_structures/accumulation_vector.hpp @@ -0,0 +1,74 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for an accumulation vector. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef ACCUMULATION_VECTOR_HPP_ +#define ACCUMULATION_VECTOR_HPP_ + +#include "common/data_structures/sparse_vector.hpp" + +namespace libsnark { + +template +class accumulation_vector; + +template +std::ostream& operator<<(std::ostream &out, const accumulation_vector &v); + +template +std::istream& operator>>(std::istream &in, accumulation_vector &v); + +/** + * An accumulation vector comprises an accumulation value and a sparse vector. + * The method "accumulate_chunk" allows one to accumlate portions of the sparse + * vector into the accumualation value. + */ +template +class accumulation_vector { +public: + T first; + sparse_vector rest; + + accumulation_vector() = default; + accumulation_vector(const accumulation_vector &other) = default; + accumulation_vector(accumulation_vector &&other) = default; + accumulation_vector(T &&first, sparse_vector &&rest) : first(std::move(first)), rest(std::move(rest)) {}; + accumulation_vector(T &&first, std::vector &&v) : first(std::move(first)), rest(std::move(v)) {} + accumulation_vector(std::vector &&v) : first(T::zero()), rest(std::move(v)) {}; + + accumulation_vector& operator=(const accumulation_vector &other) = default; + accumulation_vector& operator=(accumulation_vector &&other) = default; + + bool operator==(const accumulation_vector &other) const; + + bool is_fully_accumulated() const; + + size_t domain_size() const; + size_t size() const; + size_t size_in_bits() const; + + template + accumulation_vector accumulate_chunk(const typename std::vector::const_iterator &it_begin, + const typename std::vector::const_iterator &it_end, + const size_t offset) const; + +}; + +template +std::ostream& operator<<(std::ostream &out, const accumulation_vector &v); + +template +std::istream& operator>>(std::istream &in, accumulation_vector &v); + +} // libsnark + +#include "common/data_structures/accumulation_vector.tcc" + +#endif // ACCUMULATION_VECTOR_HPP_ diff --git a/src/common/data_structures/accumulation_vector.tcc b/src/common/data_structures/accumulation_vector.tcc new file mode 100644 index 000000000..9e524aba7 --- /dev/null +++ b/src/common/data_structures/accumulation_vector.tcc @@ -0,0 +1,84 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for an accumulation vector. + + See accumulation_vector.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef ACCUMULATION_VECTOR_TCC_ +#define ACCUMULATION_VECTOR_TCC_ + +namespace libsnark { + +template +bool accumulation_vector::operator==(const accumulation_vector &other) const +{ + return (this->first == other.first && this->rest == other.rest); +} + +template +bool accumulation_vector::is_fully_accumulated() const +{ + return rest.empty(); +} + +template +size_t accumulation_vector::domain_size() const +{ + return rest.domain_size(); +} + +template +size_t accumulation_vector::size() const +{ + return rest.domain_size(); +} + +template +size_t accumulation_vector::size_in_bits() const +{ + const size_t first_size_in_bits = T::size_in_bits(); + const size_t rest_size_in_bits = rest.size_in_bits(); + return first_size_in_bits + rest_size_in_bits; +} + +template +template +accumulation_vector accumulation_vector::accumulate_chunk(const typename std::vector::const_iterator &it_begin, + const typename std::vector::const_iterator &it_end, + const size_t offset) const +{ + std::pair > acc_result = rest.template accumulate(it_begin, it_end, offset); + T new_first = first + acc_result.first; + return accumulation_vector(std::move(new_first), std::move(acc_result.second)); +} + +template +std::ostream& operator<<(std::ostream& out, const accumulation_vector &v) +{ + out << v.first << OUTPUT_NEWLINE; + out << v.rest << OUTPUT_NEWLINE; + + return out; +} + +template +std::istream& operator>>(std::istream& in, accumulation_vector &v) +{ + in >> v.first; + consume_OUTPUT_NEWLINE(in); + in >> v.rest; + consume_OUTPUT_NEWLINE(in); + + return in; +} + +} // libsnark + +#endif // ACCUMULATION_VECTOR_TCC_ diff --git a/src/common/data_structures/merkle_tree.hpp b/src/common/data_structures/merkle_tree.hpp new file mode 100644 index 000000000..6f0c851ba --- /dev/null +++ b/src/common/data_structures/merkle_tree.hpp @@ -0,0 +1,71 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a Merkle tree. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_TREE_HPP_ +#define MERKLE_TREE_HPP_ + +#include +#include +#include "common/utils.hpp" + +namespace libsnark { + +/** + * A Merkle tree is maintained as two maps: + * - a map from addresses to values, and + * - a map from addresses to hashes. + * + * The second map maintains the intermediate hashes of a Merkle tree + * built atop the values currently stored in the tree (the + * implementation admits a very efficient support for sparse + * trees). Besides offering methods to load and store values, the + * class offers methods to retrieve the root of the Merkle tree and to + * obtain the authentication paths for (the value at) a given address. 
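+ *
+ * Implementation note (informal): only the leaves that have been explicitly set, and
+ * the internal hashes on the paths above them, are stored in the two maps; any other
+ * node implicitly takes the precomputed default hash of an all-zero subtree for its
+ * layer (hash_defaults). Nodes are numbered heap-style, so node idx has children
+ * 2*idx+1 and 2*idx+2 and parent (idx-1)/2.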
+ */ + +typedef bit_vector merkle_authentication_node; +typedef std::vector merkle_authentication_path; + +template +class merkle_tree { +private: + + typedef typename HashT::hash_value_type hash_value_type; + typedef typename HashT::merkle_authentication_path_type merkle_authentication_path_type; + +public: + + std::vector hash_defaults; + std::map values; + std::map hashes; + + size_t depth; + size_t value_size; + size_t digest_size; + + merkle_tree(const size_t depth, const size_t value_size); + merkle_tree(const size_t depth, const size_t value_size, const std::vector &contents_as_vector); + merkle_tree(const size_t depth, const size_t value_size, const std::map &contents); + + bit_vector get_value(const size_t address) const; + void set_value(const size_t address, const bit_vector &value); + + hash_value_type get_root() const; + merkle_authentication_path_type get_path(const size_t address) const; + + void dump() const; +}; + +} // libsnark + +#include "common/data_structures/merkle_tree.tcc" + +#endif // MERKLE_TREE_HPP_ diff --git a/src/common/data_structures/merkle_tree.tcc b/src/common/data_structures/merkle_tree.tcc new file mode 100644 index 000000000..281700b33 --- /dev/null +++ b/src/common/data_structures/merkle_tree.tcc @@ -0,0 +1,246 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for Merkle tree. + + See merkle_tree.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_TREE_TCC +#define MERKLE_TREE_TCC + +#include + +#include "common/profiling.hpp" +#include "common/utils.hpp" + +namespace libsnark { + +template +typename HashT::hash_value_type two_to_one_CRH(const typename HashT::hash_value_type &l, + const typename HashT::hash_value_type &r) +{ + typename HashT::hash_value_type new_input; + new_input.insert(new_input.end(), l.begin(), l.end()); + new_input.insert(new_input.end(), r.begin(), r.end()); + + const size_t digest_size = HashT::get_digest_len(); + assert(l.size() == digest_size); + assert(r.size() == digest_size); + + return HashT::get_hash(new_input); +} + +template +merkle_tree::merkle_tree(const size_t depth, const size_t value_size) : + depth(depth), value_size(value_size) +{ + assert(depth < sizeof(size_t) * 8); + + digest_size = HashT::get_digest_len(); + assert(value_size <= digest_size); + + hash_value_type last(digest_size); + hash_defaults.reserve(depth+1); + hash_defaults.emplace_back(last); + for (size_t i = 0; i < depth; ++i) + { + last = two_to_one_CRH(last, last); + hash_defaults.emplace_back(last); + } + + std::reverse(hash_defaults.begin(), hash_defaults.end()); +} + +template +merkle_tree::merkle_tree(const size_t depth, + const size_t value_size, + const std::vector &contents_as_vector) : + merkle_tree(depth, value_size) +{ + assert(log2(contents_as_vector.size()) <= depth); + for (size_t address = 0; address < contents_as_vector.size(); ++address) + { + const size_t idx = address + (1ul< 0; --layer) + { + for (size_t idx = idx_begin; idx < idx_end; idx += 2) + { + hash_value_type l = hashes[idx]; // this is sound, because idx_begin is always a left child + hash_value_type r = (idx + 1 < idx_end ? 
hashes[idx+1] : hash_defaults[layer]); + + hash_value_type h = two_to_one_CRH(l, r); + hashes[(idx-1)/2] = h; + } + + idx_begin = (idx_begin-1)/2; + idx_end = (idx_end-1)/2; + } +} + +template +merkle_tree::merkle_tree(const size_t depth, + const size_t value_size, + const std::map &contents) : + merkle_tree(depth, value_size) +{ + + if (!contents.empty()) + { + assert(contents.rbegin()->first < 1ul<first; + const bit_vector value = it->second; + const size_t idx = address + (1ul< 0; --layer) + { + auto next_last_it = hashes.begin(); + + for (auto it = hashes.begin(); it != last_it; ++it) + { + const size_t idx = it->first; + const hash_value_type hash = it->second; + + if (idx % 2 == 0) + { + // this is the right child of its parent and by invariant we are missing the left child + hashes[(idx-1)/2] = two_to_one_CRH(hash_defaults[layer], hash); + } + else + { + if (std::next(it) == last_it || std::next(it)->first != idx + 1) + { + // this is the left child of its parent and is missing its right child + hashes[(idx-1)/2] = two_to_one_CRH(hash, hash_defaults[layer]); + } + else + { + // typical case: this is the left child of the parent and adjecent to it there is a right child + hashes[(idx-1)/2] = two_to_one_CRH(hash, std::next(it)->second); + ++it; + } + } + } + + last_it = next_last_it; + } + } +} + +template +bit_vector merkle_tree::get_value(const size_t address) const +{ + assert(log2(address) <= depth); + + auto it = values.find(address); + bit_vector padded_result = (it == values.end() ? bit_vector(digest_size) : it->second); + padded_result.resize(value_size); + + return padded_result; +} + +template +void merkle_tree::set_value(const size_t address, + const bit_vector &value) +{ + assert(log2(address) <= depth); + size_t idx = address + (1ul<=0; --layer) + { + idx = (idx-1)/2; + + auto it = hashes.find(2*idx+1); + hash_value_type l = (it == hashes.end() ? hash_defaults[layer+1] : it->second); + + it = hashes.find(2*idx+2); + hash_value_type r = (it == hashes.end() ? hash_defaults[layer+1] : it->second); + + hash_value_type h = two_to_one_CRH(l, r); + hashes[idx] = h; + } +} + +template +typename HashT::hash_value_type merkle_tree::get_root() const +{ + auto it = hashes.find(0); + return (it == hashes.end() ? hash_defaults[0] : it->second); +} + +template +typename HashT::merkle_authentication_path_type merkle_tree::get_path(const size_t address) const +{ + typename HashT::merkle_authentication_path_type result(depth); + assert(log2(address) <= depth); + size_t idx = address + (1ul< 0; --layer) + { + size_t sibling_idx = ((idx + 1) ^ 1) - 1; + auto it = hashes.find(sibling_idx); + if (layer == depth) + { + auto it2 = values.find(sibling_idx - ((1ul<second); + result[layer-1].resize(digest_size); + } + else + { + result[layer-1] = (it == hashes.end() ? hash_defaults[layer] : it->second); + } + + idx = (idx-1)/2; + } + + return result; +} + +template +void merkle_tree::dump() const +{ + for (size_t i = 0; i < 1ul< ", i); + const bit_vector value = (it == values.end() ? bit_vector(value_size) : it->second); + for (bool b : value) + { + printf("%d", b ? 
1 : 0); + } + printf("\n"); + } + printf("\n"); +} + +} // libsnark + +#endif // MERKLE_TREE_TCC diff --git a/src/common/data_structures/sparse_vector.hpp b/src/common/data_structures/sparse_vector.hpp new file mode 100644 index 000000000..8b134f42e --- /dev/null +++ b/src/common/data_structures/sparse_vector.hpp @@ -0,0 +1,79 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a sparse vector. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SPARSE_VECTOR_HPP_ +#define SPARSE_VECTOR_HPP_ + +#include + +namespace libsnark { + +template +struct sparse_vector; + +template +std::ostream& operator<<(std::ostream &out, const sparse_vector &v); + +template +std::istream& operator>>(std::istream &in, sparse_vector &v); + +/** + * A sparse vector is a list of indices along with corresponding values. + * The indices are selected from the set {0,1,...,domain_size-1}. + */ +template +struct sparse_vector { + + std::vector indices; + std::vector values; + size_t domain_size_ = 0; + + sparse_vector() = default; + sparse_vector(const sparse_vector &other) = default; + sparse_vector(sparse_vector &&other) = default; + sparse_vector(std::vector &&v); /* constructor from std::vector */ + + sparse_vector& operator=(const sparse_vector &other) = default; + sparse_vector& operator=(sparse_vector &&other) = default; + + T operator[](const size_t idx) const; + + bool operator==(const sparse_vector &other) const; + bool operator==(const std::vector &other) const; + + bool is_valid() const; + bool empty() const; + + size_t domain_size() const; // return domain_size_ + size_t size() const; // return the number of indices (representing the number of non-zero entries) + size_t size_in_bits() const; // return the number bits needed to store the sparse vector + + /* return a pair consisting of the accumulated value and the sparse vector of non-accumuated values */ + template + std::pair > accumulate(const typename std::vector::const_iterator &it_begin, + const typename std::vector::const_iterator &it_end, + const size_t offset) const; + + friend std::ostream& operator<< (std::ostream &out, const sparse_vector &v); + friend std::istream& operator>> (std::istream &in, sparse_vector &v); +}; + +template +std::ostream& operator<<(std::ostream& out, const sparse_vector &v); + +template +std::istream& operator>>(std::istream& in, sparse_vector &v); + +} // libsnark + +#include "common/data_structures/sparse_vector.tcc" + +#endif // SPARSE_VECTOR_HPP_ diff --git a/src/common/data_structures/sparse_vector.tcc b/src/common/data_structures/sparse_vector.tcc new file mode 100644 index 000000000..cfc5d7559 --- /dev/null +++ b/src/common/data_structures/sparse_vector.tcc @@ -0,0 +1,316 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for a sparse vector. + + See sparse_vector.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SPARSE_VECTOR_TCC_ +#define SPARSE_VECTOR_TCC_ + +#include "algebra/scalar_multiplication/multiexp.hpp" + +#include + +namespace libsnark { + +template +sparse_vector::sparse_vector(std::vector &&v) : + values(std::move(v)), domain_size_(values.size()) +{ + indices.resize(domain_size_); + std::iota(indices.begin(), indices.end(), 0); +} + +template +T sparse_vector::operator[](const size_t idx) const +{ + auto it = std::lower_bound(indices.begin(), indices.end(), idx); + return (it != indices.end() && *it == idx) ? values[it - indices.begin()] : T(); +} + +template +bool sparse_vector::operator==(const sparse_vector &other) const +{ + if (this->domain_size_ != other.domain_size_) + { + return false; + } + + size_t this_pos = 0, other_pos = 0; + while (this_pos < this->indices.size() && other_pos < other.indices.size()) + { + if (this->indices[this_pos] == other.indices[other_pos]) + { + if (this->values[this_pos] != other.values[other_pos]) + { + return false; + } + ++this_pos; + ++other_pos; + } + else if (this->indices[this_pos] < other.indices[other_pos]) + { + if (!this->values[this_pos].is_zero()) + { + return false; + } + ++this_pos; + } + else + { + if (!other.values[other_pos].is_zero()) + { + return false; + } + ++other_pos; + } + } + + /* at least one of the vectors has been exhausted, so other must be empty */ + while (this_pos < this->indices.size()) + { + if (!this->values[this_pos].is_zero()) + { + return false; + } + ++this_pos; + } + + while (other_pos < other.indices.size()) + { + if (!other.values[other_pos].is_zero()) + { + return false; + } + ++other_pos; + } + + return true; +} + +template +bool sparse_vector::operator==(const std::vector &other) const +{ + if (this->domain_size_ < other.size()) + { + return false; + } + + size_t j = 0; + for (size_t i = 0; i < other.size(); ++i) + { + if (this->indices[j] == i) + { + if (this->values[j] != other[j]) + { + return false; + } + ++j; + } + else + { + if (!other[j].is_zero()) + { + return false; + } + } + } + + return true; +} + +template +bool sparse_vector::is_valid() const +{ + if (values.size() == indices.size() && values.size() <= domain_size_) + { + return false; + } + + for (size_t i = 0; i + 1 < indices.size(); ++i) + { + if (indices[i] >= indices[i+1]) + { + return false; + } + } + + if (!indices.empty() && indices[indices.size()-1] >= domain_size_) + { + return false; + } + + return true; +} + +template +bool sparse_vector::empty() const +{ + return indices.empty(); +} + +template +size_t sparse_vector::domain_size() const +{ + return domain_size_; +} + +template +size_t sparse_vector::size() const +{ + return indices.size(); +} + +template +size_t sparse_vector::size_in_bits() const +{ + return indices.size() * (sizeof(size_t) * 8 + T::size_in_bits()); +} + +template +template +std::pair > sparse_vector::accumulate(const typename std::vector::const_iterator &it_begin, + const typename std::vector::const_iterator &it_end, + const size_t offset) const +{ + // TODO: does not really belong here. + const size_t chunks = 1; + const bool use_multiexp = true; + + T accumulated_value = T::zero(); + sparse_vector resulting_vector; + resulting_vector.domain_size_ = domain_size_; + + const size_t range_len = it_end - it_begin; + bool in_block = false; + size_t first_pos = -1, last_pos = -1; // g++ -flto emits unitialized warning, even though in_block guards for such cases. 
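+ // Sketch of the loop below (informal): the stored indices are scanned in order and
+ // maximal runs of consecutive entries whose indices fall inside [offset, offset + range_len)
+ // are folded into accumulated_value with a single multi_exp call per run, while every
+ // entry outside that window is copied unchanged into resulting_vector.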
+ + for (size_t i = 0; i < indices.size(); ++i) + { + const bool matching_pos = (offset <= indices[i] && indices[i] < offset + range_len); + // printf("i = %zu, pos[i] = %zu, offset = %zu, w_size = %zu\n", i, indices[i], offset, w_size); + bool copy_over; + + if (in_block) + { + if (matching_pos && last_pos == i-1) + { + // block can be extended, do it + last_pos = i; + copy_over = false; + } + else + { + // block has ended here + in_block = false; + copy_over = true; + +#ifdef DEBUG + print_indent(); printf("doing multiexp for w_%zu ... w_%zu\n", indices[first_pos], indices[last_pos]); +#endif + accumulated_value = accumulated_value + multi_exp(values.begin() + first_pos, + values.begin() + last_pos + 1, + it_begin + (indices[first_pos] - offset), + it_begin + (indices[last_pos] - offset) + 1, + chunks, use_multiexp); + } + } + else + { + if (matching_pos) + { + // block can be started + first_pos = i; + last_pos = i; + in_block = true; + copy_over = false; + } + else + { + copy_over = true; + } + } + + if (copy_over) + { + resulting_vector.indices.emplace_back(indices[i]); + resulting_vector.values.emplace_back(values[i]); + } + } + + if (in_block) + { +#ifdef DEBUG + print_indent(); printf("doing multiexp for w_%zu ... w_%zu\n", indices[first_pos], indices[last_pos]); +#endif + accumulated_value = accumulated_value + multi_exp(values.begin() + first_pos, + values.begin() + last_pos + 1, + it_begin + (indices[first_pos] - offset), + it_begin + (indices[last_pos] - offset) + 1, + chunks, use_multiexp); + } + + return std::make_pair(accumulated_value, resulting_vector); +} + +template +std::ostream& operator<<(std::ostream& out, const sparse_vector &v) +{ + out << v.domain_size_ << "\n"; + out << v.indices.size() << "\n"; + for (const size_t& i : v.indices) + { + out << i << "\n"; + } + + out << v.values.size() << "\n"; + for (const T& t : v.values) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +template +std::istream& operator>>(std::istream& in, sparse_vector &v) +{ + in >> v.domain_size_; + consume_newline(in); + + size_t s; + in >> s; + consume_newline(in); + v.indices.resize(s); + for (size_t i = 0; i < s; ++i) + { + in >> v.indices[i]; + consume_newline(in); + } + + v.values.clear(); + in >> s; + consume_newline(in); + v.values.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + T t; + in >> t; + consume_OUTPUT_NEWLINE(in); + v.values.emplace_back(t); + } + + return in; +} + +} // libsnark + +#endif // SPARSE_VECTOR_TCC_ diff --git a/src/common/default_types/ec_pp.hpp b/src/common/default_types/ec_pp.hpp new file mode 100644 index 000000000..b08c2da88 --- /dev/null +++ b/src/common/default_types/ec_pp.hpp @@ -0,0 +1,53 @@ +/** @file + ***************************************************************************** + + This file defines default_ec_pp based on the CURVE=... make flag, which selects + which elliptic curve is used to implement group arithmetic and pairings. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef EC_PP_HPP_ +#define EC_PP_HPP_ + +/************************ Pick the elliptic curve ****************************/ + +#ifdef CURVE_ALT_BN128 +#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp" +namespace libsnark { +typedef alt_bn128_pp default_ec_pp; +} // libsnark +#endif + +#ifdef CURVE_BN128 +#include "algebra/curves/bn128/bn128_pp.hpp" +namespace libsnark { +typedef bn128_pp default_ec_pp; +} // libsnark +#endif + +#ifdef CURVE_EDWARDS +#include "algebra/curves/edwards/edwards_pp.hpp" +namespace libsnark { +typedef edwards_pp default_ec_pp; +} // libsnark +#endif + +#ifdef CURVE_MNT4 +#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp" +namespace libsnark { +typedef mnt4_pp default_ec_pp; +} // libsnark +#endif + +#ifdef CURVE_MNT6 +#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp" +namespace libsnark { +typedef mnt6_pp default_ec_pp; +} // libsnark +#endif + +#endif // EC_PP_HPP_ diff --git a/src/common/default_types/r1cs_ppzksnark_pp.hpp b/src/common/default_types/r1cs_ppzksnark_pp.hpp new file mode 100644 index 000000000..c819b4a85 --- /dev/null +++ b/src/common/default_types/r1cs_ppzksnark_pp.hpp @@ -0,0 +1,22 @@ +/** @file + ***************************************************************************** + + This file defines default_r1cs_ppzksnark_pp based on the elliptic curve + choice selected in ec_pp.hpp. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_PPZKSNARK_PP_HPP_ +#define R1CS_PPZKSNARK_PP_HPP_ + +#include "common/default_types/ec_pp.hpp" + +namespace libsnark { +typedef default_ec_pp default_r1cs_ppzksnark_pp; +} // libsnark + +#endif // R1CS_PPZKSNARK_PP_HPP_ diff --git a/src/common/profiling.cpp b/src/common/profiling.cpp new file mode 100644 index 000000000..d227203a0 --- /dev/null +++ b/src/common/profiling.cpp @@ -0,0 +1,379 @@ +/** @file + ***************************************************************************** + + Implementation of functions for profiling code blocks. + + See profiling.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "common/profiling.hpp" +#include +#include +#include +#include +#include +#include +#include +#include "common/default_types/ec_pp.hpp" +#include "common/utils.hpp" + +#ifndef NO_PROCPS +#include +#endif + +namespace libsnark { + +long long get_nsec_time() +{ + auto timepoint = std::chrono::high_resolution_clock::now(); + return std::chrono::duration_cast(timepoint.time_since_epoch()).count(); +} + +/* Return total CPU time consumsed by all threads of the process, in nanoseconds. */ +long long get_nsec_cpu_time() +{ + ::timespec ts; + if ( ::clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) ) + throw ::std::runtime_error("clock_gettime(CLOCK_PROCESS_CPUTIME_ID) failed"); + // If we expected this to work, don't silently ignore failures, because that would hide the problem and incur an unnecessarily system-call overhead. 
So if we ever observe this exception, we should probably add a suitable #ifdef . + //TODO: clock_gettime(CLOCK_PROCESS_CPUTIME_ID) is not supported by native Windows. What about Cygwin? Should we #ifdef on CLOCK_PROCESS_CPUTIME_ID or on __linux__? + return ts.tv_sec * 1000000000ll + ts.tv_nsec; +} + +long long start_time, last_time; +long long start_cpu_time, last_cpu_time; + +void start_profiling() +{ + printf("Reset time counters for profiling\n"); + + last_time = start_time = get_nsec_time(); + last_cpu_time = start_cpu_time = get_nsec_cpu_time(); +} + +std::map invocation_counts; +std::map enter_times; +std::map last_times; +std::map cumulative_times; +//TODO: Instead of analogous maps for time and cpu_time, use a single struct-valued map +std::map enter_cpu_times; +std::map last_cpu_times; +std::map, long long> op_counts; +std::map, long long> cumulative_op_counts; // ((msg, data_point), value) + // TODO: Convert op_counts and cumulative_op_counts from pair to structs +size_t indentation = 0; + +std::vector block_names; + +std::list > op_data_points = { +#ifdef PROFILE_OP_COUNTS + std::make_pair("Fradd", &Fr::add_cnt), + std::make_pair("Frsub", &Fr::sub_cnt), + std::make_pair("Frmul", &Fr::mul_cnt), + std::make_pair("Frinv", &Fr::inv_cnt), + std::make_pair("Fqadd", &Fq::add_cnt), + std::make_pair("Fqsub", &Fq::sub_cnt), + std::make_pair("Fqmul", &Fq::mul_cnt), + std::make_pair("Fqinv", &Fq::inv_cnt), + std::make_pair("G1add", &G1::add_cnt), + std::make_pair("G1dbl", &G1::dbl_cnt), + std::make_pair("G2add", &G2::add_cnt), + std::make_pair("G2dbl", &G2::dbl_cnt) +#endif +}; + +bool inhibit_profiling_info = false; +bool inhibit_profiling_counters = false; + +void clear_profiling_counters() +{ + invocation_counts.clear(); + last_times.clear(); + last_cpu_times.clear(); + cumulative_times.clear(); +} + +void print_cumulative_time_entry(const std::string &key, const long long factor) +{ + const double total_ms = (cumulative_times.at(key) * 1e-6); + const size_t cnt = invocation_counts.at(key); + const double avg_ms = total_ms / cnt; + printf(" %-45s: %12.5fms = %lld * %0.5fms (%zu invocations, %0.5fms = %lld * %0.5fms per invocation)\n", key.c_str(), total_ms, factor, total_ms/factor, cnt, avg_ms, factor, avg_ms/factor); +} + +void print_cumulative_times(const long long factor) +{ + printf("Dumping times:\n"); + for (auto& kv : cumulative_times) + { + print_cumulative_time_entry(kv.first, factor); + } +} + +void print_cumulative_op_counts(const bool only_fq) +{ +#ifdef PROFILE_OP_COUNTS + printf("Dumping operation counts:\n"); + for (auto& msg : invocation_counts) + { + printf(" %-45s: ", msg.first.c_str()); + bool first = true; + for (auto& data_point : op_data_points) + { + if (only_fq && data_point.first.compare(0, 2, "Fq") != 0) + { + continue; + } + + if (!first) + { + printf(", "); + } + printf("%-5s = %7.0f (%3zu)", + data_point.first.c_str(), + 1. 
* cumulative_op_counts[std::make_pair(msg.first, data_point.first)] / msg.second, + msg.second); + first = false; + } + printf("\n"); + } +#else + UNUSED(only_fq); +#endif +} + +void print_op_profiling(const std::string &msg) +{ +#ifdef PROFILE_OP_COUNTS + printf("\n"); + print_indent(); + + printf("(opcounts) = ("); + bool first = true; + for (std::pair p : op_data_points) + { + if (!first) + { + printf(", "); + } + + printf("%s=%lld", p.first.c_str(), *(p.second)-op_counts[std::make_pair(msg, p.first)]); + first = false; + } + printf(")"); +#else + UNUSED(msg); +#endif +} + +static void print_times_from_last_and_start(long long now, long long last, + long long cpu_now, long long cpu_last) +{ + long long time_from_start = now - start_time; + long long time_from_last = now - last; + + long long cpu_time_from_start = cpu_now - start_cpu_time; + long long cpu_time_from_last = cpu_now - cpu_last; + + if (time_from_last != 0) { + double parallelism_from_last = 1.0 * cpu_time_from_last / time_from_last; + printf("[%0.4fs x%0.2f]", time_from_last * 1e-9, parallelism_from_last); + } else { + printf("[ ]"); + } + if (time_from_start != 0) { + double parallelism_from_start = 1.0 * cpu_time_from_start / time_from_start; + printf("\t(%0.4fs x%0.2f from start)", time_from_start * 1e-9, parallelism_from_start); + } +} + +void print_time(const char* msg) +{ + if (inhibit_profiling_info) + { + return; + } + + long long now = get_nsec_time(); + long long cpu_now = get_nsec_cpu_time(); + + printf("%-35s\t", msg); + print_times_from_last_and_start(now, last_time, cpu_now, last_cpu_time); +#ifdef PROFILE_OP_COUNTS + print_op_profiling(msg); +#endif + printf("\n"); + + fflush(stdout); + last_time = now; + last_cpu_time = cpu_now; +} + +void print_header(const char *msg) +{ + printf("\n================================================================================\n"); + printf("%s\n", msg); + printf("================================================================================\n\n"); +} + +void print_indent() +{ + for (size_t i = 0; i < indentation; ++i) + { + printf(" "); + } +} + +void op_profiling_enter(const std::string &msg) +{ + for (std::pair p : op_data_points) + { + op_counts[std::make_pair(msg, p.first)] = *(p.second); + } +} + +void enter_block(const std::string &msg, const bool indent) +{ + if (inhibit_profiling_counters) + { + return; + } + + block_names.emplace_back(msg); + long long t = get_nsec_time(); + enter_times[msg] = t; + long long cpu_t = get_nsec_cpu_time(); + enter_cpu_times[msg] = cpu_t; + + if (inhibit_profiling_info) + { + return; + } + +#ifdef MULTICORE +#pragma omp critical +#endif + { + op_profiling_enter(msg); + + print_indent(); + printf("(enter) %-35s\t", msg.c_str()); + print_times_from_last_and_start(t, t, cpu_t, cpu_t); + printf("\n"); + fflush(stdout); + + if (indent) + { + ++indentation; + } + } +} + +void leave_block(const std::string &msg, const bool indent) +{ + if (inhibit_profiling_counters) + { + return; + } + +#ifndef MULTICORE + assert(*(--block_names.end()) == msg); +#endif + block_names.pop_back(); + + ++invocation_counts[msg]; + + long long t = get_nsec_time(); + last_times[msg] = (t - enter_times[msg]); + cumulative_times[msg] += (t - enter_times[msg]); + + long long cpu_t = get_nsec_cpu_time(); + last_cpu_times[msg] = (cpu_t - enter_cpu_times[msg]); + +#ifdef PROFILE_OP_COUNTS + for (std::pair p : op_data_points) + { + cumulative_op_counts[std::make_pair(msg, p.first)] += *(p.second)-op_counts[std::make_pair(msg, p.first)]; + } +#endif + + if 
(inhibit_profiling_info) + { + return; + } + +#ifdef MULTICORE +#pragma omp critical +#endif + { + if (indent) + { + --indentation; + } + + print_indent(); + printf("(leave) %-35s\t", msg.c_str()); + print_times_from_last_and_start(t, enter_times[msg], cpu_t, enter_cpu_times[msg]); + print_op_profiling(msg); + printf("\n"); + fflush(stdout); + } +} + +void print_mem(const std::string &s) +{ +#ifndef NO_PROCPS + struct proc_t usage; + look_up_our_self(&usage); + if (s.empty()) + { + printf("* Peak vsize (physical memory+swap) in mebibytes: %lu\n", usage.vsize >> 20); + } + else + { + printf("* Peak vsize (physical memory+swap) in mebibytes (%s): %lu\n", s.c_str(), usage.vsize >> 20); + } +#else + printf("* Memory profiling not supported in NO_PROCPS mode\n"); +#endif +} + +void print_compilation_info() +{ +#ifdef __GNUC__ + printf("g++ version: %s\n", __VERSION__); + //printf("Compiled on %s %s\n", __DATE__, __TIME__); +#endif +#ifdef STATIC + printf("STATIC: yes\n"); +#else + printf("STATIC: no\n"); +#endif +#ifdef MULTICORE + printf("MULTICORE: yes\n"); +#else + printf("MULTICORE: no\n"); +#endif +#ifdef DEBUG + printf("DEBUG: yes\n"); +#else + printf("DEBUG: no\n"); +#endif +#ifdef PROFILE_OP_COUNTS + printf("PROFILE_OP_COUNTS: yes\n"); +#else + printf("PROFILE_OP_COUNTS: no\n"); +#endif +#ifdef _GLIBCXX_DEBUG + printf("_GLIBCXX_DEBUG: yes\n"); +#else + printf("_GLIBCXX_DEBUG: no\n"); +#endif +} + +} // libsnark diff --git a/src/common/profiling.hpp b/src/common/profiling.hpp new file mode 100644 index 000000000..9619117f4 --- /dev/null +++ b/src/common/profiling.hpp @@ -0,0 +1,51 @@ +/** @file + ***************************************************************************** + + Declaration of functions for profiling code blocks. + + Reports time, operation counts, memory usage, and others. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef PROFILING_HPP_ +#define PROFILING_HPP_ + +#include +#include +#include +#include + +namespace libsnark { + +void start_profiling(); +long long get_nsec_time(); +void print_time(const char* msg); +void print_header(const char* msg); + +void print_indent(); + +extern bool inhibit_profiling_info; +extern bool inhibit_profiling_counters; +extern std::map invocation_counts; +extern std::map last_times; +extern std::map cumulative_times; + +void clear_profiling_counters(); + +void print_cumulative_time_entry(const std::string &key, const long long factor=1); +void print_cumulative_times(const long long factor=1); +void print_cumulative_op_counts(const bool only_fq=false); + +void enter_block(const std::string &msg, const bool indent=true); +void leave_block(const std::string &msg, const bool indent=true); + +void print_mem(const std::string &s = ""); +void print_compilation_info(); + +} // libsnark + +#endif // PROFILING_HPP_ diff --git a/src/common/serialization.hpp b/src/common/serialization.hpp new file mode 100644 index 000000000..c931c65b2 --- /dev/null +++ b/src/common/serialization.hpp @@ -0,0 +1,104 @@ +/** @file + ***************************************************************************** + + Declaration of serialization routines and constants. 
+ + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SERIALIZATION_HPP_ +#define SERIALIZATION_HPP_ + +#include +#include +#include +#include +#include + +namespace libsnark { + +/* + * @todo + * The serialization is fragile. Shoud be rewritten using a standard, portable-format + * library like boost::serialize. + * + * However, for now the following conventions are used within the code. + * + * All algebraic objects support either binary or decimal output using + * the standard C++ stream operators (operator<<, operator>>). + * + * The binary mode is activated by defining a BINARY_OUTPUT + * preprocessor macro (e.g. g++ -DBINARY_OUTPUT ...). + * + * Binary output assumes that the stream is to be binary read at its + * current position so any white space should be consumed beforehand. + * + * Consecutive algebraic objects are separated by OUTPUT_NEWLINE and + * within themselves (e.g. X and Y coordinates for field elements) with + * OUTPUT_SEPARATOR (as defined below). + * + * Therefore to dump two integers, two Fp elements and another integer + * one would: + * + * out << 3 << "\n"; + * out << 4 << "\n"; + * out << FieldT(56) << OUTPUT_NEWLINE; + * out << FieldT(78) << OUTPUT_NEWLINE; + * out << 9 << "\n"; + * + * Then reading back it its reader's responsibility (!) to consume "\n" + * after 4, but Fp::operator<< will correctly consume OUTPUT_NEWLINE. + * + * The reader should also consume "\n" after 9, so that another field + * element can be properly chained. This is especially important for + * binary output. + * + * The binary serialization of algebraic objects is currently *not* + * portable between machines of different word sizes. + */ + +#ifdef BINARY_OUTPUT +#define OUTPUT_NEWLINE "" +#define OUTPUT_SEPARATOR "" +#else +#define OUTPUT_NEWLINE "\n" +#define OUTPUT_SEPARATOR " " +#endif + +inline void consume_newline(std::istream &in); +inline void consume_OUTPUT_NEWLINE(std::istream &in); +inline void consume_OUTPUT_SEPARATOR(std::istream &in); + +inline void output_bool(std::ostream &out, const bool b); + +inline void output_bool_vector(std::ostream &out, const std::vector &v); + +template +T reserialize(const T &obj); + +template +std::ostream& operator<<(std::ostream& out, const std::vector &v); + +template +std::istream& operator>>(std::ostream& out, std::vector &v); + +template +std::ostream& operator<<(std::ostream& out, const std::map &m); + +template +std::istream& operator>>(std::istream& in, std::map &m); + +template +std::ostream& operator<<(std::ostream& out, const std::set &s); + +template +std::istream& operator>>(std::istream& in, std::set &s); + +} // libsnark + +#include "common/serialization.tcc" + +#endif // SERIALIZATION_HPP_ diff --git a/src/common/serialization.tcc b/src/common/serialization.tcc new file mode 100644 index 000000000..398f97850 --- /dev/null +++ b/src/common/serialization.tcc @@ -0,0 +1,180 @@ +/** @file + ***************************************************************************** + + Implementation of serialization routines. + + See serialization.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SERIALIZATION_TCC_ +#define SERIALIZATION_TCC_ + +#include +#include +#include "common/utils.hpp" + +namespace libsnark { + +inline void consume_newline(std::istream &in) +{ + char c; + in.read(&c, 1); +} + +inline void consume_OUTPUT_NEWLINE(std::istream &in) +{ +#ifdef BINARY_OUTPUT + // nothing to consume + UNUSED(in); +#else + char c; + in.read(&c, 1); +#endif +} + +inline void consume_OUTPUT_SEPARATOR(std::istream &in) +{ +#ifdef BINARY_OUTPUT + // nothing to consume + UNUSED(in); +#else + char c; + in.read(&c, 1); +#endif +} + +inline void output_bool(std::ostream &out, const bool b) +{ + out << (b ? 1 : 0) << "\n"; +} + +inline void output_bool_vector(std::ostream &out, const std::vector &v) +{ + out << v.size() << "\n"; + for (const bool b : v) + { + output_bool(out, b); + } +} + +template +T reserialize(const T &obj) +{ + std::stringstream ss; + ss << obj; + T tmp; + ss >> tmp; + assert(obj == tmp); + return tmp; +} + +template +std::ostream& operator<<(std::ostream& out, const std::vector &v) +{ + static_assert(!std::is_same::value, "this does not work for std::vector"); + out << v.size() << "\n"; + for (const T& t : v) + { + out << t << OUTPUT_NEWLINE; + } + + return out; +} + +template +std::istream& operator>>(std::istream& in, std::vector &v) +{ + static_assert(!std::is_same::value, "this does not work for std::vector"); + size_t size; + in >> size; + consume_newline(in); + + v.resize(0); + for (size_t i = 0; i < size; ++i) + { + T elt; + in >> elt; + consume_OUTPUT_NEWLINE(in); + v.push_back(elt); + } + + return in; +} + +template +std::ostream& operator<<(std::ostream& out, const std::map &m) +{ + out << m.size() << "\n"; + + for (auto &it : m) + { + out << it.first << "\n"; + out << it.second << "\n"; + } + + return out; +} + +template +std::istream& operator>>(std::istream& in, std::map &m) +{ + m.clear(); + size_t size; + in >> size; + consume_newline(in); + + for (size_t i = 0; i < size; ++i) + { + T1 k; + T2 v; + in >> k; + consume_newline(in); + in >> v; + consume_newline(in); + m[k] = v; + } + + return in; +} + +template +std::ostream& operator<<(std::ostream& out, const std::set &s) +{ + out << s.size() << "\n"; + + for (auto &el : s) + { + out << el << "\n"; + } + + return out; +} + + +template +std::istream& operator>>(std::istream& in, std::set &s) +{ + s.clear(); + size_t size; + in >> size; + consume_newline(in); + + for (size_t i = 0; i < size; ++i) + { + T el; + in >> el; + consume_newline(in); + s.insert(el); + } + + return in; +} + +} + +#endif // SERIALIZATION_TCC_ diff --git a/src/common/template_utils.hpp b/src/common/template_utils.hpp new file mode 100644 index 000000000..8dbfd261d --- /dev/null +++ b/src/common/template_utils.hpp @@ -0,0 +1,26 @@ +/** @file + ***************************************************************************** + + Declaration of functions for supporting the use of templates. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
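A hedged sketch of the classic detection idiom that void_type (declared just below) supports; the trait name has_value_type and the tested member are illustrative:

#include <type_traits>
#include <vector>
#include "common/template_utils.hpp"

// Detect whether T exposes a nested typedef `value_type`, using void_type for SFINAE.
template<typename T, typename Enable = void>
struct has_value_type : std::false_type {};

template<typename T>
struct has_value_type<T, typename libsnark::void_type<typename T::value_type>::type> : std::true_type {};

static_assert(has_value_type<std::vector<int> >::value, "std::vector exposes value_type");
static_assert(!has_value_type<int>::value, "int does not");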
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef TEMPLATE_UTILS_HPP_ +#define TEMPLATE_UTILS_HPP_ + +namespace libsnark { + +/* A commonly used SFINAE helper type */ +template +struct void_type +{ + typedef void type; +}; + +} // libsnark + +#endif // TEMPLATE_UTILS_HPP_ diff --git a/src/common/utils.cpp b/src/common/utils.cpp new file mode 100644 index 000000000..dd114fdf0 --- /dev/null +++ b/src/common/utils.cpp @@ -0,0 +1,102 @@ +/** @file + ***************************************************************************** + Implementation of misc math and serialization utility functions + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include +#include +#include +#include +#include "common/utils.hpp" + +namespace libsnark { + +size_t log2(size_t n) +/* returns ceil(log2(n)), so 1ul< 1) + { + n >>= 1; + r++; + } + + return r; +} + +size_t bitreverse(size_t n, const size_t l) +{ + size_t r = 0; + for (size_t k = 0; k < l; ++k) + { + r = (r << 1) | (n & 1); + n >>= 1; + } + return r; +} + +bit_vector int_list_to_bits(const std::initializer_list &l, const size_t wordsize) +{ + bit_vector res(wordsize*l.size()); + for (size_t i = 0; i < l.size(); ++i) + { + for (size_t j = 0; j < wordsize; ++j) + { + res[i*wordsize + j] = (*(l.begin()+i) & (1ul<<(wordsize-1-j))); + } + } + return res; +} + +long long div_ceil(long long x, long long y) +{ + return (x + (y-1)) / y; +} + +bool is_little_endian() +{ + uint64_t a = 0x12345678; + unsigned char *c = (unsigned char*)(&a); + return (*c = 0x78); +} + +std::string FORMAT(const std::string &prefix, const char* format, ...) +{ + const static size_t MAX_FMT = 256; + char buf[MAX_FMT]; + va_list args; + va_start(args, format); + vsnprintf(buf, MAX_FMT, format, args); + va_end(args); + + return prefix + std::string(buf); +} + +void serialize_bit_vector(std::ostream &out, const bit_vector &v) +{ + out << v.size() << "\n"; + for (size_t i = 0; i < v.size(); ++i) + { + out << v[i] << "\n"; + } +} + +void deserialize_bit_vector(std::istream &in, bit_vector &v) +{ + size_t size; + in >> size; + v.resize(size); + for (size_t i = 0; i < size; ++i) + { + bool b; + in >> b; + v[i] = b; + } +} +} // libsnark diff --git a/src/common/utils.hpp b/src/common/utils.hpp new file mode 100644 index 000000000..d7d9e8947 --- /dev/null +++ b/src/common/utils.hpp @@ -0,0 +1,57 @@ +/** @file + ***************************************************************************** + Declaration of misc math and serialization utility functions + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
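A hedged sketch exercising a few of the helpers implemented above; the expected values are worked out by hand from the definitions (not produced by running the library), and the wrapper function name is illustrative:

#include <cassert>
#include "common/utils.hpp"

void utils_examples()
{
    // log2 returns ceil(log2(n)), i.e. the smallest k with (1ul << k) >= n.
    assert(libsnark::log2(1) == 0);
    assert(libsnark::log2(5) == 3);            // 2^3 = 8 >= 5
    assert(libsnark::log2(8) == 3);

    // bitreverse reverses the l lowest-order bits of n.
    assert(libsnark::bitreverse(0x3, 4) == 0xC);

    // div_ceil rounds the quotient up.
    assert(libsnark::div_ceil(7, 3) == 3);

    // int_list_to_bits lays out each word MSB-first, wordsize bits per entry.
    libsnark::bit_vector bv = libsnark::int_list_to_bits({5}, 4);   // 5 = 0101
    assert(bv == libsnark::bit_vector({false, true, false, true}));
}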
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef UTILS_HPP_ +#define UTILS_HPP_ + +#include +#include +#include +#include +#include + +namespace libsnark { + +typedef std::vector bit_vector; + +/// returns ceil(log2(n)), so 1ul< &l, const size_t wordsize); +long long div_ceil(long long x, long long y); + +bool is_little_endian(); + +std::string FORMAT(const std::string &prefix, const char* format, ...); + +/* A variadic template to suppress unused argument warnings */ +template +void UNUSED(Types&&...) {} + +#ifdef DEBUG +#define FMT FORMAT +#else +#define FMT(...) (UNUSED(__VA_ARGS__), "") +#endif + +void serialize_bit_vector(std::ostream &out, const bit_vector &v); +void deserialize_bit_vector(std::istream &in, bit_vector &v); + +template +size_t size_in_bits(const std::vector &v); + +#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0])) + +} // libsnark + +#include "common/utils.tcc" /* note that utils has a templatized part (utils.tcc) and non-templatized part (utils.cpp) */ +#endif // UTILS_HPP_ diff --git a/src/common/utils.tcc b/src/common/utils.tcc new file mode 100644 index 000000000..f97178f8c --- /dev/null +++ b/src/common/utils.tcc @@ -0,0 +1,23 @@ +/** @file + ***************************************************************************** + Implementation of templatized utility functions + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef UTILS_TCC_ +#define UTILS_TCC_ + +namespace libsnark { + +template +size_t size_in_bits(const std::vector &v) +{ + return v.size() * T::size_in_bits(); +} + +} // libsnark + +#endif // UTILS_TCC_ diff --git a/src/gadgetlib1/constraint_profiling.cpp b/src/gadgetlib1/constraint_profiling.cpp new file mode 100644 index 000000000..bc17e63bc --- /dev/null +++ b/src/gadgetlib1/constraint_profiling.cpp @@ -0,0 +1,48 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for profiling constraints. + + See constraint_profiling.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
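A hedged sketch of how the PROFILE_CONSTRAINTS macro declared in constraint_profiling.hpp (below) brackets constraint generation so that the added constraints are counted per annotated block; the function name and the use of generate_boolean_r1cs_constraint from basic_gadgets.hpp are illustrative:

#include "gadgetlib1/constraint_profiling.hpp"
#include "gadgetlib1/gadgets/basic_gadgets.hpp"

using namespace libsnark;

template<typename FieldT>
void profile_bitness_constraints(protoboard<FieldT> &pb, const pb_variable_array<FieldT> &bits)
{
    PROFILE_CONSTRAINTS(pb, "booleanity of bits")
    {
        for (size_t i = 0; i < bits.size(); ++i)
        {
            generate_boolean_r1cs_constraint<FieldT>(pb, bits[i], "bitness");
        }
    }
    PRINT_CONSTRAINT_PROFILING();   // prints the table of counted blocks and clears it
}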
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "gadgetlib1/constraint_profiling.hpp" +#include "common/profiling.hpp" + +namespace libsnark { + +size_t constraint_profiling_indent = 0; +std::vector constraint_profiling_table; + +size_t PRINT_CONSTRAINT_PROFILING() +{ + size_t accounted = 0; + print_indent(); + printf("Constraint profiling:\n"); + for (constraint_profiling_entry &ent : constraint_profiling_table) + { + if (ent.indent == 0) + { + accounted += ent.count; + } + + print_indent(); + for (size_t i = 0; i < ent.indent; ++i) + { + printf(" "); + } + printf("* Number of constraints in [%s]: %zu\n", ent.annotation.c_str(), ent.count); + } + + constraint_profiling_table.clear(); + constraint_profiling_indent = 0; + + return accounted; +} + +} diff --git a/src/gadgetlib1/constraint_profiling.hpp b/src/gadgetlib1/constraint_profiling.hpp new file mode 100644 index 000000000..df8a55de1 --- /dev/null +++ b/src/gadgetlib1/constraint_profiling.hpp @@ -0,0 +1,42 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for profiling constraints. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef CONSTRAINT_PROFILING_HPP_ +#define CONSTRAINT_PROFILING_HPP_ + +#include +#include +#include +#include + +namespace libsnark { + +extern size_t constraint_profiling_indent; + +struct constraint_profiling_entry { + size_t indent; + std::string annotation; + size_t count; +}; + +extern std::vector constraint_profiling_table; + +#define PROFILE_CONSTRAINTS(pb, annotation) \ + for (size_t _num_constraints_before = pb.num_constraints(), _iter = (++constraint_profiling_indent, 0), _cp_pos = constraint_profiling_table.size(); \ + _iter == 0; \ + constraint_profiling_table.insert(constraint_profiling_table.begin() + _cp_pos, constraint_profiling_entry{--constraint_profiling_indent, annotation, pb.num_constraints() - _num_constraints_before}), \ + _iter = 1) + +size_t PRINT_CONSTRAINT_PROFILING(); // returns # of top level constraints + +} // libsnark + +#endif // CONSTRAINT_PROFILING_HPP_ diff --git a/src/gadgetlib1/examples/simple_example.hpp b/src/gadgetlib1/examples/simple_example.hpp new file mode 100644 index 000000000..faa3a9605 --- /dev/null +++ b/src/gadgetlib1/examples/simple_example.hpp @@ -0,0 +1,23 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
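A hedged sketch of calling the example generator declared in simple_example.hpp (below); the wrapper name and the parameter values are illustrative:

#include "gadgetlib1/examples/simple_example.hpp"

// Produces a dummy inner-product R1CS instance with roughly 1000 constraints and
// 10 primary inputs, together with a satisfying assignment.
template<typename FieldT>
libsnark::r1cs_example<FieldT> make_small_example()
{
    return libsnark::gen_r1cs_example_from_protoboard<FieldT>(1000, 10);
}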
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SIMPLE_EXAMPLE_HPP_ +#define SIMPLE_EXAMPLE_HPP_ + +#include "examples/r1cs_examples.hpp" + +namespace libsnark { + +template +r1cs_example gen_r1cs_example_from_protoboard(const size_t num_constraints, + const size_t num_inputs); + +} // libsnark + +#include "gadgetlib1/examples/simple_example.tcc" + +#endif // SIMPLE_EXAMPLE_HPP_ diff --git a/src/gadgetlib1/examples/simple_example.tcc b/src/gadgetlib1/examples/simple_example.tcc new file mode 100644 index 000000000..9d500b5c7 --- /dev/null +++ b/src/gadgetlib1/examples/simple_example.tcc @@ -0,0 +1,54 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SIMPLE_EXAMPLE_TCC_ +#define SIMPLE_EXAMPLE_TCC_ + +#include +#include "gadgetlib1/gadgets/basic_gadgets.hpp" + +namespace libsnark { + +/* NOTE: all examples here actually generate one constraint less to account for soundness constraint in QAP */ + +template +r1cs_example gen_r1cs_example_from_protoboard(const size_t num_constraints, + const size_t num_inputs) +{ + const size_t new_num_constraints = num_constraints - 1; + + /* construct dummy example: inner products of two vectors */ + protoboard pb; + pb_variable_array A; + pb_variable_array B; + pb_variable res; + + // the variables on the protoboard are (ONE (constant 1 term), res, A[0], ..., A[num_constraints-1], B[0], ..., B[num_constraints-1]) + res.allocate(pb, "res"); + A.allocate(pb, new_num_constraints, "A"); + B.allocate(pb, new_num_constraints, "B"); + + inner_product_gadget compute_inner_product(pb, A, B, res, "compute_inner_product"); + compute_inner_product.generate_r1cs_constraints(); + + /* fill in random example */ + for (size_t i = 0; i < new_num_constraints; ++i) + { + pb.val(A[i]) = FieldT::random_element(); + pb.val(B[i]) = FieldT::random_element(); + } + + compute_inner_product.generate_r1cs_witness(); + + pb.constraint_system.num_inputs = num_inputs; + const r1cs_variable_assignment va = pb.values; + const r1cs_variable_assignment input(va.begin(), va.begin() + num_inputs); + return r1cs_example(pb.constraint_system, input, va, num_inputs); +} + +} // libsnark +#endif // R1CS_EXAMPLES_TCC_ diff --git a/src/gadgetlib1/gadget.hpp b/src/gadgetlib1/gadget.hpp new file mode 100644 index 000000000..dbeaa9d4b --- /dev/null +++ b/src/gadgetlib1/gadget.hpp @@ -0,0 +1,27 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
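A hedged sketch of the pattern every gadget in this library follows: derive from the gadget<FieldT> base class declared just below, take the protoboard and an annotation prefix in the constructor, then provide generate_r1cs_constraints and generate_r1cs_witness. The square_gadget here is purely illustrative:

#include "gadgetlib1/gadget.hpp"

using namespace libsnark;

template<typename FieldT>
class square_gadget : public gadget<FieldT> {
public:
    pb_variable<FieldT> x;
    pb_variable<FieldT> x_squared;

    square_gadget(protoboard<FieldT> &pb,
                  const pb_variable<FieldT> &x,
                  const pb_variable<FieldT> &x_squared,
                  const std::string &annotation_prefix="") :
        gadget<FieldT>(pb, annotation_prefix), x(x), x_squared(x_squared) {}

    void generate_r1cs_constraints()
    {
        // x * x = x_squared
        this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(x, x, x_squared),
                                     FMT(this->annotation_prefix, " squaring"));
    }

    void generate_r1cs_witness()
    {
        this->pb.val(x_squared) = this->pb.val(x) * this->pb.val(x);
    }
};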
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef GADGET_HPP_ +#define GADGET_HPP_ + +#include "gadgetlib1/protoboard.hpp" + +namespace libsnark { + +template +class gadget { +protected: + protoboard &pb; + const std::string annotation_prefix; +public: + gadget(protoboard &pb, const std::string &annotation_prefix=""); +}; + +} // libsnark +#include "gadgetlib1/gadget.tcc" + +#endif // GADGET_HPP_ diff --git a/src/gadgetlib1/gadget.tcc b/src/gadgetlib1/gadget.tcc new file mode 100644 index 000000000..120229bbe --- /dev/null +++ b/src/gadgetlib1/gadget.tcc @@ -0,0 +1,23 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef GADGET_TCC_ +#define GADGET_TCC_ + +namespace libsnark { + +template +gadget::gadget(protoboard &pb, const std::string &annotation_prefix) : + pb(pb), annotation_prefix(annotation_prefix) +{ +#ifdef DEBUG + assert(annotation_prefix != ""); +#endif +} + +} // libsnark +#endif // GADGET_TCC_ diff --git a/src/gadgetlib1/gadgets/basic_gadgets.hpp b/src/gadgetlib1/gadgets/basic_gadgets.hpp new file mode 100644 index 000000000..08e596bee --- /dev/null +++ b/src/gadgetlib1/gadgets/basic_gadgets.hpp @@ -0,0 +1,351 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BASIC_GADGETS_HPP_ +#define BASIC_GADGETS_HPP_ + +#include +#include + +#include "gadgetlib1/gadget.hpp" + +namespace libsnark { + +/* forces lc to take value 0 or 1 by adding constraint lc * (1-lc) = 0 */ +template +void generate_boolean_r1cs_constraint(protoboard &pb, const pb_linear_combination &lc, const std::string &annotation_prefix=""); + +template +void generate_r1cs_equals_const_constraint(protoboard &pb, const pb_linear_combination &lc, const FieldT& c, const std::string &annotation_prefix=""); + +template +class packing_gadget : public gadget { +private: + /* no internal variables */ +public: + const pb_linear_combination_array bits; + const pb_linear_combination packed; + + packing_gadget(protoboard &pb, + const pb_linear_combination_array &bits, + const pb_linear_combination &packed, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), bits(bits), packed(packed) {} + + void generate_r1cs_constraints(const bool enforce_bitness); + /* adds constraint result = \sum bits[i] * 2^i */ + + void generate_r1cs_witness_from_packed(); + void generate_r1cs_witness_from_bits(); +}; + +template +class multipacking_gadget : public gadget { +private: + std::vector > packers; +public: + const pb_linear_combination_array bits; + const pb_linear_combination_array packed_vars; + + const size_t chunk_size; + const size_t num_chunks; + // const size_t last_chunk_size; + + multipacking_gadget(protoboard &pb, + const pb_linear_combination_array &bits, + const pb_linear_combination_array &packed_vars, + const size_t chunk_size, + const std::string &annotation_prefix=""); + void generate_r1cs_constraints(const bool enforce_bitness); + void 
generate_r1cs_witness_from_packed(); + void generate_r1cs_witness_from_bits(); +}; + +template +class field_vector_copy_gadget : public gadget { +public: + const pb_variable_array source; + const pb_variable_array target; + const pb_linear_combination do_copy; + + field_vector_copy_gadget(protoboard &pb, + const pb_variable_array &source, + const pb_variable_array &target, + const pb_linear_combination &do_copy, + const std::string &annotation_prefix=""); + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +class bit_vector_copy_gadget : public gadget { +public: + const pb_variable_array source_bits; + const pb_variable_array target_bits; + const pb_linear_combination do_copy; + + pb_variable_array packed_source; + pb_variable_array packed_target; + + std::shared_ptr > pack_source; + std::shared_ptr > pack_target; + std::shared_ptr > copier; + + const size_t chunk_size; + const size_t num_chunks; + + bit_vector_copy_gadget(protoboard &pb, + const pb_variable_array &source_bits, + const pb_variable_array &target_bits, + const pb_linear_combination &do_copy, + const size_t chunk_size, + const std::string &annotation_prefix=""); + void generate_r1cs_constraints(const bool enforce_source_bitness, const bool enforce_target_bitness); + void generate_r1cs_witness(); +}; + +template +class dual_variable_gadget : public gadget { +private: + std::shared_ptr > consistency_check; +public: + pb_variable packed; + pb_variable_array bits; + + dual_variable_gadget(protoboard &pb, + const size_t width, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix) + { + packed.allocate(pb, FMT(this->annotation_prefix, " packed")); + bits.allocate(pb, width, FMT(this->annotation_prefix, " bits")); + consistency_check.reset(new packing_gadget(pb, + bits, + packed, + FMT(this->annotation_prefix, " consistency_check"))); + } + + dual_variable_gadget(protoboard &pb, + const pb_variable_array &bits, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), bits(bits) + { + packed.allocate(pb, FMT(this->annotation_prefix, " packed")); + consistency_check.reset(new packing_gadget(pb, + bits, + packed, + FMT(this->annotation_prefix, " consistency_check"))); + } + + dual_variable_gadget(protoboard &pb, + const pb_variable &packed, + const size_t width, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), packed(packed) + { + bits.allocate(pb, width, FMT(this->annotation_prefix, " bits")); + consistency_check.reset(new packing_gadget(pb, + bits, + packed, + FMT(this->annotation_prefix, " consistency_check"))); + } + + void generate_r1cs_constraints(const bool enforce_bitness); + void generate_r1cs_witness_from_packed(); + void generate_r1cs_witness_from_bits(); +}; + +/* + the gadgets below are Fp specific: + I * X = R + (1-R) * X = 0 + + if X = 0 then R = 0 + if X != 0 then R = 1 and I = X^{-1} +*/ + +template +class disjunction_gadget : public gadget { +private: + pb_variable inv; +public: + const pb_variable_array inputs; + const pb_variable output; + + disjunction_gadget(protoboard& pb, + const pb_variable_array &inputs, + const pb_variable &output, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), inputs(inputs), output(output) + { + assert(inputs.size() >= 1); + inv.allocate(pb, FMT(this->annotation_prefix, " inv")); + } + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +void test_disjunction_gadget(const size_t n); + +template +class conjunction_gadget : 
public gadget { +private: + pb_variable inv; +public: + const pb_variable_array inputs; + const pb_variable output; + + conjunction_gadget(protoboard& pb, + const pb_variable_array &inputs, + const pb_variable &output, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), inputs(inputs), output(output) + { + assert(inputs.size() >= 1); + inv.allocate(pb, FMT(this->annotation_prefix, " inv")); + } + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +void test_conjunction_gadget(const size_t n); + +template +class comparison_gadget : public gadget { +private: + pb_variable_array alpha; + pb_variable alpha_packed; + std::shared_ptr > pack_alpha; + + std::shared_ptr > all_zeros_test; + pb_variable not_all_zeros; +public: + const size_t n; + const pb_linear_combination A; + const pb_linear_combination B; + const pb_variable less; + const pb_variable less_or_eq; + + comparison_gadget(protoboard& pb, + const size_t n, + const pb_linear_combination &A, + const pb_linear_combination &B, + const pb_variable &less, + const pb_variable &less_or_eq, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), n(n), A(A), B(B), less(less), less_or_eq(less_or_eq) + { + alpha.allocate(pb, n, FMT(this->annotation_prefix, " alpha")); + alpha.emplace_back(less_or_eq); // alpha[n] is less_or_eq + + alpha_packed.allocate(pb, FMT(this->annotation_prefix, " alpha_packed")); + not_all_zeros.allocate(pb, FMT(this->annotation_prefix, " not_all_zeros")); + + pack_alpha.reset(new packing_gadget(pb, alpha, alpha_packed, + FMT(this->annotation_prefix, " pack_alpha"))); + + all_zeros_test.reset(new disjunction_gadget(pb, + pb_variable_array(alpha.begin(), alpha.begin() + n), + not_all_zeros, + FMT(this->annotation_prefix, " all_zeros_test"))); + }; + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +void test_comparison_gadget(const size_t n); + +template +class inner_product_gadget : public gadget { +private: + /* S_i = \sum_{k=0}^{i+1} A[i] * B[i] */ + pb_variable_array S; +public: + const pb_linear_combination_array A; + const pb_linear_combination_array B; + const pb_variable result; + + inner_product_gadget(protoboard& pb, + const pb_linear_combination_array &A, + const pb_linear_combination_array &B, + const pb_variable &result, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), A(A), B(B), result(result) + { + assert(A.size() >= 1); + assert(A.size() == B.size()); + + S.allocate(pb, A.size()-1, FMT(this->annotation_prefix, " S")); + } + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +void test_inner_product_gadget(const size_t n); + +template +class loose_multiplexing_gadget : public gadget { +/* + this implements loose multiplexer: + index not in bounds -> success_flag = 0 + index in bounds && success_flag = 1 -> result is correct + however if index is in bounds we can also set success_flag to 0 (and then result will be forced to be 0) +*/ +public: + pb_variable_array alpha; +private: + std::shared_ptr > compute_result; +public: + const pb_linear_combination_array arr; + const pb_variable index; + const pb_variable result; + const pb_variable success_flag; + + loose_multiplexing_gadget(protoboard& pb, + const pb_linear_combination_array &arr, + const pb_variable &index, + const pb_variable &result, + const pb_variable &success_flag, + const std::string &annotation_prefix="") : + gadget(pb, annotation_prefix), arr(arr), index(index), 
result(result), success_flag(success_flag) + { + alpha.allocate(pb, arr.size(), FMT(this->annotation_prefix, " alpha")); + compute_result.reset(new inner_product_gadget(pb, alpha, arr, result, FMT(this->annotation_prefix, " compute_result"))); + }; + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +void test_loose_multiplexing_gadget(const size_t n); + +template +void create_linear_combination_constraints(protoboard &pb, + const std::vector &base, + const std::vector > &v, + const VarT &target, + const std::string &annotation_prefix); + +template +void create_linear_combination_witness(protoboard &pb, + const std::vector &base, + const std::vector > &v, + const VarT &target); + +} // libsnark +#include "gadgetlib1/gadgets/basic_gadgets.tcc" + +#endif // BASIC_GADGETS_HPP_ diff --git a/src/gadgetlib1/gadgets/basic_gadgets.tcc b/src/gadgetlib1/gadgets/basic_gadgets.tcc new file mode 100644 index 000000000..213b1906f --- /dev/null +++ b/src/gadgetlib1/gadgets/basic_gadgets.tcc @@ -0,0 +1,705 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef BASIC_GADGETS_TCC_ +#define BASIC_GADGETS_TCC_ + +#include "common/profiling.hpp" +#include "common/utils.hpp" + +namespace libsnark { + +template +void generate_boolean_r1cs_constraint(protoboard &pb, const pb_linear_combination &lc, const std::string &annotation_prefix) +/* forces lc to take value 0 or 1 by adding constraint lc * (1-lc) = 0 */ +{ + pb.add_r1cs_constraint(r1cs_constraint(lc, 1-lc, 0), + FMT(annotation_prefix, " boolean_r1cs_constraint")); +} + +template +void generate_r1cs_equals_const_constraint(protoboard &pb, const pb_linear_combination &lc, const FieldT& c, const std::string &annotation_prefix) +{ + pb.add_r1cs_constraint(r1cs_constraint(1, lc, c), + FMT(annotation_prefix, " constness_constraint")); +} + +template +void packing_gadget::generate_r1cs_constraints(const bool enforce_bitness) +/* adds constraint result = \sum bits[i] * 2^i */ +{ + this->pb.add_r1cs_constraint(r1cs_constraint(1, pb_packing_sum(bits), packed), FMT(this->annotation_prefix, " packing_constraint")); + + if (enforce_bitness) + { + for (size_t i = 0; i < bits.size(); ++i) + { + generate_boolean_r1cs_constraint(this->pb, bits[i], FMT(this->annotation_prefix, " bitness_%zu", i)); + } + } +} + +template +void packing_gadget::generate_r1cs_witness_from_packed() +{ + packed.evaluate(this->pb); + assert(this->pb.lc_val(packed).as_bigint().num_bits() <= bits.size()); + bits.fill_with_bits_of_field_element(this->pb, this->pb.lc_val(packed)); +} + +template +void packing_gadget::generate_r1cs_witness_from_bits() +{ + bits.evaluate(this->pb); + this->pb.lc_val(packed) = bits.get_field_element_from_bits(this->pb); +} + +template +multipacking_gadget::multipacking_gadget(protoboard &pb, + const pb_linear_combination_array &bits, + const pb_linear_combination_array &packed_vars, + const size_t chunk_size, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), bits(bits), packed_vars(packed_vars), + chunk_size(chunk_size), + num_chunks(div_ceil(bits.size(), chunk_size)) + // last_chunk_size(bits.size() - (num_chunks-1) * chunk_size) +{ + assert(packed_vars.size() == num_chunks); + for (size_t i = 0; i < num_chunks; ++i) + { + 
packers.emplace_back(packing_gadget(this->pb, pb_linear_combination_array(bits.begin() + i * chunk_size, + bits.begin() + std::min((i+1) * chunk_size, bits.size())), + packed_vars[i], FMT(this->annotation_prefix, " packers_%zu", i))); + } +} + +template +void multipacking_gadget::generate_r1cs_constraints(const bool enforce_bitness) +{ + for (size_t i = 0; i < num_chunks; ++i) + { + packers[i].generate_r1cs_constraints(enforce_bitness); + } +} + +template +void multipacking_gadget::generate_r1cs_witness_from_packed() +{ + for (size_t i = 0; i < num_chunks; ++i) + { + packers[i].generate_r1cs_witness_from_packed(); + } +} + +template +void multipacking_gadget::generate_r1cs_witness_from_bits() +{ + for (size_t i = 0; i < num_chunks; ++i) + { + packers[i].generate_r1cs_witness_from_bits(); + } +} + +template +size_t multipacking_num_chunks(const size_t num_bits) +{ + return div_ceil(num_bits, FieldT::capacity()); +} + +template +field_vector_copy_gadget::field_vector_copy_gadget(protoboard &pb, + const pb_variable_array &source, + const pb_variable_array &target, + const pb_linear_combination &do_copy, + const std::string &annotation_prefix) : +gadget(pb, annotation_prefix), source(source), target(target), do_copy(do_copy) +{ + assert(source.size() == target.size()); +} + +template +void field_vector_copy_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < source.size(); ++i) + { + this->pb.add_r1cs_constraint(r1cs_constraint(do_copy, source[i] - target[i], 0), + FMT(this->annotation_prefix, " copying_check_%zu", i)); + } +} + +template +void field_vector_copy_gadget::generate_r1cs_witness() +{ + do_copy.evaluate(this->pb); + assert(this->pb.lc_val(do_copy) == FieldT::one() || this->pb.lc_val(do_copy) == FieldT::zero()); + if (this->pb.lc_val(do_copy) != FieldT::zero()) + { + for (size_t i = 0; i < source.size(); ++i) + { + this->pb.val(target[i]) = this->pb.val(source[i]); + } + } +} + +template +bit_vector_copy_gadget::bit_vector_copy_gadget(protoboard &pb, + const pb_variable_array &source_bits, + const pb_variable_array &target_bits, + const pb_linear_combination &do_copy, + const size_t chunk_size, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), source_bits(source_bits), target_bits(target_bits), do_copy(do_copy), + chunk_size(chunk_size), num_chunks(div_ceil(source_bits.size(), chunk_size)) +{ + assert(source_bits.size() == target_bits.size()); + + packed_source.allocate(pb, num_chunks, FMT(annotation_prefix, " packed_source")); + pack_source.reset(new multipacking_gadget(pb, source_bits, packed_source, chunk_size, FMT(annotation_prefix, " pack_source"))); + + packed_target.allocate(pb, num_chunks, FMT(annotation_prefix, " packed_target")); + pack_target.reset(new multipacking_gadget(pb, target_bits, packed_target, chunk_size, FMT(annotation_prefix, " pack_target"))); + + copier.reset(new field_vector_copy_gadget(pb, packed_source, packed_target, do_copy, FMT(annotation_prefix, " copier"))); +} + +template +void bit_vector_copy_gadget::generate_r1cs_constraints(const bool enforce_source_bitness, const bool enforce_target_bitness) +{ + pack_source->generate_r1cs_constraints(enforce_source_bitness); + pack_target->generate_r1cs_constraints(enforce_target_bitness); + + copier->generate_r1cs_constraints(); +} + +template +void bit_vector_copy_gadget::generate_r1cs_witness() +{ + do_copy.evaluate(this->pb); + assert(this->pb.lc_val(do_copy) == FieldT::zero() || this->pb.lc_val(do_copy) == FieldT::one()); + if (this->pb.lc_val(do_copy) == FieldT::one()) 
+ { + for (size_t i = 0; i < source_bits.size(); ++i) + { + this->pb.val(target_bits[i]) = this->pb.val(source_bits[i]); + } + } + + pack_source->generate_r1cs_witness_from_bits(); + pack_target->generate_r1cs_witness_from_bits(); +} + +template +void dual_variable_gadget::generate_r1cs_constraints(const bool enforce_bitness) +{ + consistency_check->generate_r1cs_constraints(enforce_bitness); +} + +template +void dual_variable_gadget::generate_r1cs_witness_from_packed() +{ + consistency_check->generate_r1cs_witness_from_packed(); +} + +template +void dual_variable_gadget::generate_r1cs_witness_from_bits() +{ + consistency_check->generate_r1cs_witness_from_bits(); +} + +template +void disjunction_gadget::generate_r1cs_constraints() +{ + /* inv * sum = output */ + linear_combination a1, b1, c1; + a1.add_term(inv); + for (size_t i = 0; i < inputs.size(); ++i) + { + b1.add_term(inputs[i]); + } + c1.add_term(output); + + this->pb.add_r1cs_constraint(r1cs_constraint(a1, b1, c1), FMT(this->annotation_prefix, " inv*sum=output")); + + /* (1-output) * sum = 0 */ + linear_combination a2, b2, c2; + a2.add_term(ONE); + a2.add_term(output, -1); + for (size_t i = 0; i < inputs.size(); ++i) + { + b2.add_term(inputs[i]); + } + c2.add_term(ONE, 0); + + this->pb.add_r1cs_constraint(r1cs_constraint(a2, b2, c2), FMT(this->annotation_prefix, " (1-output)*sum=0")); +} + +template +void disjunction_gadget::generate_r1cs_witness() +{ + FieldT sum = FieldT::zero(); + + for (size_t i = 0; i < inputs.size(); ++i) + { + sum += this->pb.val(inputs[i]); + } + + if (sum.is_zero()) + { + this->pb.val(inv) = FieldT::zero(); + this->pb.val(output) = FieldT::zero(); + } + else + { + this->pb.val(inv) = sum.inverse(); + this->pb.val(output) = FieldT::one(); + } +} + +template +void test_disjunction_gadget(const size_t n) +{ + printf("testing disjunction_gadget on all %zu bit strings\n", n); + + protoboard pb; + pb_variable_array inputs; + inputs.allocate(pb, n, "inputs"); + + pb_variable output; + output.allocate(pb, "output"); + + disjunction_gadget d(pb, inputs, output, "d"); + d.generate_r1cs_constraints(); + + for (size_t w = 0; w < 1ul< +void conjunction_gadget::generate_r1cs_constraints() +{ + /* inv * (n-sum) = 1-output */ + linear_combination a1, b1, c1; + a1.add_term(inv); + b1.add_term(ONE, inputs.size()); + for (size_t i = 0; i < inputs.size(); ++i) + { + b1.add_term(inputs[i], -1); + } + c1.add_term(ONE); + c1.add_term(output, -1); + + this->pb.add_r1cs_constraint(r1cs_constraint(a1, b1, c1), FMT(this->annotation_prefix, " inv*(n-sum)=(1-output)")); + + /* output * (n-sum) = 0 */ + linear_combination a2, b2, c2; + a2.add_term(output); + b2.add_term(ONE, inputs.size()); + for (size_t i = 0; i < inputs.size(); ++i) + { + b2.add_term(inputs[i], -1); + } + c2.add_term(ONE, 0); + + this->pb.add_r1cs_constraint(r1cs_constraint(a2, b2, c2), FMT(this->annotation_prefix, " output*(n-sum)=0")); +} + +template +void conjunction_gadget::generate_r1cs_witness() +{ + FieldT sum = FieldT(inputs.size()); + + for (size_t i = 0; i < inputs.size(); ++i) + { + sum -= this->pb.val(inputs[i]); + } + + if (sum.is_zero()) + { + this->pb.val(inv) = FieldT::zero(); + this->pb.val(output) = FieldT::one(); + } + else + { + this->pb.val(inv) = sum.inverse(); + this->pb.val(output) = FieldT::zero(); + } +} + +template +void test_conjunction_gadget(const size_t n) +{ + printf("testing conjunction_gadget on all %zu bit strings\n", n); + + protoboard pb; + pb_variable_array inputs; + inputs.allocate(pb, n, "inputs"); + + pb_variable output; + 
output.allocate(pb, "output"); + + conjunction_gadget c(pb, inputs, output, "c"); + c.generate_r1cs_constraints(); + + for (size_t w = 0; w < 1ul< +void comparison_gadget::generate_r1cs_constraints() +{ + /* + packed(alpha) = 2^n + B - A + + not_all_zeros = \bigvee_{i=0}^{n-1} alpha_i + + if B - A > 0, then 2^n + B - A > 2^n, + so alpha_n = 1 and not_all_zeros = 1 + if B - A = 0, then 2^n + B - A = 2^n, + so alpha_n = 1 and not_all_zeros = 0 + if B - A < 0, then 2^n + B - A \in {0, 1, \ldots, 2^n-1}, + so alpha_n = 0 + + therefore alpha_n = less_or_eq and alpha_n * not_all_zeros = less + */ + + /* not_all_zeros to be Boolean, alpha_i are Boolean by packing gadget */ + generate_boolean_r1cs_constraint(this->pb, not_all_zeros, + FMT(this->annotation_prefix, " not_all_zeros")); + + /* constraints for packed(alpha) = 2^n + B - A */ + pack_alpha->generate_r1cs_constraints(true); + this->pb.add_r1cs_constraint(r1cs_constraint(1, (FieldT(2)^n) + B - A, alpha_packed), FMT(this->annotation_prefix, " main_constraint")); + + /* compute result */ + all_zeros_test->generate_r1cs_constraints(); + this->pb.add_r1cs_constraint(r1cs_constraint(less_or_eq, not_all_zeros, less), + FMT(this->annotation_prefix, " less")); +} + +template +void comparison_gadget::generate_r1cs_witness() +{ + A.evaluate(this->pb); + B.evaluate(this->pb); + + /* unpack 2^n + B - A into alpha_packed */ + this->pb.val(alpha_packed) = (FieldT(2)^n) + this->pb.lc_val(B) - this->pb.lc_val(A); + pack_alpha->generate_r1cs_witness_from_packed(); + + /* compute result */ + all_zeros_test->generate_r1cs_witness(); + this->pb.val(less) = this->pb.val(less_or_eq) * this->pb.val(not_all_zeros); +} + +template +void test_comparison_gadget(const size_t n) +{ + printf("testing comparison_gadget on all %zu bit inputs\n", n); + + protoboard pb; + + pb_variable A, B, less, less_or_eq; + A.allocate(pb, "A"); + B.allocate(pb, "B"); + less.allocate(pb, "less"); + less_or_eq.allocate(pb, "less_or_eq"); + + comparison_gadget cmp(pb, n, A, B, less, less_or_eq, "cmp"); + cmp.generate_r1cs_constraints(); + + for (size_t a = 0; a < 1ul< +void inner_product_gadget::generate_r1cs_constraints() +{ + /* + S_i = \sum_{k=0}^{i+1} A[i] * B[i] + S[0] = A[0] * B[0] + S[i+1] - S[i] = A[i] * B[i] + */ + for (size_t i = 0; i < A.size(); ++i) + { + this->pb.add_r1cs_constraint( + r1cs_constraint(A[i], B[i], + (i == A.size()-1 ? result : S[i]) + (i == 0 ? 0 * ONE : -S[i-1])), + FMT(this->annotation_prefix, " S_%zu", i)); + } +} + +template +void inner_product_gadget::generate_r1cs_witness() +{ + FieldT total = FieldT::zero(); + for (size_t i = 0; i < A.size(); ++i) + { + A[i].evaluate(this->pb); + B[i].evaluate(this->pb); + + total += this->pb.lc_val(A[i]) * this->pb.lc_val(B[i]); + this->pb.val(i == A.size()-1 ? 
result : S[i]) = total; + } +} + +template +void test_inner_product_gadget(const size_t n) +{ + printf("testing inner_product_gadget on all %zu bit strings\n", n); + + protoboard pb; + pb_variable_array A; + A.allocate(pb, n, "A"); + pb_variable_array B; + B.allocate(pb, n, "B"); + + pb_variable result; + result.allocate(pb, "result"); + + inner_product_gadget g(pb, A, B, result, "g"); + g.generate_r1cs_constraints(); + + for (size_t i = 0; i < 1ul< +void loose_multiplexing_gadget::generate_r1cs_constraints() +{ + /* \alpha_i (index - i) = 0 */ + for (size_t i = 0; i < arr.size(); ++i) + { + this->pb.add_r1cs_constraint( + r1cs_constraint(alpha[i], index - i, 0), + FMT(this->annotation_prefix, " alpha_%zu", i)); + } + + /* 1 * (\sum \alpha_i) = success_flag */ + linear_combination a, b, c; + a.add_term(ONE); + for (size_t i = 0; i < arr.size(); ++i) + { + b.add_term(alpha[i]); + } + c.add_term(success_flag); + this->pb.add_r1cs_constraint(r1cs_constraint(a, b, c), FMT(this->annotation_prefix, " main_constraint")); + + /* now success_flag is constrained to either 0 (if index is out of + range) or \alpha_i. constrain it and \alpha_i to zero */ + generate_boolean_r1cs_constraint(this->pb, success_flag, FMT(this->annotation_prefix, " success_flag")); + + /* compute result */ + compute_result->generate_r1cs_constraints(); +} + +template +void loose_multiplexing_gadget::generate_r1cs_witness() +{ + /* assumes that idx can be fit in ulong; true for our purposes for now */ + const bigint valint = this->pb.val(index).as_bigint(); + unsigned long idx = valint.as_ulong(); + const bigint arrsize(arr.size()); + + if (idx >= arr.size() || mpn_cmp(valint.data, arrsize.data, FieldT::num_limbs) >= 0) + { + for (size_t i = 0; i < arr.size(); ++i) + { + this->pb.val(alpha[i]) = FieldT::zero(); + } + + this->pb.val(success_flag) = FieldT::zero(); + } + else + { + for (size_t i = 0; i < arr.size(); ++i) + { + this->pb.val(alpha[i]) = (i == idx ? 
FieldT::one() : FieldT::zero()); + } + + this->pb.val(success_flag) = FieldT::one(); + } + + compute_result->generate_r1cs_witness(); +} + +template +void test_loose_multiplexing_gadget(const size_t n) +{ + printf("testing loose_multiplexing_gadget on 2**%zu pb_variable array inputs\n", n); + protoboard pb; + + pb_variable_array arr; + arr.allocate(pb, 1ul< index, result, success_flag; + index.allocate(pb, "index"); + result.allocate(pb, "result"); + success_flag.allocate(pb, "success_flag"); + + loose_multiplexing_gadget g(pb, arr, index, result, success_flag, "g"); + g.generate_r1cs_constraints(); + + for (size_t i = 0; i < 1ul< +void create_linear_combination_constraints(protoboard &pb, + const std::vector &base, + const std::vector > &v, + const VarT &target, + const std::string &annotation_prefix) +{ + for (size_t i = 0; i < base.size(); ++i) + { + linear_combination a, b, c; + + a.add_term(ONE); + b.add_term(ONE, base[i]); + + for (auto &p : v) + { + b.add_term(p.first.all_vars[i], p.second); + } + + c.add_term(target.all_vars[i]); + + pb.add_r1cs_constraint(r1cs_constraint(a, b, c), FMT(annotation_prefix, " linear_combination_%zu", i)); + } +} + +template +void create_linear_combination_witness(protoboard &pb, + const std::vector &base, + const std::vector > &v, + const VarT &target) +{ + for (size_t i = 0; i < base.size(); ++i) + { + pb.val(target.all_vars[i]) = base[i]; + + for (auto &p : v) + { + pb.val(target.all_vars[i]) += p.second * pb.val(p.first.all_vars[i]); + } + } +} + +} // libsnark +#endif // BASIC_GADGETS_TCC_ diff --git a/src/gadgetlib1/gadgets/gadget_from_r1cs.hpp b/src/gadgetlib1/gadgets/gadget_from_r1cs.hpp new file mode 100644 index 000000000..e4b8a2acf --- /dev/null +++ b/src/gadgetlib1/gadgets/gadget_from_r1cs.hpp @@ -0,0 +1,45 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a gadget that can be created from an R1CS constraint system. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef GADGET_FROM_R1CS_HPP_ +#define GADGET_FROM_R1CS_HPP_ + +#include + +#include "gadgetlib1/gadget.hpp" + +namespace libsnark { + +template +class gadget_from_r1cs : public gadget { + +private: + const std::vector > vars; + const r1cs_constraint_system cs; + std::map cs_to_vars; + +public: + + gadget_from_r1cs(protoboard &pb, + const std::vector > &vars, + const r1cs_constraint_system &cs, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input); +}; + +} // libsnark + +#include "gadgetlib1/gadgets/gadget_from_r1cs.tcc" + +#endif // GADGET_FROM_R1CS_HPP_ diff --git a/src/gadgetlib1/gadgets/gadget_from_r1cs.tcc b/src/gadgetlib1/gadgets/gadget_from_r1cs.tcc new file mode 100644 index 000000000..bc59b4587 --- /dev/null +++ b/src/gadgetlib1/gadgets/gadget_from_r1cs.tcc @@ -0,0 +1,123 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for a gadget that can be created from an R1CS constraint system. + + See gadget_from_r1cs.hpp . 
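A hedged sketch of how gadget_from_r1cs (declared above) might be used to splice an existing constraint system into a protoboard; partitioning all variables into a single block and the function name are illustrative choices:

#include "gadgetlib1/gadgets/gadget_from_r1cs.hpp"

using namespace libsnark;

template<typename FieldT>
void embed_existing_r1cs(protoboard<FieldT> &pb,
                         const r1cs_constraint_system<FieldT> &cs,
                         const r1cs_primary_input<FieldT> &primary,
                         const r1cs_auxiliary_input<FieldT> &auxiliary)
{
    // One protoboard variable per constraint-system variable, in a single block.
    pb_variable_array<FieldT> all_vars;
    all_vars.allocate(pb, cs.num_variables(), "all_vars");

    gadget_from_r1cs<FieldT> g(pb, { all_vars }, cs, "embedded_cs");
    g.generate_r1cs_constraints();
    g.generate_r1cs_witness(primary, auxiliary);
}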
+ + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef GADGET_FROM_R1CS_TCC_ +#define GADGET_FROM_R1CS_TCC_ + +namespace libsnark { + +template +gadget_from_r1cs::gadget_from_r1cs(protoboard &pb, + const std::vector > &vars, + const r1cs_constraint_system &cs, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + vars(vars), + cs(cs) +{ + cs_to_vars[0] = 0; /* constant term maps to constant term */ + + size_t cs_var_idx = 1; + for (auto va : vars) + { +#ifdef DEBUG + printf("gadget_from_r1cs: translating a block of variables with length %zu\n", va.size()); +#endif + for (auto v : va) + { + cs_to_vars[cs_var_idx] = v.index; + +#ifdef DEBUG + if (v.index != 0) + { + // handle annotations, except for re-annotating constant term + const std::map::const_iterator it = cs.variable_annotations.find(cs_var_idx); + + std::string annotation = FMT(annotation_prefix, " variable_%zu", cs_var_idx); + if (it != cs.variable_annotations.end()) + { + annotation = annotation_prefix + " " + it->second; + } + + pb.augment_variable_annotation(v, annotation); + } +#endif + ++cs_var_idx; + } + } + +#ifdef DEBUG + printf("gadget_from_r1cs: sum of all block lengths: %zu\n", cs_var_idx-1); + printf("gadget_from_r1cs: cs.num_variables(): %zu\n", cs.num_variables()); +#endif + + assert(cs_var_idx - 1 == cs.num_variables()); +} + +template +void gadget_from_r1cs::generate_r1cs_constraints() +{ + for (size_t i = 0; i < cs.num_constraints(); ++i) + { + const r1cs_constraint &constr = cs.constraints[i]; + r1cs_constraint translated_constr; + + for (const linear_term &t: constr.a.terms) + { + translated_constr.a.terms.emplace_back(linear_term(pb_variable(cs_to_vars[t.index]), t.coeff)); + } + + for (const linear_term &t: constr.b.terms) + { + translated_constr.b.terms.emplace_back(linear_term(pb_variable(cs_to_vars[t.index]), t.coeff)); + } + + for (const linear_term &t: constr.c.terms) + { + translated_constr.c.terms.emplace_back(linear_term(pb_variable(cs_to_vars[t.index]), t.coeff)); + } + + std::string annotation = FMT(this->annotation_prefix, " constraint_%zu", i); + +#ifdef DEBUG + auto it = cs.constraint_annotations.find(i); + if (it != cs.constraint_annotations.end()) + { + annotation = this->annotation_prefix + " " + it->second; + } +#endif + this->pb.add_r1cs_constraint(translated_constr, annotation); + } +} + +template +void gadget_from_r1cs::generate_r1cs_witness(const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input) +{ + assert(cs.num_inputs() == primary_input.size()); + assert(cs.num_variables() == primary_input.size() + auxiliary_input.size()); + + for (size_t i = 0; i < primary_input.size(); ++i) + { + this->pb.val(pb_variable(cs_to_vars[i+1])) = primary_input[i]; + } + + for (size_t i = 0; i < auxiliary_input.size(); ++i) + { + this->pb.val(pb_variable(cs_to_vars[primary_input.size()+i+1])) = auxiliary_input[i]; + } +} + +} // libsnark + +#endif // GADGET_FROM_R1CS_TCC_ diff --git a/src/gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp b/src/gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp new file mode 100644 index 000000000..a7598b9be --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp @@ -0,0 +1,42 @@ +/** + 
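A hedged sketch of wiring the digest selector declared below: given an input digest and a direction bit, it routes the input to either the left or the right child digest (the situation that arises when checking Merkle authentication paths). The 256-bit digest length and the names are illustrative:

#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp"

using namespace libsnark;

template<typename FieldT>
void route_digest(protoboard<FieldT> &pb)
{
    const size_t digest_len = 256;

    digest_variable<FieldT> input(pb, digest_len, "input");
    digest_variable<FieldT> left(pb, digest_len, "left");
    digest_variable<FieldT> right(pb, digest_len, "right");

    pb_variable<FieldT> is_right;
    is_right.allocate(pb, "is_right");

    digest_selector_gadget<FieldT> sel(pb, digest_len, input, is_right, left, right, "sel");
    sel.generate_r1cs_constraints();
    // After assigning input.bits and is_right, sel.generate_r1cs_witness()
    // copies the input digest into the selected side.
}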
***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#ifndef DIGEST_SELECTOR_GADGET_HPP_ +#define DIGEST_SELECTOR_GADGET_HPP_ + +#include + +#include "gadgetlib1/gadgets/basic_gadgets.hpp" +#include "gadgetlib1/gadgets/hashes/hash_io.hpp" + +namespace libsnark { + +template +class digest_selector_gadget : public gadget { +public: + size_t digest_size; + digest_variable input; + pb_linear_combination is_right; + digest_variable left; + digest_variable right; + + digest_selector_gadget(protoboard &pb, + const size_t digest_size, + const digest_variable &input, + const pb_linear_combination &is_right, + const digest_variable &left, + const digest_variable &right, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +} // libsnark + +#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.tcc" + +#endif // DIGEST_SELECTOR_GADGET_HPP_ diff --git a/src/gadgetlib1/gadgets/hashes/digest_selector_gadget.tcc b/src/gadgetlib1/gadgets/hashes/digest_selector_gadget.tcc new file mode 100644 index 000000000..422ee170a --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/digest_selector_gadget.tcc @@ -0,0 +1,62 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#ifndef DIGEST_SELECTOR_GADGET_TCC_ +#define DIGEST_SELECTOR_GADGET_TCC_ + +namespace libsnark { + +template +digest_selector_gadget::digest_selector_gadget(protoboard &pb, + const size_t digest_size, + const digest_variable &input, + const pb_linear_combination &is_right, + const digest_variable &left, + const digest_variable &right, + const std::string &annotation_prefix) : +gadget(pb, annotation_prefix), digest_size(digest_size), input(input), is_right(is_right), left(left), right(right) +{ +} + +template +void digest_selector_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < digest_size; ++i) + { + /* + input = is_right * right + (1-is_right) * left + input - left = is_right(right - left) + */ + this->pb.add_r1cs_constraint(r1cs_constraint(is_right, right.bits[i] - left.bits[i], input.bits[i] - left.bits[i]), + FMT(this->annotation_prefix, " propagate_%zu", i)); + } +} + +template +void digest_selector_gadget::generate_r1cs_witness() +{ + is_right.evaluate(this->pb); + + assert(this->pb.lc_val(is_right) == FieldT::one() || this->pb.lc_val(is_right) == FieldT::zero()); + if (this->pb.lc_val(is_right) == FieldT::one()) + { + for (size_t i = 0; i < digest_size; ++i) + { + this->pb.val(right.bits[i]) = this->pb.val(input.bits[i]); + } + } + else + { + for (size_t i = 0; i < digest_size; ++i) + { + this->pb.val(left.bits[i]) = this->pb.val(input.bits[i]); + } + } +} + +} // libsnark + +#endif // DIGEST_SELECTOR_GADGET_TCC_ diff --git a/src/gadgetlib1/gadgets/hashes/hash_io.hpp b/src/gadgetlib1/gadgets/hashes/hash_io.hpp new file mode 100644 index 000000000..80ca19c61 --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/hash_io.hpp @@ -0,0 +1,63 @@ +/** + ***************************************************************************** + * @author This file is part of 
libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#ifndef HASH_IO_HPP_ +#define HASH_IO_HPP_ +#include +#include +#include "gadgetlib1/gadgets/basic_gadgets.hpp" + +namespace libsnark { + +template +class digest_variable : public gadget { +public: + size_t digest_size; + pb_variable_array bits; + + digest_variable(protoboard &pb, + const size_t digest_size, + const std::string &annotation_prefix); + + digest_variable(protoboard &pb, + const size_t digest_size, + const pb_variable_array &partial_bits, + const pb_variable &padding, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(const bit_vector& contents); + bit_vector get_digest() const; +}; + +template +class block_variable : public gadget { +public: + size_t block_size; + pb_variable_array bits; + + block_variable(protoboard &pb, + const size_t block_size, + const std::string &annotation_prefix); + + block_variable(protoboard &pb, + const std::vector > &parts, + const std::string &annotation_prefix); + + block_variable(protoboard &pb, + const digest_variable &left, + const digest_variable &right, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(const bit_vector& contents); + bit_vector get_block() const; +}; + +} // libsnark +#include "gadgetlib1/gadgets/hashes/hash_io.tcc" + +#endif // HASH_IO_HPP_ diff --git a/src/gadgetlib1/gadgets/hashes/hash_io.tcc b/src/gadgetlib1/gadgets/hashes/hash_io.tcc new file mode 100644 index 000000000..b122d8f98 --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/hash_io.tcc @@ -0,0 +1,105 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
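A hedged sketch of the block/digest plumbing declared in hash_io.hpp above: two digests concatenated into one block (for example, the two children of a Merkle-tree node), with the witness bits filled in afterwards. The 256-bit digest size and the function name are illustrative, and the bit vectors are assumed to have matching length:

#include "gadgetlib1/gadgets/hashes/hash_io.hpp"

using namespace libsnark;

template<typename FieldT>
void build_hash_input(protoboard<FieldT> &pb,
                      const bit_vector &left_bits,     // assumed 256 bits
                      const bit_vector &right_bits)    // assumed 256 bits
{
    digest_variable<FieldT> left(pb, 256, "left");
    digest_variable<FieldT> right(pb, 256, "right");

    // block = left || right, so block_size becomes 512
    block_variable<FieldT> block(pb, left, right, "block");

    left.generate_r1cs_constraints();    // booleanity of each digest bit
    right.generate_r1cs_constraints();

    left.generate_r1cs_witness(left_bits);
    right.generate_r1cs_witness(right_bits);
}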
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#ifndef HASH_IO_TCC_ +#define HASH_IO_TCC_ + +namespace libsnark { + +template +digest_variable::digest_variable(protoboard &pb, + const size_t digest_size, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), digest_size(digest_size) +{ + bits.allocate(pb, digest_size, FMT(this->annotation_prefix, " bits")); +} + +template +digest_variable::digest_variable(protoboard &pb, + const size_t digest_size, + const pb_variable_array &partial_bits, + const pb_variable &padding, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), digest_size(digest_size) +{ + assert(bits.size() <= digest_size); + bits = partial_bits; + while (bits.size() != digest_size) + { + bits.emplace_back(padding); + } +} + +template +void digest_variable::generate_r1cs_constraints() +{ + for (size_t i = 0; i < digest_size; ++i) + { + generate_boolean_r1cs_constraint(this->pb, bits[i], FMT(this->annotation_prefix, " bits_%zu", i)); + } +} + +template +void digest_variable::generate_r1cs_witness(const bit_vector& contents) +{ + bits.fill_with_bits(this->pb, contents); +} + +template +bit_vector digest_variable::get_digest() const +{ + return bits.get_bits(this->pb); +} + +template +block_variable::block_variable(protoboard &pb, + const size_t block_size, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), block_size(block_size) +{ + bits.allocate(pb, block_size, FMT(this->annotation_prefix, " bits")); +} + +template +block_variable::block_variable(protoboard &pb, + const std::vector > &parts, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix) +{ + for (auto &part : parts) + { + bits.insert(bits.end(), part.begin(), part.end()); + } +} + +template +block_variable::block_variable(protoboard &pb, + const digest_variable &left, + const digest_variable &right, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix) +{ + assert(left.bits.size() == right.bits.size()); + block_size = 2 * left.bits.size(); + bits.insert(bits.end(), left.bits.begin(), left.bits.end()); + bits.insert(bits.end(), right.bits.begin(), right.bits.end()); +} + +template +void block_variable::generate_r1cs_witness(const bit_vector& contents) +{ + bits.fill_with_bits(this->pb, contents); +} + +template +bit_vector block_variable::get_block() const +{ + return bits.get_bits(this->pb); +} + +} // libsnark +#endif // HASH_IO_TCC_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/sha256_aux.hpp b/src/gadgetlib1/gadgets/hashes/sha256/sha256_aux.hpp new file mode 100644 index 000000000..e0c7a7e0b --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/sha256_aux.hpp @@ -0,0 +1,160 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for auxiliary gadgets for the SHA256 gadget. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
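The XOR3 gadget declared below relies on the arithmetic identity a xor b = a + b - 2ab over {0,1}, applied twice. A hedged plain-integer sanity check of that identity (not library code):

#include <cassert>

void xor3_identity_check()
{
    for (int a = 0; a <= 1; ++a)
        for (int b = 0; b <= 1; ++b)
            for (int c = 0; c <= 1; ++c)
            {
                const int tmp = a + b - 2*a*b;       // tmp = a xor b
                const int out = tmp + c - 2*tmp*c;   // out = tmp xor c
                assert(out == (a ^ b ^ c));
            }
}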
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SHA256_AUX_HPP_ +#define SHA256_AUX_HPP_ + +#include "gadgetlib1/gadgets/basic_gadgets.hpp" + +namespace libsnark { + +template +class lastbits_gadget : public gadget { +public: + pb_variable X; + size_t X_bits; + pb_variable result; + pb_linear_combination_array result_bits; + + pb_linear_combination_array full_bits; + std::shared_ptr > unpack_bits; + std::shared_ptr > pack_result; + + lastbits_gadget(protoboard &pb, + const pb_variable &X, + const size_t X_bits, + const pb_variable &result, + const pb_linear_combination_array &result_bits, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +class XOR3_gadget : public gadget { +private: + pb_variable tmp; +public: + pb_linear_combination A; + pb_linear_combination B; + pb_linear_combination C; + bool assume_C_is_zero; + pb_linear_combination out; + + XOR3_gadget(protoboard &pb, + const pb_linear_combination &A, + const pb_linear_combination &B, + const pb_linear_combination &C, + const bool assume_C_is_zero, + const pb_linear_combination &out, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template +class small_sigma_gadget : public gadget { +private: + pb_variable_array W; + pb_variable result; +public: + pb_variable_array result_bits; + std::vector > > compute_bits; + std::shared_ptr > pack_result; + + small_sigma_gadget(protoboard &pb, + const pb_variable_array &W, + const pb_variable &result, + const size_t rot1, + const size_t rot2, + const size_t shift, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template +class big_sigma_gadget : public gadget { +private: + pb_linear_combination_array W; + pb_variable result; +public: + pb_variable_array result_bits; + std::vector > > compute_bits; + std::shared_ptr > pack_result; + + big_sigma_gadget(protoboard &pb, + const pb_linear_combination_array &W, + const pb_variable &result, + const size_t rot1, + const size_t rot2, + const size_t rot3, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template +class choice_gadget : public gadget { +private: + pb_variable_array result_bits; +public: + pb_linear_combination_array X; + pb_linear_combination_array Y; + pb_linear_combination_array Z; + pb_variable result; + std::shared_ptr > pack_result; + + choice_gadget(protoboard &pb, + const pb_linear_combination_array &X, + const pb_linear_combination_array &Y, + const pb_linear_combination_array &Z, + const pb_variable &result, const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template +class majority_gadget : public gadget { +private: + pb_variable_array result_bits; + std::shared_ptr > pack_result; +public: + pb_linear_combination_array X; + pb_linear_combination_array Y; + pb_linear_combination_array Z; + pb_variable result; + + majority_gadget(protoboard &pb, + const 
pb_linear_combination_array &X, + const pb_linear_combination_array &Y, + const pb_linear_combination_array &Z, + const pb_variable &result, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +} // libsnark + +#include "gadgetlib1/gadgets/hashes/sha256/sha256_aux.tcc" + +#endif // SHA256_AUX_HPP_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/sha256_aux.tcc b/src/gadgetlib1/gadgets/hashes/sha256/sha256_aux.tcc new file mode 100644 index 000000000..8ab67be5f --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/sha256_aux.tcc @@ -0,0 +1,297 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for auxiliary gadgets for the SHA256 gadget. + + See sha256_aux.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SHA256_AUX_TCC_ +#define SHA256_AUX_TCC_ + +namespace libsnark { + +template +lastbits_gadget::lastbits_gadget(protoboard &pb, + const pb_variable &X, + const size_t X_bits, + const pb_variable &result, + const pb_linear_combination_array &result_bits, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + X(X), + X_bits(X_bits), + result(result), + result_bits(result_bits) +{ + full_bits = result_bits; + for (size_t i = result_bits.size(); i < X_bits; ++i) + { + pb_variable full_bits_overflow; + full_bits_overflow.allocate(pb, FMT(this->annotation_prefix, " full_bits_%zu", i)); + full_bits.emplace_back(full_bits_overflow); + } + + unpack_bits.reset(new packing_gadget(pb, full_bits, X, FMT(this->annotation_prefix, " unpack_bits"))); + pack_result.reset(new packing_gadget(pb, result_bits, result, FMT(this->annotation_prefix, " pack_result"))); +} + +template +void lastbits_gadget::generate_r1cs_constraints() +{ + unpack_bits->generate_r1cs_constraints(true); + pack_result->generate_r1cs_constraints(false); +} + +template +void lastbits_gadget::generate_r1cs_witness() +{ + unpack_bits->generate_r1cs_witness_from_packed(); + pack_result->generate_r1cs_witness_from_bits(); +} + +template +XOR3_gadget::XOR3_gadget(protoboard &pb, + const pb_linear_combination &A, + const pb_linear_combination &B, + const pb_linear_combination &C, + const bool assume_C_is_zero, + const pb_linear_combination &out, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + A(A), + B(B), + C(C), + assume_C_is_zero(assume_C_is_zero), + out(out) +{ + if (!assume_C_is_zero) + { + tmp.allocate(pb, FMT(this->annotation_prefix, " tmp")); + } +} + +template +void XOR3_gadget::generate_r1cs_constraints() +{ + /* + tmp = A + B - 2AB i.e. tmp = A xor B + out = tmp + C - 2tmp C i.e. 
out = tmp xor C + */ + if (assume_C_is_zero) + { + this->pb.add_r1cs_constraint(r1cs_constraint(2*A, B, A + B - out), FMT(this->annotation_prefix, " implicit_tmp_equals_out")); + } + else + { + this->pb.add_r1cs_constraint(r1cs_constraint(2*A, B, A + B - tmp), FMT(this->annotation_prefix, " tmp")); + this->pb.add_r1cs_constraint(r1cs_constraint(2 * tmp, C, tmp + C - out), FMT(this->annotation_prefix, " out")); + } +} + +template +void XOR3_gadget::generate_r1cs_witness() +{ + if (assume_C_is_zero) + { + this->pb.lc_val(out) = this->pb.lc_val(A) + this->pb.lc_val(B) - FieldT(2) * this->pb.lc_val(A) * this->pb.lc_val(B); + } + else + { + this->pb.val(tmp) = this->pb.lc_val(A) + this->pb.lc_val(B) - FieldT(2) * this->pb.lc_val(A) * this->pb.lc_val(B); + this->pb.lc_val(out) = this->pb.val(tmp) + this->pb.lc_val(C) - FieldT(2) * this->pb.val(tmp) * this->pb.lc_val(C); + } +} + +#define SHA256_GADGET_ROTR(A, i, k) A[((i)+(k)) % 32] + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template +small_sigma_gadget::small_sigma_gadget(protoboard &pb, + const pb_variable_array &W, + const pb_variable &result, + const size_t rot1, + const size_t rot2, + const size_t shift, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + W(W), + result(result) +{ + result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits")); + compute_bits.resize(32); + for (size_t i = 0; i < 32; ++i) + { + compute_bits[i].reset(new XOR3_gadget(pb, SHA256_GADGET_ROTR(W, i, rot1), SHA256_GADGET_ROTR(W, i, rot2), + (i + shift < 32 ? W[i+shift] : ONE), + (i + shift >= 32), result_bits[i], + FMT(this->annotation_prefix, " compute_bits_%zu", i))); + } + pack_result.reset(new packing_gadget(pb, result_bits, result, FMT(this->annotation_prefix, " pack_result"))); +} + +template +void small_sigma_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < 32; ++i) + { + compute_bits[i]->generate_r1cs_constraints(); + } + + pack_result->generate_r1cs_constraints(false); +} + +template +void small_sigma_gadget::generate_r1cs_witness() +{ + for (size_t i = 0; i < 32; ++i) + { + compute_bits[i]->generate_r1cs_witness(); + } + + pack_result->generate_r1cs_witness_from_bits(); +} + +template +big_sigma_gadget::big_sigma_gadget(protoboard &pb, + const pb_linear_combination_array &W, + const pb_variable &result, + const size_t rot1, + const size_t rot2, + const size_t rot3, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + W(W), + result(result) +{ + result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits")); + compute_bits.resize(32); + for (size_t i = 0; i < 32; ++i) + { + compute_bits[i].reset(new XOR3_gadget(pb, SHA256_GADGET_ROTR(W, i, rot1), SHA256_GADGET_ROTR(W, i, rot2), SHA256_GADGET_ROTR(W, i, rot3), false, result_bits[i], + FMT(this->annotation_prefix, " compute_bits_%zu", i))); + } + + pack_result.reset(new packing_gadget(pb, result_bits, result, FMT(this->annotation_prefix, " pack_result"))); +} + +template +void big_sigma_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < 32; ++i) + { + compute_bits[i]->generate_r1cs_constraints(); + } + + pack_result->generate_r1cs_constraints(false); +} + +template +void big_sigma_gadget::generate_r1cs_witness() +{ + for (size_t i = 0; i < 32; ++i) + { + compute_bits[i]->generate_r1cs_witness(); + } + + pack_result->generate_r1cs_witness_from_bits(); +} + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template 
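+/* For reference: this gadget encodes the SHA-256 "choice" function
+   Ch(x, y, z) = (x AND y) XOR ((NOT x) AND z) of FIPS 180-4.  For boolean
+   x, y, z this is, bit by bit,
+       result = x*y + (1-x)*z,
+   which generate_r1cs_constraints() below enforces with one rank-1
+   constraint per bit:  x * (y - z) = result - z.
+   A plain word-level sketch of the same function (illustration only;
+   sha256_ch is not a libsnark function):
+
+       inline uint32_t sha256_ch(uint32_t x, uint32_t y, uint32_t z)
+       {
+           return (x & y) ^ (~x & z);
+       }
+*/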
+choice_gadget::choice_gadget(protoboard &pb, + const pb_linear_combination_array &X, + const pb_linear_combination_array &Y, + const pb_linear_combination_array &Z, + const pb_variable &result, const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + X(X), + Y(Y), + Z(Z), + result(result) +{ + result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits")); + pack_result.reset(new packing_gadget(pb, result_bits, result, FMT(this->annotation_prefix, " result"))); +} + +template +void choice_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < 32; ++i) + { + /* + result = x * y + (1-x) * z + result - z = x * (y - z) + */ + this->pb.add_r1cs_constraint(r1cs_constraint(X[i], Y[i] - Z[i], result_bits[i] - Z[i]), FMT(this->annotation_prefix, " result_bits_%zu", i)); + } + pack_result->generate_r1cs_constraints(false); +} + +template +void choice_gadget::generate_r1cs_witness() +{ + for (size_t i = 0; i < 32; ++i) + { + this->pb.val(result_bits[i]) = this->pb.lc_val(X[i]) * this->pb.lc_val(Y[i]) + (FieldT::one() - this->pb.lc_val(X[i])) * this->pb.lc_val(Z[i]); + } + pack_result->generate_r1cs_witness_from_bits(); +} + +/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */ +template +majority_gadget::majority_gadget(protoboard &pb, + const pb_linear_combination_array &X, + const pb_linear_combination_array &Y, + const pb_linear_combination_array &Z, + const pb_variable &result, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + X(X), + Y(Y), + Z(Z), + result(result) +{ + result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits")); + pack_result.reset(new packing_gadget(pb, result_bits, result, FMT(this->annotation_prefix, " result"))); +} + +template +void majority_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < 32; ++i) + { + /* + 2*result + aux = x + y + z + x, y, z, aux -- bits + aux = x + y + z - 2*result + */ + generate_boolean_r1cs_constraint(this->pb, result_bits[i], FMT(this->annotation_prefix, " result_%zu", i)); + this->pb.add_r1cs_constraint(r1cs_constraint(X[i] + Y[i] + Z[i] - 2 * result_bits[i], + 1 - (X[i] + Y[i] + Z[i] - 2 * result_bits[i]), + 0), + FMT(this->annotation_prefix, " result_bits_%zu", i)); + } + pack_result->generate_r1cs_constraints(false); +} + +template +void majority_gadget::generate_r1cs_witness() +{ + for (size_t i = 0; i < 32; ++i) + { + const long v = (this->pb.lc_val(X[i]) + this->pb.lc_val(Y[i]) + this->pb.lc_val(Z[i])).as_ulong(); + this->pb.val(result_bits[i]) = FieldT(v / 2); + } + + pack_result->generate_r1cs_witness_from_bits(); +} + +} // libsnark + +#endif // SHA256_AUX_TCC_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/sha256_components.hpp b/src/gadgetlib1/gadgets/hashes/sha256/sha256_components.hpp new file mode 100644 index 000000000..c2f31e3af --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/sha256_components.hpp @@ -0,0 +1,108 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for gadgets for the SHA256 message schedule and round function. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SHA256_COMPONENTS_HPP_ +#define SHA256_COMPONENTS_HPP_ + +#include "gadgetlib1/gadgets/basic_gadgets.hpp" +#include "gadgetlib1/gadgets/hashes/hash_io.hpp" +#include "gadgetlib1/gadgets/hashes/sha256/sha256_aux.hpp" + +namespace libsnark { + +const size_t SHA256_digest_size = 256; +const size_t SHA256_block_size = 512; + +template +pb_linear_combination_array SHA256_default_IV(protoboard &pb); + +template +class sha256_message_schedule_gadget : public gadget { +public: + std::vector > W_bits; + std::vector > > pack_W; + + std::vector > sigma0; + std::vector > sigma1; + std::vector > > compute_sigma0; + std::vector > > compute_sigma1; + std::vector > unreduced_W; + std::vector > > mod_reduce_W; +public: + pb_variable_array M; + pb_variable_array packed_W; + sha256_message_schedule_gadget(protoboard &pb, + const pb_variable_array &M, + const pb_variable_array &packed_W, + const std::string &annotation_prefix); + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +template +class sha256_round_function_gadget : public gadget { +public: + pb_variable sigma0; + pb_variable sigma1; + std::shared_ptr > compute_sigma0; + std::shared_ptr > compute_sigma1; + pb_variable choice; + pb_variable majority; + std::shared_ptr > compute_choice; + std::shared_ptr > compute_majority; + pb_variable packed_d; + std::shared_ptr > pack_d; + pb_variable packed_h; + std::shared_ptr > pack_h; + pb_variable unreduced_new_a; + pb_variable unreduced_new_e; + std::shared_ptr > mod_reduce_new_a; + std::shared_ptr > mod_reduce_new_e; + pb_variable packed_new_a; + pb_variable packed_new_e; +public: + pb_linear_combination_array a; + pb_linear_combination_array b; + pb_linear_combination_array c; + pb_linear_combination_array d; + pb_linear_combination_array e; + pb_linear_combination_array f; + pb_linear_combination_array g; + pb_linear_combination_array h; + pb_variable W; + long K; + pb_linear_combination_array new_a; + pb_linear_combination_array new_e; + + sha256_round_function_gadget(protoboard &pb, + const pb_linear_combination_array &a, + const pb_linear_combination_array &b, + const pb_linear_combination_array &c, + const pb_linear_combination_array &d, + const pb_linear_combination_array &e, + const pb_linear_combination_array &f, + const pb_linear_combination_array &g, + const pb_linear_combination_array &h, + const pb_variable &W, + const long &K, + const pb_linear_combination_array &new_a, + const pb_linear_combination_array &new_e, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +} // libsnark + +#include "gadgetlib1/gadgets/hashes/sha256/sha256_components.tcc" + +#endif // SHA256_COMPONENTS_HPP_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/sha256_components.tcc b/src/gadgetlib1/gadgets/hashes/sha256/sha256_components.tcc new file mode 100644 index 000000000..e8f233a54 --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/sha256_components.tcc @@ -0,0 +1,250 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for gadgets for the SHA256 message schedule and round function. + + See sha256_components.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
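+ *
+ * The SHA256_K table below holds the 64 round constants and SHA256_H the
+ * initial hash value, both copied verbatim from FIPS 180-4 (sections 4.2.2
+ * and 5.3.3 respectively).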
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SHA256_COMPONENTS_TCC_ +#define SHA256_COMPONENTS_TCC_ + +namespace libsnark { + +const unsigned long SHA256_K[64] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +}; + +const unsigned long SHA256_H[8] = { + 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 +}; + +template +pb_linear_combination_array SHA256_default_IV(protoboard &pb) +{ + pb_linear_combination_array result; + result.reserve(SHA256_digest_size); + + for (size_t i = 0; i < SHA256_digest_size; ++i) + { + int iv_val = (SHA256_H[i / 32] >> (31-(i % 32))) & 1; + + pb_linear_combination iv_element; + iv_element.assign(pb, iv_val * ONE); + iv_element.evaluate(pb); + + result.emplace_back(iv_element); + } + + return result; +} + +template +sha256_message_schedule_gadget::sha256_message_schedule_gadget(protoboard &pb, + const pb_variable_array &M, + const pb_variable_array &packed_W, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + M(M), + packed_W(packed_W) +{ + W_bits.resize(64); + + pack_W.resize(16); + for (size_t i = 0; i < 16; ++i) + { + W_bits[i] = pb_variable_array(M.rbegin() + (15-i) * 32, M.rbegin() + (16-i) * 32); + pack_W[i].reset(new packing_gadget(pb, W_bits[i], packed_W[i], FMT(this->annotation_prefix, " pack_W_%zu", i))); + } + + /* NB: some of those will be un-allocated */ + sigma0.resize(64); + sigma1.resize(64); + compute_sigma0.resize(64); + compute_sigma1.resize(64); + unreduced_W.resize(64); + mod_reduce_W.resize(64); + + for (size_t i = 16; i < 64; ++i) + { + /* allocate result variables for sigma0/sigma1 invocations */ + sigma0[i].allocate(pb, FMT(this->annotation_prefix, " sigma0_%zu", i)); + sigma1[i].allocate(pb, FMT(this->annotation_prefix, " sigma1_%zu", i)); + + /* compute sigma0/sigma1 */ + compute_sigma0[i].reset(new small_sigma_gadget(pb, W_bits[i-15], sigma0[i], 7, 18, 3, FMT(this->annotation_prefix, " compute_sigma0_%zu", i))); + compute_sigma1[i].reset(new small_sigma_gadget(pb, W_bits[i-2], sigma1[i], 17, 19, 10, FMT(this->annotation_prefix, " compute_sigma1_%zu", i))); + + /* unreduced_W = sigma0(W_{i-15}) + sigma1(W_{i-2}) + W_{i-7} + W_{i-16} before modulo 2^32 */ + unreduced_W[i].allocate(pb, FMT(this->annotation_prefix, "unreduced_W_%zu", i)); + + /* allocate the bit representation of packed_W[i] */ + W_bits[i].allocate(pb, 32, FMT(this->annotation_prefix, " W_bits_%zu", i)); + + /* and finally reduce this into packed and bit representations */ + mod_reduce_W[i].reset(new lastbits_gadget(pb, unreduced_W[i], 32+2, packed_W[i], W_bits[i], FMT(this->annotation_prefix, " mod_reduce_W_%zu", i))); + } +} + +template +void 
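+/* For reference: the constructor above lays out the standard SHA-256
+   message schedule (FIPS 180-4, section 6.2.2).  W[0..15] are the sixteen
+   32-bit words of the input block; for t = 16..63 the gadget constrains
+       W[t] = ( sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] ) mod 2^32,
+   where the sum is first formed over the field (unreduced_W, an at most
+   34-bit value) and then cut back to 32 bits by lastbits_gadget.
+   A plain word-level sketch of the same recurrence (illustration only;
+   rotr is a hypothetical helper, not part of this library):
+
+       static inline uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }
+       uint32_t s0 = rotr(W[t-15], 7) ^ rotr(W[t-15], 18) ^ (W[t-15] >> 3);
+       uint32_t s1 = rotr(W[t-2], 17) ^ rotr(W[t-2], 19) ^ (W[t-2] >> 10);
+       W[t] = s1 + W[t-7] + s0 + W[t-16];   // wraps modulo 2^32 (unsigned overflow)
+*/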
sha256_message_schedule_gadget::generate_r1cs_constraints() +{ + for (size_t i = 0; i < 16; ++i) + { + pack_W[i]->generate_r1cs_constraints(false); // do not enforce bitness here; caller be aware. + } + + for (size_t i = 16; i < 64; ++i) + { + compute_sigma0[i]->generate_r1cs_constraints(); + compute_sigma1[i]->generate_r1cs_constraints(); + + this->pb.add_r1cs_constraint(r1cs_constraint(1, + sigma0[i] + sigma1[i] + packed_W[i-16] + packed_W[i-7], + unreduced_W[i]), + FMT(this->annotation_prefix, " unreduced_W_%zu", i)); + + mod_reduce_W[i]->generate_r1cs_constraints(); + } +} + +template +void sha256_message_schedule_gadget::generate_r1cs_witness() +{ + for (size_t i = 0; i < 16; ++i) + { + pack_W[i]->generate_r1cs_witness_from_bits(); + } + + for (size_t i = 16; i < 64; ++i) + { + compute_sigma0[i]->generate_r1cs_witness(); + compute_sigma1[i]->generate_r1cs_witness(); + + this->pb.val(unreduced_W[i]) = this->pb.val(sigma0[i]) + this->pb.val(sigma1[i]) + this->pb.val(packed_W[i-16]) + this->pb.val(packed_W[i-7]); + mod_reduce_W[i]->generate_r1cs_witness(); + } +} + +template +sha256_round_function_gadget::sha256_round_function_gadget(protoboard &pb, + const pb_linear_combination_array &a, + const pb_linear_combination_array &b, + const pb_linear_combination_array &c, + const pb_linear_combination_array &d, + const pb_linear_combination_array &e, + const pb_linear_combination_array &f, + const pb_linear_combination_array &g, + const pb_linear_combination_array &h, + const pb_variable &W, + const long &K, + const pb_linear_combination_array &new_a, + const pb_linear_combination_array &new_e, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + a(a), + b(b), + c(c), + d(d), + e(e), + f(f), + g(g), + h(h), + W(W), + K(K), + new_a(new_a), + new_e(new_e) +{ + /* compute sigma0 and sigma1 */ + sigma0.allocate(pb, FMT(this->annotation_prefix, " sigma0")); + sigma1.allocate(pb, FMT(this->annotation_prefix, " sigma1")); + compute_sigma0.reset(new big_sigma_gadget(pb, a, sigma0, 2, 13, 22, FMT(this->annotation_prefix, " compute_sigma0"))); + compute_sigma1.reset(new big_sigma_gadget(pb, e, sigma1, 6, 11, 25, FMT(this->annotation_prefix, " compute_sigma1"))); + + /* compute choice */ + choice.allocate(pb, FMT(this->annotation_prefix, " choice")); + compute_choice.reset(new choice_gadget(pb, e, f, g, choice, FMT(this->annotation_prefix, " compute_choice"))); + + /* compute majority */ + majority.allocate(pb, FMT(this->annotation_prefix, " majority")); + compute_majority.reset(new majority_gadget(pb, a, b, c, majority, FMT(this->annotation_prefix, " compute_majority"))); + + /* pack d */ + packed_d.allocate(pb, FMT(this->annotation_prefix, " packed_d")); + pack_d.reset(new packing_gadget(pb, d, packed_d, FMT(this->annotation_prefix, " pack_d"))); + + /* pack h */ + packed_h.allocate(pb, FMT(this->annotation_prefix, " packed_h")); + pack_h.reset(new packing_gadget(pb, h, packed_h, FMT(this->annotation_prefix, " pack_h"))); + + /* compute the actual results for the round */ + unreduced_new_a.allocate(pb, FMT(this->annotation_prefix, " unreduced_new_a")); + unreduced_new_e.allocate(pb, FMT(this->annotation_prefix, " unreduced_new_e")); + + packed_new_a.allocate(pb, FMT(this->annotation_prefix, " packed_new_a")); + packed_new_e.allocate(pb, FMT(this->annotation_prefix, " packed_new_e")); + + mod_reduce_new_a.reset(new lastbits_gadget(pb, unreduced_new_a, 32+3, packed_new_a, new_a, FMT(this->annotation_prefix, " mod_reduce_new_a"))); + mod_reduce_new_e.reset(new lastbits_gadget(pb, 
unreduced_new_e, 32+3, packed_new_e, new_e, FMT(this->annotation_prefix, " mod_reduce_new_e"))); +} + +template +void sha256_round_function_gadget::generate_r1cs_constraints() +{ + compute_sigma0->generate_r1cs_constraints(); + compute_sigma1->generate_r1cs_constraints(); + + compute_choice->generate_r1cs_constraints(); + compute_majority->generate_r1cs_constraints(); + + pack_d->generate_r1cs_constraints(false); + pack_h->generate_r1cs_constraints(false); + + this->pb.add_r1cs_constraint(r1cs_constraint(1, + packed_h + sigma1 + choice + K + W + sigma0 + majority, + unreduced_new_a), + FMT(this->annotation_prefix, " unreduced_new_a")); + + this->pb.add_r1cs_constraint(r1cs_constraint(1, + packed_d + packed_h + sigma1 + choice + K + W, + unreduced_new_e), + FMT(this->annotation_prefix, " unreduced_new_e")); + + mod_reduce_new_a->generate_r1cs_constraints(); + mod_reduce_new_e->generate_r1cs_constraints(); +} + +template +void sha256_round_function_gadget::generate_r1cs_witness() +{ + compute_sigma0->generate_r1cs_witness(); + compute_sigma1->generate_r1cs_witness(); + + compute_choice->generate_r1cs_witness(); + compute_majority->generate_r1cs_witness(); + + pack_d->generate_r1cs_witness_from_bits(); + pack_h->generate_r1cs_witness_from_bits(); + + this->pb.val(unreduced_new_a) = this->pb.val(packed_h) + this->pb.val(sigma1) + this->pb.val(choice) + FieldT(K) + this->pb.val(W) + this->pb.val(sigma0) + this->pb.val(majority); + this->pb.val(unreduced_new_e) = this->pb.val(packed_d) + this->pb.val(packed_h) + this->pb.val(sigma1) + this->pb.val(choice) + FieldT(K) + this->pb.val(W); + + mod_reduce_new_a->generate_r1cs_witness(); + mod_reduce_new_e->generate_r1cs_witness(); +} + +} // libsnark + +#endif // SHA256_COMPONENTS_TCC_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp b/src/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp new file mode 100644 index 000000000..8cb6365c8 --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp @@ -0,0 +1,98 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for top-level SHA256 gadgets. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SHA256_GADGET_HPP_ +#define SHA256_GADGET_HPP_ + +#include "common/data_structures/merkle_tree.hpp" +#include "gadgetlib1/gadgets/basic_gadgets.hpp" +#include "gadgetlib1/gadgets/hashes/hash_io.hpp" +#include "gadgetlib1/gadgets/hashes/sha256/sha256_components.hpp" + +namespace libsnark { + +/** + * Gadget for the SHA256 compression function. 
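+ *
+ * Takes the previous 256-bit chaining value (prev_output, eight 32-bit
+ * words given as bits) and a 512-bit message block, runs the 64 unrolled
+ * rounds via sha256_round_function_gadget, adds the final round state back
+ * into the chaining value, and reduces each of the resulting 33-bit sums
+ * modulo 2^32 into the output digest_variable.  The standard SHA256
+ * initialization vector is available through SHA256_default_IV().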
+ */ +template +class sha256_compression_function_gadget : public gadget { +public: + std::vector > round_a; + std::vector > round_b; + std::vector > round_c; + std::vector > round_d; + std::vector > round_e; + std::vector > round_f; + std::vector > round_g; + std::vector > round_h; + + pb_variable_array packed_W; + std::shared_ptr > message_schedule; + std::vector > round_functions; + + pb_variable_array unreduced_output; + pb_variable_array reduced_output; + std::vector > reduce_output; +public: + pb_linear_combination_array prev_output; + pb_variable_array new_block; + digest_variable output; + + sha256_compression_function_gadget(protoboard &pb, + const pb_linear_combination_array &prev_output, + const pb_variable_array &new_block, + const digest_variable &output, + const std::string &annotation_prefix); + void generate_r1cs_constraints(); + void generate_r1cs_witness(); +}; + +/** + * Gadget for the SHA256 compression function, viewed as a 2-to-1 hash + * function, and using the same initialization vector as in SHA256 + * specification. Thus, any collision for + * sha256_two_to_one_hash_gadget trivially extends to a collision for + * full SHA256 (by appending the same padding). + */ +template +class sha256_two_to_one_hash_gadget : public gadget { +public: + typedef bit_vector hash_value_type; + typedef merkle_authentication_path merkle_authentication_path_type; + + std::shared_ptr > f; + + sha256_two_to_one_hash_gadget(protoboard &pb, + const digest_variable &left, + const digest_variable &right, + const digest_variable &output, + const std::string &annotation_prefix); + sha256_two_to_one_hash_gadget(protoboard &pb, + const size_t block_length, + const block_variable &input_block, + const digest_variable &output, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(const bool ensure_output_bitness=true); // TODO: ignored for now + void generate_r1cs_witness(); + + static size_t get_block_len(); + static size_t get_digest_len(); + static bit_vector get_hash(const bit_vector &input); + + static size_t expected_constraints(const bool ensure_output_bitness=true); // TODO: ignored for now +}; + +} // libsnark + +#include "gadgetlib1/gadgets/hashes/sha256/sha256_gadget.tcc" + +#endif // SHA256_GADGET_HPP_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.tcc b/src/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.tcc new file mode 100644 index 000000000..fc7ac982a --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.tcc @@ -0,0 +1,230 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for top-level SHA256 gadgets. + + See sha256_gadget.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef SHA256_GADGET_TCC_ +#define SHA256_GADGET_TCC_ + +namespace libsnark { + +template +sha256_compression_function_gadget::sha256_compression_function_gadget(protoboard &pb, + const pb_linear_combination_array &prev_output, + const pb_variable_array &new_block, + const digest_variable &output, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + prev_output(prev_output), + new_block(new_block), + output(output) +{ + /* message schedule and inputs for it */ + packed_W.allocate(pb, 64, FMT(this->annotation_prefix, " packed_W")); + message_schedule.reset(new sha256_message_schedule_gadget(pb, new_block, packed_W, FMT(this->annotation_prefix, " message_schedule"))); + + /* initalize */ + round_a.push_back(pb_linear_combination_array(prev_output.rbegin() + 7*32, prev_output.rbegin() + 8*32)); + round_b.push_back(pb_linear_combination_array(prev_output.rbegin() + 6*32, prev_output.rbegin() + 7*32)); + round_c.push_back(pb_linear_combination_array(prev_output.rbegin() + 5*32, prev_output.rbegin() + 6*32)); + round_d.push_back(pb_linear_combination_array(prev_output.rbegin() + 4*32, prev_output.rbegin() + 5*32)); + round_e.push_back(pb_linear_combination_array(prev_output.rbegin() + 3*32, prev_output.rbegin() + 4*32)); + round_f.push_back(pb_linear_combination_array(prev_output.rbegin() + 2*32, prev_output.rbegin() + 3*32)); + round_g.push_back(pb_linear_combination_array(prev_output.rbegin() + 1*32, prev_output.rbegin() + 2*32)); + round_h.push_back(pb_linear_combination_array(prev_output.rbegin() + 0*32, prev_output.rbegin() + 1*32)); + + /* do the rounds */ + for (size_t i = 0; i < 64; ++i) + { + round_h.push_back(round_g[i]); + round_g.push_back(round_f[i]); + round_f.push_back(round_e[i]); + round_d.push_back(round_c[i]); + round_c.push_back(round_b[i]); + round_b.push_back(round_a[i]); + + pb_variable_array new_round_a_variables; + new_round_a_variables.allocate(pb, 32, FMT(this->annotation_prefix, " new_round_a_variables_%zu", i+1)); + round_a.emplace_back(new_round_a_variables); + + pb_variable_array new_round_e_variables; + new_round_e_variables.allocate(pb, 32, FMT(this->annotation_prefix, " new_round_e_variables_%zu", i+1)); + round_e.emplace_back(new_round_e_variables); + + round_functions.push_back(sha256_round_function_gadget(pb, + round_a[i], round_b[i], round_c[i], round_d[i], + round_e[i], round_f[i], round_g[i], round_h[i], + packed_W[i], SHA256_K[i], round_a[i+1], round_e[i+1], + FMT(this->annotation_prefix, " round_functions_%zu", i))); + } + + /* finalize */ + unreduced_output.allocate(pb, 8, FMT(this->annotation_prefix, " unreduced_output")); + reduced_output.allocate(pb, 8, FMT(this->annotation_prefix, " reduced_output")); + for (size_t i = 0; i < 8; ++i) + { + reduce_output.push_back(lastbits_gadget(pb, + unreduced_output[i], + 32+1, + reduced_output[i], + pb_variable_array(output.bits.rbegin() + (7-i) * 32, output.bits.rbegin() + (8-i) * 32), + FMT(this->annotation_prefix, " reduce_output_%zu", i))); + } +} + +template +void sha256_compression_function_gadget::generate_r1cs_constraints() +{ + message_schedule->generate_r1cs_constraints(); + for (size_t i = 0; i < 64; ++i) + { + round_functions[i].generate_r1cs_constraints(); + } + + for (size_t i = 0; i < 4; ++i) + { + this->pb.add_r1cs_constraint(r1cs_constraint(1, + round_functions[3-i].packed_d + round_functions[63-i].packed_new_a, + unreduced_output[i]), + 
FMT(this->annotation_prefix, " unreduced_output_%zu", i)); + + this->pb.add_r1cs_constraint(r1cs_constraint(1, + round_functions[3-i].packed_h + round_functions[63-i].packed_new_e, + unreduced_output[4+i]), + FMT(this->annotation_prefix, " unreduced_output_%zu", 4+i)); + } + + for (size_t i = 0; i < 8; ++i) + { + reduce_output[i].generate_r1cs_constraints(); + } +} + +template +void sha256_compression_function_gadget::generate_r1cs_witness() +{ + message_schedule->generate_r1cs_witness(); + +#ifdef DEBUG + printf("Input:\n"); + for (size_t j = 0; j < 16; ++j) + { + printf("%lx ", this->pb.val(packed_W[j]).as_ulong()); + } + printf("\n"); +#endif + + for (size_t i = 0; i < 64; ++i) + { + round_functions[i].generate_r1cs_witness(); + } + + for (size_t i = 0; i < 4; ++i) + { + this->pb.val(unreduced_output[i]) = this->pb.val(round_functions[3-i].packed_d) + this->pb.val(round_functions[63-i].packed_new_a); + this->pb.val(unreduced_output[4+i]) = this->pb.val(round_functions[3-i].packed_h) + this->pb.val(round_functions[63-i].packed_new_e); + } + + for (size_t i = 0; i < 8; ++i) + { + reduce_output[i].generate_r1cs_witness(); + } + +#ifdef DEBUG + printf("Output:\n"); + for (size_t j = 0; j < 8; ++j) + { + printf("%lx ", this->pb.val(reduced_output[j]).as_ulong()); + } + printf("\n"); +#endif +} + +template +sha256_two_to_one_hash_gadget::sha256_two_to_one_hash_gadget(protoboard &pb, + const digest_variable &left, + const digest_variable &right, + const digest_variable &output, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix) +{ + /* concatenate block = left || right */ + pb_variable_array block; + block.insert(block.end(), left.bits.begin(), left.bits.end()); + block.insert(block.end(), right.bits.begin(), right.bits.end()); + + /* compute the hash itself */ + f.reset(new sha256_compression_function_gadget(pb, SHA256_default_IV(pb), block, output, FMT(this->annotation_prefix, " f"))); +} + +template +sha256_two_to_one_hash_gadget::sha256_two_to_one_hash_gadget(protoboard &pb, + const size_t block_length, + const block_variable &input_block, + const digest_variable &output, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix) +{ + assert(block_length == SHA256_block_size); + assert(input_block.bits.size() == block_length); + f.reset(new sha256_compression_function_gadget(pb, SHA256_default_IV(pb), input_block.bits, output, FMT(this->annotation_prefix, " f"))); +} + +template +void sha256_two_to_one_hash_gadget::generate_r1cs_constraints(const bool ensure_output_bitness) +{ + UNUSED(ensure_output_bitness); + f->generate_r1cs_constraints(); +} + +template +void sha256_two_to_one_hash_gadget::generate_r1cs_witness() +{ + f->generate_r1cs_witness(); +} + +template +size_t sha256_two_to_one_hash_gadget::get_block_len() +{ + return SHA256_block_size; +} + +template +size_t sha256_two_to_one_hash_gadget::get_digest_len() +{ + return SHA256_digest_size; +} + +template +bit_vector sha256_two_to_one_hash_gadget::get_hash(const bit_vector &input) +{ + protoboard pb; + + block_variable input_variable(pb, SHA256_block_size, "input"); + digest_variable output_variable(pb, SHA256_digest_size, "output"); + sha256_two_to_one_hash_gadget f(pb, SHA256_block_size, input_variable, output_variable, "f"); + + input_variable.generate_r1cs_witness(input); + f.generate_r1cs_witness(); + + return output_variable.get_digest(); +} + +template +size_t sha256_two_to_one_hash_gadget::expected_constraints(const bool ensure_output_bitness) +{ + UNUSED(ensure_output_bitness); + 
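+    /* The figure below is an empirically measured constraint count for one
+       invocation of this gadget; as the trailing note says, it is hardcoded
+       rather than derived from a formula.  If the underlying gadgets change,
+       it can be re-measured the same way tests/test_sha256_gadget.cpp does,
+       e.g.:
+
+           protoboard<FieldT> pb;
+           digest_variable<FieldT> left(pb, SHA256_digest_size, "left");
+           digest_variable<FieldT> right(pb, SHA256_digest_size, "right");
+           digest_variable<FieldT> output(pb, SHA256_digest_size, "output");
+           sha256_two_to_one_hash_gadget<FieldT> f(pb, left, right, output, "f");
+           f.generate_r1cs_constraints();
+           // pb.num_constraints() is the value to hard-code here
+    */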
return 27280; /* hardcoded for now */ +} + +} // libsnark + +#endif // SHA256_GADGET_TCC_ diff --git a/src/gadgetlib1/gadgets/hashes/sha256/tests/generate_sha256_gadget_tests.py b/src/gadgetlib1/gadgets/hashes/sha256/tests/generate_sha256_gadget_tests.py new file mode 100644 index 000000000..452317ffb --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/tests/generate_sha256_gadget_tests.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +## +# @author This file is part of libsnark, developed by SCIPR Lab +# and contributors (see AUTHORS). +# @copyright MIT license (see LICENSE file) + +import random +import pypy_sha256 # PyPy's implementation of SHA256 compression function; see copyright and authorship notice within. + +BLOCK_LEN = 512 +BLOCK_BYTES = BLOCK_LEN // 8 +HASH_LEN = 256 +HASH_BYTES = HASH_LEN // 8 + +def gen_random_bytes(n): + return [random.randint(0, 255) for i in xrange(n)] + +def words_to_bytes(arr): + return sum(([x >> 24, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff] for x in arr), []) + +def bytes_to_words(arr): + l = len(arr) + assert l % 4 == 0 + return [(arr[i*4 + 3] << 24) + (arr[i*4+2] << 16) + (arr[i*4+1] << 8) + arr[i*4] for i in xrange(l//4)] + +def cpp_val(s, log_radix=32): + if log_radix == 8: + hexfmt = '0x%02x' + elif log_radix == 32: + hexfmt = '0x%08x' + s = bytes_to_words(s) + else: + raise + return 'int_list_to_bits({%s}, %d)' % (', '.join(hexfmt % x for x in s), log_radix) + +def H_bytes(x): + assert len(x) == BLOCK_BYTES + state = pypy_sha256.sha_init() + state['data'] = words_to_bytes(bytes_to_words(x)) + pypy_sha256.sha_transform(state) + return words_to_bytes(bytes_to_words(words_to_bytes(state['digest']))) + +def generate_sha256_gadget_tests(): + left = gen_random_bytes(HASH_BYTES) + right = gen_random_bytes(HASH_BYTES) + hash = H_bytes(left + right) + + print "const bit_vector left_bv = %s;" % cpp_val(left) + print "const bit_vector right_bv = %s;" % cpp_val(right) + print "const bit_vector hash_bv = %s;" % cpp_val(hash) + +if __name__ == '__main__': + random.seed(0) # for reproducibility + generate_sha256_gadget_tests() + diff --git a/src/gadgetlib1/gadgets/hashes/sha256/tests/pypy_sha256.py b/src/gadgetlib1/gadgets/hashes/sha256/tests/pypy_sha256.py new file mode 100644 index 000000000..496989c11 --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/tests/pypy_sha256.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python +# +# SHA256 compression function implementation below is a verbatim copy of PyPy's implementation from +# https://bitbucket.org/pypy/pypy/raw/f1f064b3faf1e012f7a9a9ab08f18074637ebe8a/lib_pypy/_sha256.py . +# +# It is licensed under the MIT license and copyright PyPy Copyright holders 2003-2015 +# See https://bitbucket.org/pypy/pypy/src/tip/LICENSE for the full copyright notice. 
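+# Within this tree the module is driven by generate_sha256_gadget_tests.py
+# above, which fills state['data'] with a single 512-bit block and calls
+# sha_transform() directly -- i.e. it exercises the bare compression
+# function without length padding, matching the semantics proven by the
+# SHA256 gadget (sha256_two_to_one_hash_gadget).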
+# + +SHA_BLOCKSIZE = 64 +SHA_DIGESTSIZE = 32 + + +def new_shaobject(): + return { + 'digest': [0]*8, + 'count_lo': 0, + 'count_hi': 0, + 'data': [0]* SHA_BLOCKSIZE, + 'local': 0, + 'digestsize': 0 + } + +ROR = lambda x, y: (((x & 0xffffffff) >> (y & 31)) | (x << (32 - (y & 31)))) & 0xffffffff +Ch = lambda x, y, z: (z ^ (x & (y ^ z))) +Maj = lambda x, y, z: (((x | y) & z) | (x & y)) +S = lambda x, n: ROR(x, n) +R = lambda x, n: (x & 0xffffffff) >> n +Sigma0 = lambda x: (S(x, 2) ^ S(x, 13) ^ S(x, 22)) +Sigma1 = lambda x: (S(x, 6) ^ S(x, 11) ^ S(x, 25)) +Gamma0 = lambda x: (S(x, 7) ^ S(x, 18) ^ R(x, 3)) +Gamma1 = lambda x: (S(x, 17) ^ S(x, 19) ^ R(x, 10)) + +def sha_transform(sha_info): + W = [] + + d = sha_info['data'] + for i in range(0,16): + W.append( (d[4*i]<<24) + (d[4*i+1]<<16) + (d[4*i+2]<<8) + d[4*i+3]) + + for i in range(16,64): + W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffff ) + + ss = sha_info['digest'][:] + + def RND(a,b,c,d,e,f,g,h,i,ki): + t0 = h + Sigma1(e) + Ch(e, f, g) + ki + W[i]; + t1 = Sigma0(a) + Maj(a, b, c); + d += t0; + h = t0 + t1; + return d & 0xffffffff, h & 0xffffffff + + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x71374491); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcf); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba5); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25b); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b01); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a7); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c1); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc6); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dc); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c8); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7); + ss[7], ss[3] = 
RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf3); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x14292967); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a85); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b2138); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d13); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a7354); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c85); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a1); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664b); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a3); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd6990624); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e3585); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa070); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c08); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774c); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4a); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3); + ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee); + ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f); + ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814); + ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc70208); + ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa); + ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506ceb); + ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7); + ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2); + + dig = [] + for i, x in enumerate(sha_info['digest']): + dig.append( (x + ss[i]) & 0xffffffff ) + sha_info['digest'] = dig + +def sha_init(): + sha_info = new_shaobject() + sha_info['digest'] = [0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19] + sha_info['count_lo'] = 0 + sha_info['count_hi'] = 0 + sha_info['local'] = 0 + sha_info['digestsize'] = 32 + return sha_info + +def sha224_init(): + sha_info = new_shaobject() + sha_info['digest'] = [0xc1059ed8, 
0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4] + sha_info['count_lo'] = 0 + sha_info['count_hi'] = 0 + sha_info['local'] = 0 + sha_info['digestsize'] = 28 + return sha_info + +def sha_update(sha_info, buffer): + if isinstance(buffer, str): + raise TypeError("Unicode strings must be encoded before hashing") + count = len(buffer) + buffer_idx = 0 + clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff + if clo < sha_info['count_lo']: + sha_info['count_hi'] += 1 + sha_info['count_lo'] = clo + + sha_info['count_hi'] += (count >> 29) + + if sha_info['local']: + i = SHA_BLOCKSIZE - sha_info['local'] + if i > count: + i = count + + # copy buffer + sha_info['data'][sha_info['local']:sha_info['local']+i] = buffer[buffer_idx:buffer_idx+i] + + count -= i + buffer_idx += i + + sha_info['local'] += i + if sha_info['local'] == SHA_BLOCKSIZE: + sha_transform(sha_info) + sha_info['local'] = 0 + else: + return + + while count >= SHA_BLOCKSIZE: + # copy buffer + sha_info['data'] = list(buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]) + count -= SHA_BLOCKSIZE + buffer_idx += SHA_BLOCKSIZE + sha_transform(sha_info) + + + # copy buffer + pos = sha_info['local'] + sha_info['data'][pos:pos+count] = buffer[buffer_idx:buffer_idx + count] + sha_info['local'] = count + +def sha_final(sha_info): + lo_bit_count = sha_info['count_lo'] + hi_bit_count = sha_info['count_hi'] + count = (lo_bit_count >> 3) & 0x3f + sha_info['data'][count] = 0x80; + count += 1 + if count > SHA_BLOCKSIZE - 8: + # zero the bytes in data after the count + sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count)) + sha_transform(sha_info) + # zero bytes in data + sha_info['data'] = [0] * SHA_BLOCKSIZE + else: + sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count)) + + sha_info['data'][56] = (hi_bit_count >> 24) & 0xff + sha_info['data'][57] = (hi_bit_count >> 16) & 0xff + sha_info['data'][58] = (hi_bit_count >> 8) & 0xff + sha_info['data'][59] = (hi_bit_count >> 0) & 0xff + sha_info['data'][60] = (lo_bit_count >> 24) & 0xff + sha_info['data'][61] = (lo_bit_count >> 16) & 0xff + sha_info['data'][62] = (lo_bit_count >> 8) & 0xff + sha_info['data'][63] = (lo_bit_count >> 0) & 0xff + + sha_transform(sha_info) + + dig = [] + for i in sha_info['digest']: + dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ]) + return ''.join([chr(i) for i in dig]) + +class sha256(object): + digest_size = digestsize = SHA_DIGESTSIZE + block_size = SHA_BLOCKSIZE + + def __init__(self, s=None): + self._sha = sha_init() + if s: + sha_update(self._sha, s) + + def update(self, s): + sha_update(self._sha, s) + + def digest(self): + return sha_final(self._sha.copy())[:self._sha['digestsize']] + + def hexdigest(self): + return ''.join(['%.2x' % ord(i) for i in self.digest()]) + + def copy(self): + new = sha256.__new__(sha256) + new._sha = self._sha.copy() + return new + +class sha224(sha256): + digest_size = digestsize = 28 + + def __init__(self, s=None): + self._sha = sha224_init() + if s: + sha_update(self._sha, s) + + def copy(self): + new = sha224.__new__(sha224) + new._sha = self._sha.copy() + return new + +def test(): + a_str = "just a test string" + + assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest() + assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest() + assert '8113ebf33c97daa9998762aacafe750c7cefc2b2f173c90c59663a57fe626f21' == sha256(a_str*7).hexdigest() + + s = 
sha256(a_str) + s.update(a_str) + assert '03d9963e05a094593190b6fc794cb1a3e1ac7d7883f0b5855268afeccc70d461' == s.hexdigest() + +if __name__ == "__main__": + test() diff --git a/src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget.cpp b/src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget.cpp new file mode 100644 index 000000000..471928f6a --- /dev/null +++ b/src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget.cpp @@ -0,0 +1,46 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#include "common/default_types/ec_pp.hpp" +#include "common/utils.hpp" +#include "common/profiling.hpp" +#include "gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp" + +using namespace libsnark; + +template +void test_two_to_one() +{ + protoboard pb; + + digest_variable left(pb, SHA256_digest_size, "left"); + digest_variable right(pb, SHA256_digest_size, "right"); + digest_variable output(pb, SHA256_digest_size, "output"); + + sha256_two_to_one_hash_gadget f(pb, left, right, output, "f"); + f.generate_r1cs_constraints(); + printf("Number of constraints for sha256_two_to_one_hash_gadget: %zu\n", pb.num_constraints()); + + const bit_vector left_bv = int_list_to_bits({0x426bc2d8, 0x4dc86782, 0x81e8957a, 0x409ec148, 0xe6cffbe8, 0xafe6ba4f, 0x9c6f1978, 0xdd7af7e9}, 32); + const bit_vector right_bv = int_list_to_bits({0x038cce42, 0xabd366b8, 0x3ede7e00, 0x9130de53, 0x72cdf73d, 0xee825114, 0x8cb48d1b, 0x9af68ad0}, 32); + const bit_vector hash_bv = int_list_to_bits({0xeffd0b7f, 0x1ccba116, 0x2ee816f7, 0x31c62b48, 0x59305141, 0x990e5c0a, 0xce40d33d, 0x0b1167d1}, 32); + + left.generate_r1cs_witness(left_bv); + right.generate_r1cs_witness(right_bv); + + f.generate_r1cs_witness(); + output.generate_r1cs_witness(hash_bv); + + assert(pb.is_satisfied()); +} + +int main(void) +{ + start_profiling(); + default_ec_pp::init_public_params(); + test_two_to_one >(); +} diff --git a/src/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp b/src/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp new file mode 100644 index 000000000..0efa7cf4d --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp @@ -0,0 +1,38 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
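+ *
+ * merkle_authentication_path_variable holds, for every level of a Merkle
+ * tree of depth tree_depth, a pair of digest_variables (left_digests[i],
+ * right_digests[i]); generate_r1cs_witness() places the sibling digest of
+ * the given authentication path into the left or right slot at each level,
+ * according to the corresponding bit of the leaf address.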
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_AUTHENTICATION_PATH_VARIABLE_HPP_ +#define MERKLE_AUTHENTICATION_PATH_VARIABLE_HPP_ + +#include "common/data_structures/merkle_tree.hpp" +#include "gadgetlib1/gadget.hpp" +#include "gadgetlib1/gadgets/hashes/hash_io.hpp" + +namespace libsnark { + +template +class merkle_authentication_path_variable : public gadget { +public: + + const size_t tree_depth; + std::vector > left_digests; + std::vector > right_digests; + + merkle_authentication_path_variable(protoboard &pb, + const size_t tree_depth, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(const size_t address, const merkle_authentication_path &path); + merkle_authentication_path get_authentication_path(const size_t address) const; +}; + +} // libsnark + +#include "gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.tcc" + +#endif // MERKLE_AUTHENTICATION_PATH_VARIABLE_HPP diff --git a/src/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.tcc b/src/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.tcc new file mode 100644 index 000000000..d773051ab --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.tcc @@ -0,0 +1,76 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_AUTHENTICATION_PATH_VARIABLE_TCC_ +#define MERKLE_AUTHENTICATION_PATH_VARIABLE_TCC_ + +namespace libsnark { + +template +merkle_authentication_path_variable::merkle_authentication_path_variable(protoboard &pb, + const size_t tree_depth, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + tree_depth(tree_depth) +{ + for (size_t i = 0; i < tree_depth; ++i) + { + left_digests.emplace_back(digest_variable(pb, HashT::get_digest_len(), FMT(annotation_prefix, " left_digests_%zu", i))); + right_digests.emplace_back(digest_variable(pb, HashT::get_digest_len(), FMT(annotation_prefix, " right_digests_%zu", i))); + } +} + +template +void merkle_authentication_path_variable::generate_r1cs_constraints() +{ + for (size_t i = 0; i < tree_depth; ++i) + { + left_digests[i].generate_r1cs_constraints(); + right_digests[i].generate_r1cs_constraints(); + } +} + +template +void merkle_authentication_path_variable::generate_r1cs_witness(const size_t address, const merkle_authentication_path &path) +{ + assert(path.size() == tree_depth); + + for (size_t i = 0; i < tree_depth; ++i) + { + if (address & (1ul << (tree_depth-1-i))) + { + left_digests[i].generate_r1cs_witness(path[i]); + } + else + { + right_digests[i].generate_r1cs_witness(path[i]); + } + } +} + +template +merkle_authentication_path merkle_authentication_path_variable::get_authentication_path(const size_t address) const +{ + merkle_authentication_path result; + for (size_t i = 0; i < tree_depth; ++i) + { + if (address & (1ul << (tree_depth-1-i))) + { + result.emplace_back(left_digests[i].get_digest()); + } + else + { + result.emplace_back(right_digests[i].get_digest()); + } + } + + return result; +} + +} // libsnark + +#endif // MERKLE_AUTHENTICATION_PATH_VARIABLE_TCC diff --git 
a/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp new file mode 100644 index 000000000..b1e3a4f05 --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp @@ -0,0 +1,73 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for the Merkle tree check read gadget. + + The gadget checks the following: given a root R, address A, value V, and + authentication path P, check that P is a valid authentication path for the + value V as the A-th leaf in a Merkle tree with root R. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_TREE_CHECK_READ_GADGET_HPP_ +#define MERKLE_TREE_CHECK_READ_GADGET_HPP_ + +#include "common/data_structures/merkle_tree.hpp" +#include "gadgetlib1/gadget.hpp" +#include "gadgetlib1/gadgets/hashes/hash_io.hpp" +#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp" +#include "gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp" + +namespace libsnark { + +template +class merkle_tree_check_read_gadget : public gadget { +private: + + std::vector hashers; + std::vector > hasher_inputs; + std::vector > propagators; + std::vector > internal_output; + + std::shared_ptr > computed_root; + std::shared_ptr > check_root; + +public: + + const size_t digest_size; + const size_t tree_depth; + pb_linear_combination_array address_bits; + digest_variable leaf; + digest_variable root; + merkle_authentication_path_variable path; + pb_linear_combination read_successful; + + merkle_tree_check_read_gadget(protoboard &pb, + const size_t tree_depth, + const pb_linear_combination_array &address_bits, + const digest_variable &leaf_digest, + const digest_variable &root_digest, + const merkle_authentication_path_variable &path, + const pb_linear_combination &read_successful, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); + + static size_t root_size_in_bits(); + /* for debugging purposes */ + static size_t expected_constraints(const size_t tree_depth); +}; + +template +void test_merkle_tree_check_read_gadget(); + +} // libsnark + +#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.tcc" + +#endif // MERKLE_TREE_CHECK_READ_GADGET_HPP_ diff --git a/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.tcc b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.tcc new file mode 100644 index 000000000..6002a5886 --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.tcc @@ -0,0 +1,196 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for the Merkle tree check read. + + See merkle_tree_check_read_gadget.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_TREE_CHECK_READ_GADGET_TCC_ +#define MERKLE_TREE_CHECK_READ_GADGET_TCC_ + +namespace libsnark { + +template +merkle_tree_check_read_gadget::merkle_tree_check_read_gadget(protoboard &pb, + const size_t tree_depth, + const pb_linear_combination_array &address_bits, + const digest_variable &leaf, + const digest_variable &root, + const merkle_authentication_path_variable &path, + const pb_linear_combination &read_successful, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + digest_size(HashT::get_digest_len()), + tree_depth(tree_depth), + address_bits(address_bits), + leaf(leaf), + root(root), + path(path), + read_successful(read_successful) +{ + /* + The tricky part here is ordering. For Merkle tree + authentication paths, path[0] corresponds to one layer below + the root (and path[tree_depth-1] corresponds to the layer + containing the leaf), while address_bits has the reverse order: + address_bits[0] is LSB, and corresponds to layer containing the + leaf, and address_bits[tree_depth-1] is MSB, and corresponds to + the subtree directly under the root. + */ + assert(tree_depth > 0); + assert(tree_depth == address_bits.size()); + + for (size_t i = 0; i < tree_depth-1; ++i) + { + internal_output.emplace_back(digest_variable(pb, digest_size, FMT(this->annotation_prefix, " internal_output_%zu", i))); + } + + computed_root.reset(new digest_variable(pb, digest_size, FMT(this->annotation_prefix, " computed_root"))); + + for (size_t i = 0; i < tree_depth; ++i) + { + block_variable inp(pb, path.left_digests[i], path.right_digests[i], FMT(this->annotation_prefix, " inp_%zu", i)); + hasher_inputs.emplace_back(inp); + hashers.emplace_back(HashT(pb, 2*digest_size, inp, (i == 0 ? *computed_root : internal_output[i-1]), + FMT(this->annotation_prefix, " load_hashers_%zu", i))); + } + + for (size_t i = 0; i < tree_depth; ++i) + { + /* + The propagators take a computed hash value (or leaf in the + base case) and propagate it one layer up, either in the left + or the right slot of authentication_path_variable. + */ + propagators.emplace_back(digest_selector_gadget(pb, digest_size, i < tree_depth - 1 ? 
internal_output[i] : leaf, + address_bits[tree_depth-1-i], path.left_digests[i], path.right_digests[i], + FMT(this->annotation_prefix, " digest_selector_%zu", i))); + } + + check_root.reset(new bit_vector_copy_gadget(pb, computed_root->bits, root.bits, read_successful, FieldT::capacity(), FMT(annotation_prefix, " check_root"))); +} + +template +void merkle_tree_check_read_gadget::generate_r1cs_constraints() +{ + /* ensure correct hash computations */ + for (size_t i = 0; i < tree_depth; ++i) + { + // Note that we check root outside and have enforced booleanity of path.left_digests/path.right_digests outside in path.generate_r1cs_constraints + hashers[i].generate_r1cs_constraints(false); + } + + /* ensure consistency of path.left_digests/path.right_digests with internal_output */ + for (size_t i = 0; i < tree_depth; ++i) + { + propagators[i].generate_r1cs_constraints(); + } + + check_root->generate_r1cs_constraints(false, false); +} + +template +void merkle_tree_check_read_gadget::generate_r1cs_witness() +{ + /* do the hash computations bottom-up */ + for (int i = tree_depth-1; i >= 0; --i) + { + /* propagate previous input */ + propagators[i].generate_r1cs_witness(); + + /* compute hash */ + hashers[i].generate_r1cs_witness(); + } + + check_root->generate_r1cs_witness(); +} + +template +size_t merkle_tree_check_read_gadget::root_size_in_bits() +{ + return HashT::get_digest_len(); +} + +template +size_t merkle_tree_check_read_gadget::expected_constraints(const size_t tree_depth) +{ + /* NB: this includes path constraints */ + const size_t hasher_constraints = tree_depth * HashT::expected_constraints(false); + const size_t propagator_constraints = tree_depth * HashT::get_digest_len(); + const size_t authentication_path_constraints = 2 * tree_depth * HashT::get_digest_len(); + const size_t check_root_constraints = 3 * div_ceil(HashT::get_digest_len(), FieldT::capacity()); + + return hasher_constraints + propagator_constraints + authentication_path_constraints + check_root_constraints; +} + +template +void test_merkle_tree_check_read_gadget() +{ + /* prepare test */ + const size_t digest_len = HashT::get_digest_len(); + const size_t tree_depth = 16; + std::vector path(tree_depth); + + bit_vector prev_hash(digest_len); + std::generate(prev_hash.begin(), prev_hash.end(), [&]() { return std::rand() % 2; }); + bit_vector leaf = prev_hash; + + bit_vector address_bits; + + size_t address = 0; + for (long level = tree_depth-1; level >= 0; --level) + { + const bool computed_is_right = (std::rand() % 2); + address |= (computed_is_right ? 1ul << (tree_depth-1-level) : 0); + address_bits.push_back(computed_is_right); + bit_vector other(digest_len); + std::generate(other.begin(), other.end(), [&]() { return std::rand() % 2; }); + + bit_vector block = prev_hash; + block.insert(computed_is_right ? 
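+        /* if the computed node is the right child, the sibling digest forms the left
+           half of the hash input and is inserted at the front; otherwise it is
+           appended as the right half */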
block.begin() : block.end(), other.begin(), other.end()); + bit_vector h = HashT::get_hash(block); + + path[level] = other; + + prev_hash = h; + } + bit_vector root = prev_hash; + + /* execute test */ + protoboard pb; + pb_variable_array address_bits_va; + address_bits_va.allocate(pb, tree_depth, "address_bits"); + digest_variable leaf_digest(pb, digest_len, "input_block"); + digest_variable root_digest(pb, digest_len, "output_digest"); + merkle_authentication_path_variable path_var(pb, tree_depth, "path_var"); + merkle_tree_check_read_gadget ml(pb, tree_depth, address_bits_va, leaf_digest, root_digest, path_var, ONE, "ml"); + + path_var.generate_r1cs_constraints(); + ml.generate_r1cs_constraints(); + + address_bits_va.fill_with_bits(pb, address_bits); + assert(address_bits_va.get_field_element_from_bits(pb).as_ulong() == address); + leaf_digest.generate_r1cs_witness(leaf); + path_var.generate_r1cs_witness(address, path); + ml.generate_r1cs_witness(); + + /* make sure that read checker didn't accidentally overwrite anything */ + address_bits_va.fill_with_bits(pb, address_bits); + leaf_digest.generate_r1cs_witness(leaf); + root_digest.generate_r1cs_witness(root); + assert(pb.is_satisfied()); + + const size_t num_constraints = pb.num_constraints(); + const size_t expected_constraints = merkle_tree_check_read_gadget::expected_constraints(tree_depth); + assert(num_constraints == expected_constraints); +} + +} // libsnark + +#endif // MERKLE_TREE_CHECK_READ_GADGET_TCC_ diff --git a/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.hpp b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.hpp new file mode 100644 index 000000000..2d6840d61 --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.hpp @@ -0,0 +1,91 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for the Merkle tree check read gadget. + + The gadget checks the following: given two roots R1 and R2, address A, two + values V1 and V2, and authentication path P, check that + - P is a valid authentication path for the value V1 as the A-th leaf in a Merkle tree with root R1, and + - P is a valid authentication path for the value V2 as the A-th leaf in a Merkle tree with root R2. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_TREE_CHECK_UPDATE_GADGET_HPP_ +#define MERKLE_TREE_CHECK_UPDATE_GADGET_HPP_ + +#include "common/data_structures/merkle_tree.hpp" +#include "gadgetlib1/gadget.hpp" +#include "gadgetlib1/gadgets/hashes/crh_gadget.hpp" +#include "gadgetlib1/gadgets/hashes/hash_io.hpp" +#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp" +#include "gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp" + +namespace libsnark { + +template +class merkle_tree_check_update_gadget : public gadget { +private: + + std::vector prev_hashers; + std::vector > prev_hasher_inputs; + std::vector > prev_propagators; + std::vector > prev_internal_output; + + std::vector next_hashers; + std::vector > next_hasher_inputs; + std::vector > next_propagators; + std::vector > next_internal_output; + + std::shared_ptr > computed_next_root; + std::shared_ptr > check_next_root; + +public: + + const size_t digest_size; + const size_t tree_depth; + + pb_variable_array address_bits; + digest_variable prev_leaf_digest; + digest_variable prev_root_digest; + merkle_authentication_path_variable prev_path; + digest_variable next_leaf_digest; + digest_variable next_root_digest; + merkle_authentication_path_variable next_path; + pb_linear_combination update_successful; + + /* Note that while it is necessary to generate R1CS constraints + for prev_path, it is not necessary to do so for next_path. See + comment in the implementation of generate_r1cs_constraints() */ + + merkle_tree_check_update_gadget(protoboard &pb, + const size_t tree_depth, + const pb_variable_array &address_bits, + const digest_variable &prev_leaf_digest, + const digest_variable &prev_root_digest, + const merkle_authentication_path_variable &prev_path, + const digest_variable &next_leaf_digest, + const digest_variable &next_root_digest, + const merkle_authentication_path_variable &next_path, + const pb_linear_combination &update_successful, + const std::string &annotation_prefix); + + void generate_r1cs_constraints(); + void generate_r1cs_witness(); + + static size_t root_size_in_bits(); + /* for debugging purposes */ + static size_t expected_constraints(const size_t tree_depth); +}; + +template +void test_merkle_tree_check_update_gadget(); + +} // libsnark + +#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.tcc" + +#endif // MERKLE_TREE_CHECK_UPDATE_GADGET_HPP_ diff --git a/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.tcc b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.tcc new file mode 100644 index 000000000..1ac08edbb --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.tcc @@ -0,0 +1,265 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for the Merkle tree check update gadget. + + See merkle_tree_check_update_gadget.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef MERKLE_TREE_CHECK_UPDATE_GADGET_TCC_ +#define MERKLE_TREE_CHECK_UPDATE_GADGET_TCC_ + +namespace libsnark { + +template +merkle_tree_check_update_gadget::merkle_tree_check_update_gadget(protoboard &pb, + const size_t tree_depth, + const pb_variable_array &address_bits, + const digest_variable &prev_leaf_digest, + const digest_variable &prev_root_digest, + const merkle_authentication_path_variable &prev_path, + const digest_variable &next_leaf_digest, + const digest_variable &next_root_digest, + const merkle_authentication_path_variable &next_path, + const pb_linear_combination &update_successful, + const std::string &annotation_prefix) : + gadget(pb, annotation_prefix), + digest_size(HashT::get_digest_len()), + tree_depth(tree_depth), + address_bits(address_bits), + prev_leaf_digest(prev_leaf_digest), + prev_root_digest(prev_root_digest), + prev_path(prev_path), + next_leaf_digest(next_leaf_digest), + next_root_digest(next_root_digest), + next_path(next_path), + update_successful(update_successful) +{ + assert(tree_depth > 0); + assert(tree_depth == address_bits.size()); + + for (size_t i = 0; i < tree_depth-1; ++i) + { + prev_internal_output.emplace_back(digest_variable(pb, digest_size, FMT(this->annotation_prefix, " prev_internal_output_%zu", i))); + next_internal_output.emplace_back(digest_variable(pb, digest_size, FMT(this->annotation_prefix, " next_internal_output_%zu", i))); + } + + computed_next_root.reset(new digest_variable(pb, digest_size, FMT(this->annotation_prefix, " computed_root"))); + + for (size_t i = 0; i < tree_depth; ++i) + { + block_variable prev_inp(pb, prev_path.left_digests[i], prev_path.right_digests[i], FMT(this->annotation_prefix, " prev_inp_%zu", i)); + prev_hasher_inputs.emplace_back(prev_inp); + prev_hashers.emplace_back(HashT(pb, 2*digest_size, prev_inp, (i == 0 ? prev_root_digest : prev_internal_output[i-1]), + FMT(this->annotation_prefix, " prev_hashers_%zu", i))); + + block_variable next_inp(pb, next_path.left_digests[i], next_path.right_digests[i], FMT(this->annotation_prefix, " next_inp_%zu", i)); + next_hasher_inputs.emplace_back(next_inp); + next_hashers.emplace_back(HashT(pb, 2*digest_size, next_inp, (i == 0 ? *computed_next_root : next_internal_output[i-1]), + FMT(this->annotation_prefix, " next_hashers_%zu", i))); + } + + for (size_t i = 0; i < tree_depth; ++i) + { + prev_propagators.emplace_back(digest_selector_gadget(pb, digest_size, i < tree_depth -1 ? prev_internal_output[i] : prev_leaf_digest, + address_bits[tree_depth-1-i], prev_path.left_digests[i], prev_path.right_digests[i], + FMT(this->annotation_prefix, " prev_propagators_%zu", i))); + next_propagators.emplace_back(digest_selector_gadget(pb, digest_size, i < tree_depth -1 ? 
next_internal_output[i] : next_leaf_digest, + address_bits[tree_depth-1-i], next_path.left_digests[i], next_path.right_digests[i], + FMT(this->annotation_prefix, " next_propagators_%zu", i))); + } + + check_next_root.reset(new bit_vector_copy_gadget(pb, computed_next_root->bits, next_root_digest.bits, update_successful, FieldT::capacity(), FMT(annotation_prefix, " check_next_root"))); +} + +template +void merkle_tree_check_update_gadget::generate_r1cs_constraints() +{ + /* ensure correct hash computations */ + for (size_t i = 0; i < tree_depth; ++i) + { + prev_hashers[i].generate_r1cs_constraints(false); // we check root outside and prev_left/prev_right above + next_hashers[i].generate_r1cs_constraints(true); // however we must check right side hashes + } + + /* ensure consistency of internal_left/internal_right with internal_output */ + for (size_t i = 0; i < tree_depth; ++i) + { + prev_propagators[i].generate_r1cs_constraints(); + next_propagators[i].generate_r1cs_constraints(); + } + + /* ensure that prev auxiliary input and next auxiliary input match */ + for (size_t i = 0; i < tree_depth; ++i) + { + for (size_t j = 0; j < digest_size; ++j) + { + /* + addr * (prev_left - next_left) + (1 - addr) * (prev_right - next_right) = 0 + addr * (prev_left - next_left - prev_right + next_right) = next_right - prev_right + */ + this->pb.add_r1cs_constraint(r1cs_constraint(address_bits[tree_depth-1-i], + prev_path.left_digests[i].bits[j] - next_path.left_digests[i].bits[j] - prev_path.right_digests[i].bits[j] + next_path.right_digests[i].bits[j], + next_path.right_digests[i].bits[j] - prev_path.right_digests[i].bits[j]), + FMT(this->annotation_prefix, " aux_check_%zu_%zu", i, j)); + } + } + + /* Note that while it is necessary to generate R1CS constraints + for prev_path, it is not necessary to do so for next_path. + + This holds, because { next_path.left_inputs[i], + next_path.right_inputs[i] } is a pair { hash_output, + auxiliary_input }. The bitness for hash_output is enforced + above by next_hashers[i].generate_r1cs_constraints. + + Because auxiliary input is the same for prev_path and next_path + (enforced above), we have that auxiliary_input part is also + constrained to be boolean, because prev_path is *all* + constrained to be all boolean. 
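+
+       Concretely, the constraint added above reads
+           addr * (prev_left - next_left - prev_right + next_right) = next_right - prev_right;
+       substituting addr = 1 forces prev_left = next_left, while addr = 0 forces
+       prev_right = next_right, so in either case the sibling (auxiliary) digest is
+       the same in prev_path and next_path.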
*/ + + check_next_root->generate_r1cs_constraints(false, false); +} + +template +void merkle_tree_check_update_gadget::generate_r1cs_witness() +{ + /* do the hash computations bottom-up */ + for (int i = tree_depth-1; i >= 0; --i) + { + /* ensure consistency of prev_path and next_path */ + if (this->pb.val(address_bits[tree_depth-1-i]) == FieldT::one()) + { + next_path.left_digests[i].generate_r1cs_witness(prev_path.left_digests[i].get_digest()); + } + else + { + next_path.right_digests[i].generate_r1cs_witness(prev_path.right_digests[i].get_digest()); + } + + /* propagate previous input */ + prev_propagators[i].generate_r1cs_witness(); + next_propagators[i].generate_r1cs_witness(); + + /* compute hash */ + prev_hashers[i].generate_r1cs_witness(); + next_hashers[i].generate_r1cs_witness(); + } + + check_next_root->generate_r1cs_witness(); +} + +template +size_t merkle_tree_check_update_gadget::root_size_in_bits() +{ + return HashT::get_digest_len(); +} + +template +size_t merkle_tree_check_update_gadget::expected_constraints(const size_t tree_depth) +{ + /* NB: this includes path constraints */ + const size_t prev_hasher_constraints = tree_depth * HashT::expected_constraints(false); + const size_t next_hasher_constraints = tree_depth * HashT::expected_constraints(true); + const size_t prev_authentication_path_constraints = 2 * tree_depth * HashT::get_digest_len(); + const size_t prev_propagator_constraints = tree_depth * HashT::get_digest_len(); + const size_t next_propagator_constraints = tree_depth * HashT::get_digest_len(); + const size_t check_next_root_constraints = 3 * div_ceil(HashT::get_digest_len(), FieldT::capacity()); + const size_t aux_equality_constraints = tree_depth * HashT::get_digest_len(); + + return (prev_hasher_constraints + next_hasher_constraints + prev_authentication_path_constraints + + prev_propagator_constraints + next_propagator_constraints + check_next_root_constraints + + aux_equality_constraints); +} + +template +void test_merkle_tree_check_update_gadget() +{ + /* prepare test */ + const size_t digest_len = HashT::get_digest_len(); + + const size_t tree_depth = 16; + std::vector prev_path(tree_depth); + + bit_vector prev_load_hash(digest_len); + std::generate(prev_load_hash.begin(), prev_load_hash.end(), [&]() { return std::rand() % 2; }); + bit_vector prev_store_hash(digest_len); + std::generate(prev_store_hash.begin(), prev_store_hash.end(), [&]() { return std::rand() % 2; }); + + bit_vector loaded_leaf = prev_load_hash; + bit_vector stored_leaf = prev_store_hash; + + bit_vector address_bits; + + size_t address = 0; + for (long level = tree_depth-1; level >= 0; --level) + { + const bool computed_is_right = (std::rand() % 2); + address |= (computed_is_right ? 1ul << (tree_depth-1-level) : 0); + address_bits.push_back(computed_is_right); + bit_vector other(digest_len); + std::generate(other.begin(), other.end(), [&]() { return std::rand() % 2; }); + + bit_vector load_block = prev_load_hash; + load_block.insert(computed_is_right ? load_block.begin() : load_block.end(), other.begin(), other.end()); + bit_vector store_block = prev_store_hash; + store_block.insert(computed_is_right ? 
store_block.begin() : store_block.end(), other.begin(), other.end()); + + bit_vector load_h = HashT::get_hash(load_block); + bit_vector store_h = HashT::get_hash(store_block); + + prev_path[level] = other; + + prev_load_hash = load_h; + prev_store_hash = store_h; + } + + bit_vector load_root = prev_load_hash; + bit_vector store_root = prev_store_hash; + + /* execute the test */ + protoboard pb; + pb_variable_array address_bits_va; + address_bits_va.allocate(pb, tree_depth, "address_bits"); + digest_variable prev_leaf_digest(pb, digest_len, "prev_leaf_digest"); + digest_variable prev_root_digest(pb, digest_len, "prev_root_digest"); + merkle_authentication_path_variable prev_path_var(pb, tree_depth, "prev_path_var"); + digest_variable next_leaf_digest(pb, digest_len, "next_leaf_digest"); + digest_variable next_root_digest(pb, digest_len, "next_root_digest"); + merkle_authentication_path_variable next_path_var(pb, tree_depth, "next_path_var"); + merkle_tree_check_update_gadget mls(pb, tree_depth, address_bits_va, + prev_leaf_digest, prev_root_digest, prev_path_var, + next_leaf_digest, next_root_digest, next_path_var, ONE, "mls"); + + prev_path_var.generate_r1cs_constraints(); + mls.generate_r1cs_constraints(); + + address_bits_va.fill_with_bits(pb, address_bits); + assert(address_bits_va.get_field_element_from_bits(pb).as_ulong() == address); + prev_leaf_digest.generate_r1cs_witness(loaded_leaf); + prev_path_var.generate_r1cs_witness(address, prev_path); + next_leaf_digest.generate_r1cs_witness(stored_leaf); + address_bits_va.fill_with_bits(pb, address_bits); + mls.generate_r1cs_witness(); + + /* make sure that update check will check for the right things */ + prev_leaf_digest.generate_r1cs_witness(loaded_leaf); + next_leaf_digest.generate_r1cs_witness(stored_leaf); + prev_root_digest.generate_r1cs_witness(load_root); + next_root_digest.generate_r1cs_witness(store_root); + address_bits_va.fill_with_bits(pb, address_bits); + assert(pb.is_satisfied()); + + const size_t num_constraints = pb.num_constraints(); + const size_t expected_constraints = merkle_tree_check_update_gadget::expected_constraints(tree_depth); + assert(num_constraints == expected_constraints); +} + +} // libsnark + +#endif // MERKLE_TREE_CHECK_UPDATE_GADGET_TCC_ diff --git a/src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets.cpp b/src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets.cpp new file mode 100644 index 000000000..8d52c579b --- /dev/null +++ b/src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets.cpp @@ -0,0 +1,48 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifdef CURVE_BN128 +#include "algebra/curves/bn128/bn128_pp.hpp" +#endif +#include "algebra/curves/edwards/edwards_pp.hpp" +#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp" +#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp" +#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp" +#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.hpp" +#include "gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp" + +using namespace libsnark; + +template +void test_all_merkle_tree_gadgets() +{ + typedef Fr FieldT; + test_merkle_tree_check_read_gadget >(); + test_merkle_tree_check_read_gadget >(); + + test_merkle_tree_check_update_gadget >(); + test_merkle_tree_check_update_gadget >(); +} + +int main(void) +{ + start_profiling(); + +#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled + bn128_pp::init_public_params(); + test_all_merkle_tree_gadgets(); +#endif + + edwards_pp::init_public_params(); + test_all_merkle_tree_gadgets(); + + mnt4_pp::init_public_params(); + test_all_merkle_tree_gadgets(); + + mnt6_pp::init_public_params(); + test_all_merkle_tree_gadgets(); +} diff --git a/src/gadgetlib1/pb_variable.hpp b/src/gadgetlib1/pb_variable.hpp new file mode 100644 index 000000000..fdf64d014 --- /dev/null +++ b/src/gadgetlib1/pb_variable.hpp @@ -0,0 +1,144 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef PB_VARIABLE_HPP_ +#define PB_VARIABLE_HPP_ + +#include +#include +#include +#include "common/utils.hpp" +#include "relations/variable.hpp" + +namespace libsnark { + +typedef size_t lc_index_t; + +template +class protoboard; + +template +class pb_variable : public variable { +public: + pb_variable(const var_index_t index = 0) : variable(index) {}; + + void allocate(protoboard &pb, const std::string &annotation=""); +}; + +template +class pb_variable_array : private std::vector > +{ + typedef std::vector > contents; +public: + using typename contents::iterator; + using typename contents::const_iterator; + using typename contents::reverse_iterator; + using typename contents::const_reverse_iterator; + + using contents::begin; + using contents::end; + using contents::rbegin; + using contents::rend; + using contents::emplace_back; + using contents::insert; + using contents::reserve; + using contents::size; + using contents::empty; + using contents::operator[]; + using contents::resize; + + pb_variable_array() : contents() {}; + pb_variable_array(size_t count, const pb_variable &value) : contents(count, value) {}; + pb_variable_array(typename contents::const_iterator first, typename contents::const_iterator last) : contents(first, last) {}; + pb_variable_array(typename contents::const_reverse_iterator first, typename contents::const_reverse_iterator last) : contents(first, last) {}; + void allocate(protoboard &pb, const size_t n, const std::string &annotation_prefix=""); + + void fill_with_field_elements(protoboard &pb, const std::vector& vals) const; + void fill_with_bits(protoboard &pb, const bit_vector& bits) const; + void fill_with_bits_of_ulong(protoboard &pb, const unsigned long i) const; + void fill_with_bits_of_field_element(protoboard &pb, 
const FieldT &r) const; + + std::vector get_vals(const protoboard &pb) const; + bit_vector get_bits(const protoboard &pb) const; + + FieldT get_field_element_from_bits(const protoboard &pb) const; +}; + +/* index 0 corresponds to the constant term (used in legacy code) */ +#define ONE pb_variable(0) + +template +class pb_linear_combination : public linear_combination { +public: + bool is_variable; + lc_index_t index; + + pb_linear_combination(); + pb_linear_combination(const pb_variable &var); + + void assign(protoboard &pb, const linear_combination &lc); + void evaluate(protoboard &pb) const; + + bool is_constant() const; + FieldT constant_term() const; +}; + +template +class pb_linear_combination_array : private std::vector > +{ + typedef std::vector > contents; +public: + using typename contents::iterator; + using typename contents::const_iterator; + using typename contents::reverse_iterator; + using typename contents::const_reverse_iterator; + + using contents::begin; + using contents::end; + using contents::rbegin; + using contents::rend; + using contents::emplace_back; + using contents::insert; + using contents::reserve; + using contents::size; + using contents::empty; + using contents::operator[]; + using contents::resize; + + pb_linear_combination_array() : contents() {}; + pb_linear_combination_array(const pb_variable_array &arr) { for (auto &v : arr) this->emplace_back(pb_linear_combination(v)); }; + pb_linear_combination_array(size_t count) : contents(count) {}; + pb_linear_combination_array(size_t count, const pb_linear_combination &value) : contents(count, value) {}; + pb_linear_combination_array(typename contents::const_iterator first, typename contents::const_iterator last) : contents(first, last) {}; + pb_linear_combination_array(typename contents::const_reverse_iterator first, typename contents::const_reverse_iterator last) : contents(first, last) {}; + + void evaluate(protoboard &pb) const; + + void fill_with_field_elements(protoboard &pb, const std::vector& vals) const; + void fill_with_bits(protoboard &pb, const bit_vector& bits) const; + void fill_with_bits_of_ulong(protoboard &pb, const unsigned long i) const; + void fill_with_bits_of_field_element(protoboard &pb, const FieldT &r) const; + + std::vector get_vals(const protoboard &pb) const; + bit_vector get_bits(const protoboard &pb) const; + + FieldT get_field_element_from_bits(const protoboard &pb) const; +}; + +template +linear_combination pb_sum(const pb_linear_combination_array &v); + +template +linear_combination pb_packing_sum(const pb_linear_combination_array &v); + +template +linear_combination pb_coeff_sum(const pb_linear_combination_array &v, const std::vector &coeffs); + +} // libsnark +#include "gadgetlib1/pb_variable.tcc" + +#endif // PB_VARIABLE_HPP_ diff --git a/src/gadgetlib1/pb_variable.tcc b/src/gadgetlib1/pb_variable.tcc new file mode 100644 index 000000000..b36b3f8d7 --- /dev/null +++ b/src/gadgetlib1/pb_variable.tcc @@ -0,0 +1,330 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef PB_VARIABLE_TCC_ +#define PB_VARIABLE_TCC_ +#include +#include "gadgetlib1/protoboard.hpp" +#include "common/utils.hpp" + +namespace libsnark { + +template +void pb_variable::allocate(protoboard &pb, const std::string &annotation) +{ + this->index = pb.allocate_var_index(annotation); +} + +/* allocates pb_variable array in MSB->LSB order */ +template +void pb_variable_array::allocate(protoboard &pb, const size_t n, const std::string &annotation_prefix) +{ +#ifdef DEBUG + assert(annotation_prefix != ""); +#endif + (*this).resize(n); + + for (size_t i = 0; i < n; ++i) + { + (*this)[i].allocate(pb, FMT(annotation_prefix, "_%zu", i)); + } +} + +template +void pb_variable_array::fill_with_field_elements(protoboard &pb, const std::vector& vals) const +{ + assert(this->size() == vals.size()); + for (size_t i = 0; i < vals.size(); ++i) + { + pb.val((*this)[i]) = vals[i]; + } +} + +template +void pb_variable_array::fill_with_bits(protoboard &pb, const bit_vector& bits) const +{ + assert(this->size() == bits.size()); + for (size_t i = 0; i < bits.size(); ++i) + { + pb.val((*this)[i]) = (bits[i] ? FieldT::one() : FieldT::zero()); + } +} + +template +void pb_variable_array::fill_with_bits_of_field_element(protoboard &pb, const FieldT &r) const +{ + const bigint rint = r.as_bigint(); + for (size_t i = 0; i < this->size(); ++i) + { + pb.val((*this)[i]) = rint.test_bit(i) ? FieldT::one() : FieldT::zero(); + } +} + +template +void pb_variable_array::fill_with_bits_of_ulong(protoboard &pb, const unsigned long i) const +{ + this->fill_with_bits_of_field_element(pb, FieldT(i, true)); +} + +template +std::vector pb_variable_array::get_vals(const protoboard &pb) const +{ + std::vector result(this->size()); + for (size_t i = 0; i < this->size(); ++i) + { + result[i] = pb.val((*this)[i]); + } + return result; +} + +template +bit_vector pb_variable_array::get_bits(const protoboard &pb) const +{ + bit_vector result; + for (size_t i = 0; i < this->size(); ++i) + { + const FieldT v = pb.val((*this)[i]); + assert(v == FieldT::zero() || v == FieldT::one()); + result.push_back(v == FieldT::one()); + } + return result; +} + +template +FieldT pb_variable_array::get_field_element_from_bits(const protoboard &pb) const +{ + FieldT result = FieldT::zero(); + + for (size_t i = 0; i < this->size(); ++i) + { + /* push in the new bit */ + const FieldT v = pb.val((*this)[this->size()-1-i]); + assert(v == FieldT::zero() || v == FieldT::one()); + result += result + v; + } + + return result; +} + +template +pb_linear_combination::pb_linear_combination() +{ + this->is_variable = false; + this->index = 0; +} + +template +pb_linear_combination::pb_linear_combination(const pb_variable &var) +{ + this->is_variable = true; + this->index = var.index; + this->terms.emplace_back(linear_term(var)); +} + +template +void pb_linear_combination::assign(protoboard &pb, const linear_combination &lc) +{ + assert(this->is_variable == false); + this->index = pb.allocate_lc_index(); + this->terms = lc.terms; +} + +template +void pb_linear_combination::evaluate(protoboard &pb) const +{ + if (this->is_variable) + { + return; // do nothing + } + + FieldT sum = 0; + for (auto term : this->terms) + { + sum += term.coeff * pb.val(pb_variable(term.index)); + } + + pb.lc_val(*this) = sum; +} + +template +bool pb_linear_combination::is_constant() const +{ + if (is_variable) + { + return (index == 0); + } + else + { + for 
(auto term : this->terms) + { + if (term.index != 0) + { + return false; + } + } + + return true; + } +} + +template +FieldT pb_linear_combination::constant_term() const +{ + if (is_variable) + { + return (index == 0 ? FieldT::one() : FieldT::zero()); + } + else + { + FieldT result = FieldT::zero(); + for (auto term : this->terms) + { + if (term.index == 0) + { + result += term.coeff; + } + } + return result; + } +} + +template +void pb_linear_combination_array::evaluate(protoboard &pb) const +{ + for (size_t i = 0; i < this->size(); ++i) + { + (*this)[i].evaluate(pb); + } +} + +template +void pb_linear_combination_array::fill_with_field_elements(protoboard &pb, const std::vector& vals) const +{ + assert(this->size() == vals.size()); + for (size_t i = 0; i < vals.size(); ++i) + { + pb.lc_val((*this)[i]) = vals[i]; + } +} + +template +void pb_linear_combination_array::fill_with_bits(protoboard &pb, const bit_vector& bits) const +{ + assert(this->size() == bits.size()); + for (size_t i = 0; i < bits.size(); ++i) + { + pb.lc_val((*this)[i]) = (bits[i] ? FieldT::one() : FieldT::zero()); + } +} + +template +void pb_linear_combination_array::fill_with_bits_of_field_element(protoboard &pb, const FieldT &r) const +{ + const bigint rint = r.as_bigint(); + for (size_t i = 0; i < this->size(); ++i) + { + pb.lc_val((*this)[i]) = rint.test_bit(i) ? FieldT::one() : FieldT::zero(); + } +} + +template +void pb_linear_combination_array::fill_with_bits_of_ulong(protoboard &pb, const unsigned long i) const +{ + this->fill_with_bits_of_field_element(pb, FieldT(i)); +} + +template +std::vector pb_linear_combination_array::get_vals(const protoboard &pb) const +{ + std::vector result(this->size()); + for (size_t i = 0; i < this->size(); ++i) + { + result[i] = pb.lc_val((*this)[i]); + } + return result; +} + +template +bit_vector pb_linear_combination_array::get_bits(const protoboard &pb) const +{ + bit_vector result; + for (size_t i = 0; i < this->size(); ++i) + { + const FieldT v = pb.lc_val((*this)[i]); + assert(v == FieldT::zero() || v == FieldT::one()); + result.push_back(v == FieldT::one()); + } + return result; +} + +template +FieldT pb_linear_combination_array::get_field_element_from_bits(const protoboard &pb) const +{ + FieldT result = FieldT::zero(); + + for (size_t i = 0; i < this->size(); ++i) + { + /* push in the new bit */ + const FieldT v = pb.lc_val((*this)[this->size()-1-i]); + assert(v == FieldT::zero() || v == FieldT::one()); + result += result + v; + } + + return result; +} + +template +linear_combination pb_sum(const pb_linear_combination_array &v) +{ + linear_combination result; + for (auto &term : v) + { + result = result + term; + } + + return result; +} + +template +linear_combination pb_packing_sum(const pb_linear_combination_array &v) +{ + FieldT twoi = FieldT::one(); // will hold 2^i entering each iteration + std::vector > all_terms; + for (auto &lc : v) + { + for (auto &term : lc.terms) + { + all_terms.emplace_back(twoi * term); + } + twoi += twoi; + } + + return linear_combination(all_terms); +} + +template +linear_combination pb_coeff_sum(const pb_linear_combination_array &v, const std::vector &coeffs) +{ + assert(v.size() == coeffs.size()); + std::vector > all_terms; + + auto coeff_it = coeffs.begin(); + for (auto &lc : v) + { + for (auto &term : lc.terms) + { + all_terms.emplace_back((*coeff_it) * term); + } + ++coeff_it; + } + + return linear_combination(all_terms); +} + + +} // libsnark +#endif // PB_VARIABLE_TCC diff --git a/src/gadgetlib1/protoboard.hpp 
b/src/gadgetlib1/protoboard.hpp new file mode 100644 index 000000000..a910a6df9 --- /dev/null +++ b/src/gadgetlib1/protoboard.hpp @@ -0,0 +1,75 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef PROTOBOARD_HPP_ +#define PROTOBOARD_HPP_ + +#include +#include +#include +#include +#include +#include "gadgetlib1/pb_variable.hpp" +#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp" +#include "common/utils.hpp" + +namespace libsnark { + +template +class r1cs_constraint; + +template +class r1cs_constraint_system; + +template +class protoboard { +private: + FieldT constant_term; /* only here, because pb.val() needs to be able to return reference to the constant 1 term */ + r1cs_variable_assignment values; /* values[0] will hold the value of the first allocated variable of the protoboard, *NOT* constant 1 */ + var_index_t next_free_var; + lc_index_t next_free_lc; + std::vector lc_values; +public: + r1cs_constraint_system constraint_system; + + protoboard(); + + void clear_values(); + + FieldT& val(const pb_variable &var); + FieldT val(const pb_variable &var) const; + + FieldT& lc_val(const pb_linear_combination &lc); + FieldT lc_val(const pb_linear_combination &lc) const; + + void add_r1cs_constraint(const r1cs_constraint &constr, const std::string &annotation=""); + void augment_variable_annotation(const pb_variable &v, const std::string &postfix); + bool is_satisfied() const; + void dump_variables() const; + + size_t num_constraints() const; + size_t num_inputs() const; + size_t num_variables() const; + + void set_input_sizes(const size_t primary_input_size); + + r1cs_variable_assignment full_variable_assignment() const; + r1cs_primary_input primary_input() const; + r1cs_auxiliary_input auxiliary_input() const; + r1cs_constraint_system get_constraint_system() const; + + friend class pb_variable; + friend class pb_linear_combination; + +private: + var_index_t allocate_var_index(const std::string &annotation=""); + lc_index_t allocate_lc_index(); +}; + +} // libsnark +#include "gadgetlib1/protoboard.tcc" +#endif // PROTOBOARD_HPP_ diff --git a/src/gadgetlib1/protoboard.tcc b/src/gadgetlib1/protoboard.tcc new file mode 100644 index 000000000..882af28e6 --- /dev/null +++ b/src/gadgetlib1/protoboard.tcc @@ -0,0 +1,189 @@ +/** @file + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef PROTOBOARD_TCC_ +#define PROTOBOARD_TCC_ + +#include +#include +#include "common/profiling.hpp" + +namespace libsnark { + +template +protoboard::protoboard() +{ + constant_term = FieldT::one(); + +#ifdef DEBUG + constraint_system.variable_annotations[0] = "ONE"; +#endif + + next_free_var = 1; /* to account for constant 1 term */ + next_free_lc = 0; +} + +template +void protoboard::clear_values() +{ + std::fill(values.begin(), values.end(), FieldT::zero()); +} + +template +var_index_t protoboard::allocate_var_index(const std::string &annotation) +{ +#ifdef DEBUG + assert(annotation != ""); + constraint_system.variable_annotations[next_free_var] = annotation; +#else + UNUSED(annotation); +#endif + ++constraint_system.auxiliary_input_size; + values.emplace_back(FieldT::zero()); + return next_free_var++; +} + +template +lc_index_t protoboard::allocate_lc_index() +{ + lc_values.emplace_back(FieldT::zero()); + return next_free_lc++; +} + +template +FieldT& protoboard::val(const pb_variable &var) +{ + assert(var.index <= values.size()); + return (var.index == 0 ? constant_term : values[var.index-1]); +} + +template +FieldT protoboard::val(const pb_variable &var) const +{ + assert(var.index <= values.size()); + return (var.index == 0 ? constant_term : values[var.index-1]); +} + +template +FieldT& protoboard::lc_val(const pb_linear_combination &lc) +{ + if (lc.is_variable) + { + return this->val(pb_variable(lc.index)); + } + else + { + assert(lc.index < lc_values.size()); + return lc_values[lc.index]; + } +} + +template +FieldT protoboard::lc_val(const pb_linear_combination &lc) const +{ + if (lc.is_variable) + { + return this->val(pb_variable(lc.index)); + } + else + { + assert(lc.index < lc_values.size()); + return lc_values[lc.index]; + } +} + +template +void protoboard::add_r1cs_constraint(const r1cs_constraint &constr, const std::string &annotation) +{ +#ifdef DEBUG + assert(annotation != ""); + constraint_system.constraint_annotations[constraint_system.constraints.size()] = annotation; +#else + UNUSED(annotation); +#endif + constraint_system.constraints.emplace_back(constr); +} + +template +void protoboard::augment_variable_annotation(const pb_variable &v, const std::string &postfix) +{ +#ifdef DEBUG + auto it = constraint_system.variable_annotations.find(v.index); + constraint_system.variable_annotations[v.index] = (it == constraint_system.variable_annotations.end() ? 
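+    /* i.e., append the postfix to the existing annotation (separated by a space),
+       or use the postfix on its own if the variable has no annotation yet */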
"" : it->second + " ") + postfix; +#endif +} + +template +bool protoboard::is_satisfied() const +{ + return constraint_system.is_satisfied(primary_input(), auxiliary_input()); +} + +template +void protoboard::dump_variables() const +{ +#ifdef DEBUG + for (size_t i = 0; i < constraint_system.num_variables; ++i) + { + printf("%-40s --> ", constraint_system.variable_annotations[i].c_str()); + values[i].as_bigint().print_hex(); + } +#endif +} + +template +size_t protoboard::num_constraints() const +{ + return constraint_system.num_constraints(); +} + +template +size_t protoboard::num_inputs() const +{ + return constraint_system.num_inputs(); +} + +template +size_t protoboard::num_variables() const +{ + return next_free_var - 1; +} + +template +void protoboard::set_input_sizes(const size_t primary_input_size) +{ + assert(primary_input_size <= num_variables()); + constraint_system.primary_input_size = primary_input_size; + constraint_system.auxiliary_input_size = num_variables() - primary_input_size; +} + +template +r1cs_variable_assignment protoboard::full_variable_assignment() const +{ + return values; +} + +template +r1cs_primary_input protoboard::primary_input() const +{ + return r1cs_primary_input(values.begin(), values.begin() + num_inputs()); +} + +template +r1cs_auxiliary_input protoboard::auxiliary_input() const +{ + return r1cs_primary_input(values.begin() + num_inputs(), values.end()); +} + +template +r1cs_constraint_system protoboard::get_constraint_system() const +{ + return constraint_system; +} + +} // libsnark +#endif // PROTOBOARD_TCC_ diff --git a/src/reductions/r1cs_to_qap/r1cs_to_qap.hpp b/src/reductions/r1cs_to_qap/r1cs_to_qap.hpp new file mode 100644 index 000000000..b3cde710c --- /dev/null +++ b/src/reductions/r1cs_to_qap/r1cs_to_qap.hpp @@ -0,0 +1,70 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a R1CS-to-QAP reduction, that is, constructing + a QAP ("Quadratic Arithmetic Program") from a R1CS ("Rank-1 Constraint System"). + + QAPs are defined in \[GGPR13], and construced for R1CS also in \[GGPR13]. + + The implementation of the reduction follows, extends, and optimizes + the efficient approach described in Appendix E of \[BCGTV13]. + + References: + + \[BCGTV13] + "SNARKs for C: Verifying Program Executions Succinctly and in Zero Knowledge", + Eli Ben-Sasson, Alessandro Chiesa, Daniel Genkin, Eran Tromer, Madars Virza, + CRYPTO 2013, + + + \[GGPR13]: + "Quadratic span programs and succinct NIZKs without PCPs", + Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova, + EUROCRYPT 2013, + + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_TO_QAP_HPP_ +#define R1CS_TO_QAP_HPP_ + +#include "relations/arithmetic_programs/qap/qap.hpp" +#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp" + +namespace libsnark { + +/** + * Instance map for the R1CS-to-QAP reduction. + */ +template +qap_instance r1cs_to_qap_instance_map(const r1cs_constraint_system &cs); + +/** + * Instance map for the R1CS-to-QAP reduction followed by evaluation of the resulting QAP instance. 
+ */ +template +qap_instance_evaluation r1cs_to_qap_instance_map_with_evaluation(const r1cs_constraint_system &cs, + const FieldT &t); + +/** + * Witness map for the R1CS-to-QAP reduction. + * + * The witness map takes zero knowledge into account when d1,d2,d3 are random. + */ +template +qap_witness r1cs_to_qap_witness_map(const r1cs_constraint_system &cs, + const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input, + const FieldT &d1, + const FieldT &d2, + const FieldT &d3); + +} // libsnark + +#include "reductions/r1cs_to_qap/r1cs_to_qap.tcc" + +#endif // R1CS_TO_QAP_HPP_ diff --git a/src/reductions/r1cs_to_qap/r1cs_to_qap.tcc b/src/reductions/r1cs_to_qap/r1cs_to_qap.tcc new file mode 100644 index 000000000..3d0bee273 --- /dev/null +++ b/src/reductions/r1cs_to_qap/r1cs_to_qap.tcc @@ -0,0 +1,338 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for a R1CS-to-QAP reduction. + + See r1cs_to_qap.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_TO_QAP_TCC_ +#define R1CS_TO_QAP_TCC_ + +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "algebra/evaluation_domain/evaluation_domain.hpp" + +namespace libsnark { + +/** + * Instance map for the R1CS-to-QAP reduction. + * + * Namely, given a R1CS constraint system cs, construct a QAP instance for which: + * A := (A_0(z),A_1(z),...,A_m(z)) + * B := (B_0(z),B_1(z),...,B_m(z)) + * C := (C_0(z),C_1(z),...,C_m(z)) + * where + * m = number of variables of the QAP + * and + * each A_i,B_i,C_i is expressed in the Lagrange basis. 
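+ * The Lagrange basis is taken over the evaluation domain of size
+ * cs.num_constraints() + cs.num_inputs() + 1; the extra domain points carry the
+ * input-consistency constraints input_i * 0 = 0 added below.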
+ */ +template +qap_instance r1cs_to_qap_instance_map(const r1cs_constraint_system &cs) +{ + enter_block("Call to r1cs_to_qap_instance_map"); + + const std::shared_ptr > domain = get_evaluation_domain(cs.num_constraints() + cs.num_inputs() + 1); + + std::vector > A_in_Lagrange_basis(cs.num_variables()+1); + std::vector > B_in_Lagrange_basis(cs.num_variables()+1); + std::vector > C_in_Lagrange_basis(cs.num_variables()+1); + + enter_block("Compute polynomials A, B, C in Lagrange basis"); + /** + * add and process the constraints + * input_i * 0 = 0 + * to ensure soundness of input consistency + */ + for (size_t i = 0; i <= cs.num_inputs(); ++i) + { + A_in_Lagrange_basis[i][cs.num_constraints() + i] = FieldT::one(); + } + /* process all other constraints */ + for (size_t i = 0; i < cs.num_constraints(); ++i) + { + for (size_t j = 0; j < cs.constraints[i].a.terms.size(); ++j) + { + A_in_Lagrange_basis[cs.constraints[i].a.terms[j].index][i] += + cs.constraints[i].a.terms[j].coeff; + } + + for (size_t j = 0; j < cs.constraints[i].b.terms.size(); ++j) + { + B_in_Lagrange_basis[cs.constraints[i].b.terms[j].index][i] += + cs.constraints[i].b.terms[j].coeff; + } + + for (size_t j = 0; j < cs.constraints[i].c.terms.size(); ++j) + { + C_in_Lagrange_basis[cs.constraints[i].c.terms[j].index][i] += + cs.constraints[i].c.terms[j].coeff; + } + } + leave_block("Compute polynomials A, B, C in Lagrange basis"); + + leave_block("Call to r1cs_to_qap_instance_map"); + + return qap_instance(domain, + cs.num_variables(), + domain->m, + cs.num_inputs(), + std::move(A_in_Lagrange_basis), + std::move(B_in_Lagrange_basis), + std::move(C_in_Lagrange_basis)); +} + +/** + * Instance map for the R1CS-to-QAP reduction followed by evaluation of the resulting QAP instance. + * + * Namely, given a R1CS constraint system cs and a field element t, construct + * a QAP instance (evaluated at t) for which: + * At := (A_0(t),A_1(t),...,A_m(t)) + * Bt := (B_0(t),B_1(t),...,B_m(t)) + * Ct := (C_0(t),C_1(t),...,C_m(t)) + * Ht := (1,t,t^2,...,t^n) + * Zt := Z(t) = "vanishing polynomial of a certain set S, evaluated at t" + * where + * m = number of variables of the QAP + * n = degree of the QAP + */ +template +qap_instance_evaluation r1cs_to_qap_instance_map_with_evaluation(const r1cs_constraint_system &cs, + const FieldT &t) +{ + enter_block("Call to r1cs_to_qap_instance_map_with_evaluation"); + + const std::shared_ptr > domain = get_evaluation_domain(cs.num_constraints() + cs.num_inputs() + 1); + + std::vector At, Bt, Ct, Ht; + + At.resize(cs.num_variables()+1, FieldT::zero()); + Bt.resize(cs.num_variables()+1, FieldT::zero()); + Ct.resize(cs.num_variables()+1, FieldT::zero()); + Ht.reserve(domain->m+1); + + const FieldT Zt = domain->compute_Z(t); + + enter_block("Compute evaluations of A, B, C, H at t"); + const std::vector u = domain->lagrange_coeffs(t); + /** + * add and process the constraints + * input_i * 0 = 0 + * to ensure soundness of input consistency + */ + for (size_t i = 0; i <= cs.num_inputs(); ++i) + { + At[i] = u[cs.num_constraints() + i]; + } + /* process all other constraints */ + for (size_t i = 0; i < cs.num_constraints(); ++i) + { + for (size_t j = 0; j < cs.constraints[i].a.terms.size(); ++j) + { + At[cs.constraints[i].a.terms[j].index] += + u[i]*cs.constraints[i].a.terms[j].coeff; + } + + for (size_t j = 0; j < cs.constraints[i].b.terms.size(); ++j) + { + Bt[cs.constraints[i].b.terms[j].index] += + u[i]*cs.constraints[i].b.terms[j].coeff; + } + + for (size_t j = 0; j < cs.constraints[i].c.terms.size(); ++j) + { 
+ Ct[cs.constraints[i].c.terms[j].index] += + u[i]*cs.constraints[i].c.terms[j].coeff; + } + } + + FieldT ti = FieldT::one(); + for (size_t i = 0; i < domain->m+1; ++i) + { + Ht.emplace_back(ti); + ti *= t; + } + leave_block("Compute evaluations of A, B, C, H at t"); + + leave_block("Call to r1cs_to_qap_instance_map_with_evaluation"); + + return qap_instance_evaluation(domain, + cs.num_variables(), + domain->m, + cs.num_inputs(), + t, + std::move(At), + std::move(Bt), + std::move(Ct), + std::move(Ht), + Zt); +} + +/** + * Witness map for the R1CS-to-QAP reduction. + * + * The witness map takes zero knowledge into account when d1,d2,d3 are random. + * + * More precisely, compute the coefficients + * h_0,h_1,...,h_n + * of the polynomial + * H(z) := (A(z)*B(z)-C(z))/Z(z) + * where + * A(z) := A_0(z) + \sum_{k=1}^{m} w_k A_k(z) + d1 * Z(z) + * B(z) := B_0(z) + \sum_{k=1}^{m} w_k B_k(z) + d2 * Z(z) + * C(z) := C_0(z) + \sum_{k=1}^{m} w_k C_k(z) + d3 * Z(z) + * Z(z) := "vanishing polynomial of set S" + * and + * m = number of variables of the QAP + * n = degree of the QAP + * + * This is done as follows: + * (1) compute evaluations of A,B,C on S = {sigma_1,...,sigma_n} + * (2) compute coefficients of A,B,C + * (3) compute evaluations of A,B,C on T = "coset of S" + * (4) compute evaluation of H on T + * (5) compute coefficients of H + * (6) patch H to account for d1,d2,d3 (i.e., add coefficients of the polynomial (A d2 + B d1 - d3) + d1*d2*Z ) + * + * The code below is not as simple as the above high-level description due to + * some reshuffling to save space. + */ +template +qap_witness r1cs_to_qap_witness_map(const r1cs_constraint_system &cs, + const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input, + const FieldT &d1, + const FieldT &d2, + const FieldT &d3) +{ + enter_block("Call to r1cs_to_qap_witness_map"); + + /* sanity check */ + assert(cs.is_satisfied(primary_input, auxiliary_input)); + + const std::shared_ptr > domain = get_evaluation_domain(cs.num_constraints() + cs.num_inputs() + 1); + + r1cs_variable_assignment full_variable_assignment = primary_input; + full_variable_assignment.insert(full_variable_assignment.end(), auxiliary_input.begin(), auxiliary_input.end()); + + enter_block("Compute evaluation of polynomials A, B on set S"); + std::vector aA(domain->m, FieldT::zero()), aB(domain->m, FieldT::zero()); + + /* account for the additional constraints input_i * 0 = 0 */ + for (size_t i = 0; i <= cs.num_inputs(); ++i) + { + aA[i+cs.num_constraints()] = (i > 0 ? 
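+        /* at the extra domain point for input i only A_i is nonzero (with coefficient 1,
+           see the instance map above), so the evaluation of A there is simply the value
+           of input i -- and 1 for the constant term at i = 0 */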
full_variable_assignment[i-1] : FieldT::one()); + } + /* account for all other constraints */ + for (size_t i = 0; i < cs.num_constraints(); ++i) + { + aA[i] += cs.constraints[i].a.evaluate(full_variable_assignment); + aB[i] += cs.constraints[i].b.evaluate(full_variable_assignment); + } + leave_block("Compute evaluation of polynomials A, B on set S"); + + enter_block("Compute coefficients of polynomial A"); + domain->iFFT(aA); + leave_block("Compute coefficients of polynomial A"); + + enter_block("Compute coefficients of polynomial B"); + domain->iFFT(aB); + leave_block("Compute coefficients of polynomial B"); + + enter_block("Compute ZK-patch"); + std::vector coefficients_for_H(domain->m+1, FieldT::zero()); +#ifdef MULTICORE +#pragma omp parallel for +#endif + /* add coefficients of the polynomial (d2*A + d1*B - d3) + d1*d2*Z */ + for (size_t i = 0; i < domain->m; ++i) + { + coefficients_for_H[i] = d2*aA[i] + d1*aB[i]; + } + coefficients_for_H[0] -= d3; + domain->add_poly_Z(d1*d2, coefficients_for_H); + leave_block("Compute ZK-patch"); + + enter_block("Compute evaluation of polynomial A on set T"); + domain->cosetFFT(aA, FieldT::multiplicative_generator); + leave_block("Compute evaluation of polynomial A on set T"); + + enter_block("Compute evaluation of polynomial B on set T"); + domain->cosetFFT(aB, FieldT::multiplicative_generator); + leave_block("Compute evaluation of polynomial B on set T"); + + enter_block("Compute evaluation of polynomial H on set T"); + std::vector &H_tmp = aA; // can overwrite aA because it is not used later +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < domain->m; ++i) + { + H_tmp[i] = aA[i]*aB[i]; + } + std::vector().swap(aB); // destroy aB + + enter_block("Compute evaluation of polynomial C on set S"); + std::vector aC(domain->m, FieldT::zero()); + for (size_t i = 0; i < cs.num_constraints(); ++i) + { + aC[i] += cs.constraints[i].c.evaluate(full_variable_assignment); + } + leave_block("Compute evaluation of polynomial C on set S"); + + enter_block("Compute coefficients of polynomial C"); + domain->iFFT(aC); + leave_block("Compute coefficients of polynomial C"); + + enter_block("Compute evaluation of polynomial C on set T"); + domain->cosetFFT(aC, FieldT::multiplicative_generator); + leave_block("Compute evaluation of polynomial C on set T"); + +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < domain->m; ++i) + { + H_tmp[i] = (H_tmp[i]-aC[i]); + } + + enter_block("Divide by Z on set T"); + domain->divide_by_Z_on_coset(H_tmp); + leave_block("Divide by Z on set T"); + + leave_block("Compute evaluation of polynomial H on set T"); + + enter_block("Compute coefficients of polynomial H"); + domain->icosetFFT(H_tmp, FieldT::multiplicative_generator); + leave_block("Compute coefficients of polynomial H"); + + enter_block("Compute sum of H and ZK-patch"); +#ifdef MULTICORE +#pragma omp parallel for +#endif + for (size_t i = 0; i < domain->m; ++i) + { + coefficients_for_H[i] += H_tmp[i]; + } + leave_block("Compute sum of H and ZK-patch"); + + leave_block("Call to r1cs_to_qap_witness_map"); + + return qap_witness(cs.num_variables(), + domain->m, + cs.num_inputs(), + d1, + d2, + d3, + full_variable_assignment, + std::move(coefficients_for_H)); +} + +} // libsnark + +#endif // R1CS_TO_QAP_TCC_ diff --git a/src/relations/arithmetic_programs/qap/qap.hpp b/src/relations/arithmetic_programs/qap/qap.hpp new file mode 100644 index 000000000..4991d203b --- /dev/null +++ b/src/relations/arithmetic_programs/qap/qap.hpp @@ -0,0 
+1,193 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a QAP ("Quadratic Arithmetic Program"). + + QAPs are defined in \[GGPR13]. + + References: + + \[GGPR13]: + "Quadratic span programs and succinct NIZKs without PCPs", + Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova, + EUROCRYPT 2013, + + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef QAP_HPP_ +#define QAP_HPP_ + +#include "algebra/evaluation_domain/evaluation_domain.hpp" + +namespace libsnark { + +/* forward declaration */ +template +class qap_witness; + +/** + * A QAP instance. + * + * Specifically, the datastructure stores: + * - a choice of domain (corresponding to a certain subset of the field); + * - the number of variables, the degree, and the number of inputs; and + * - coefficients of the A,B,C polynomials in the Lagrange basis. + * + * There is no need to store the Z polynomial because it is uniquely + * determined by the domain (as Z is its vanishing polynomial). + */ +template +class qap_instance { +private: + size_t num_variables_; + size_t degree_; + size_t num_inputs_; + +public: + std::shared_ptr > domain; + + std::vector > A_in_Lagrange_basis; + std::vector > B_in_Lagrange_basis; + std::vector > C_in_Lagrange_basis; + + qap_instance(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const std::vector > &A_in_Lagrange_basis, + const std::vector > &B_in_Lagrange_basis, + const std::vector > &C_in_Lagrange_basis); + + qap_instance(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + std::vector > &&A_in_Lagrange_basis, + std::vector > &&B_in_Lagrange_basis, + std::vector > &&C_in_Lagrange_basis); + + qap_instance(const qap_instance &other) = default; + qap_instance(qap_instance &&other) = default; + qap_instance& operator=(const qap_instance &other) = default; + qap_instance& operator=(qap_instance &&other) = default; + + size_t num_variables() const; + size_t degree() const; + size_t num_inputs() const; + + bool is_satisfied(const qap_witness &witness) const; +}; + +/** + * A QAP instance evaluation is a QAP instance that is evaluated at a field element t. + * + * Specifically, the datastructure stores: + * - a choice of domain (corresponding to a certain subset of the field); + * - the number of variables, the degree, and the number of inputs; + * - a field element t; + * - evaluations of the A,B,C (and Z) polynomials at t; + * - evaluations of all monomials of t; + * - counts about how many of the above evaluations are in fact non-zero. 
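+ *
+ * For such an evaluation, is_satisfied(witness) checks the QAP divisibility
+ * relation at the single point t, namely that
+ *     (A_0(t) + \sum_k w_k A_k(t) + d1*Z(t)) * (B_0(t) + \sum_k w_k B_k(t) + d2*Z(t))
+ *       - (C_0(t) + \sum_k w_k C_k(t) + d3*Z(t)) = H(t) * Z(t),
+ * where w are the witness's coefficients_for_ABCs and H is the polynomial given
+ * by its coefficients_for_H.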
+ */ +template +class qap_instance_evaluation { +private: + size_t num_variables_; + size_t degree_; + size_t num_inputs_; +public: + std::shared_ptr > domain; + + FieldT t; + + std::vector At, Bt, Ct, Ht; + + FieldT Zt; + + qap_instance_evaluation(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &t, + const std::vector &At, + const std::vector &Bt, + const std::vector &Ct, + const std::vector &Ht, + const FieldT &Zt); + qap_instance_evaluation(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &t, + std::vector &&At, + std::vector &&Bt, + std::vector &&Ct, + std::vector &&Ht, + const FieldT &Zt); + + qap_instance_evaluation(const qap_instance_evaluation &other) = default; + qap_instance_evaluation(qap_instance_evaluation &&other) = default; + qap_instance_evaluation& operator=(const qap_instance_evaluation &other) = default; + qap_instance_evaluation& operator=(qap_instance_evaluation &&other) = default; + + size_t num_variables() const; + size_t degree() const; + size_t num_inputs() const; + + bool is_satisfied(const qap_witness &witness) const; +}; + +/** + * A QAP witness. + */ +template +class qap_witness { +private: + size_t num_variables_; + size_t degree_; + size_t num_inputs_; + +public: + FieldT d1, d2, d3; + + std::vector coefficients_for_ABCs; + std::vector coefficients_for_H; + + qap_witness(const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &d1, + const FieldT &d2, + const FieldT &d3, + const std::vector &coefficients_for_ABCs, + const std::vector &coefficients_for_H); + + qap_witness(const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &d1, + const FieldT &d2, + const FieldT &d3, + const std::vector &coefficients_for_ABCs, + std::vector &&coefficients_for_H); + + qap_witness(const qap_witness &other) = default; + qap_witness(qap_witness &&other) = default; + qap_witness& operator=(const qap_witness &other) = default; + qap_witness& operator=(qap_witness &&other) = default; + + size_t num_variables() const; + size_t degree() const; + size_t num_inputs() const; +}; + +} // libsnark + +#include "relations/arithmetic_programs/qap/qap.tcc" + +#endif // QAP_HPP_ diff --git a/src/relations/arithmetic_programs/qap/qap.tcc b/src/relations/arithmetic_programs/qap/qap.tcc new file mode 100644 index 000000000..a4a3c96a2 --- /dev/null +++ b/src/relations/arithmetic_programs/qap/qap.tcc @@ -0,0 +1,324 @@ +/** @file +***************************************************************************** + +Implementation of interfaces for a QAP ("Quadratic Arithmetic Program"). + +See qap.hpp . + +***************************************************************************** +* @author This file is part of libsnark, developed by SCIPR Lab +* and contributors (see AUTHORS). 
+* @copyright MIT license (see LICENSE file) +*****************************************************************************/ + +#ifndef QAP_TCC_ +#define QAP_TCC_ + +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "algebra/evaluation_domain/evaluation_domain.hpp" +#include "algebra/scalar_multiplication/multiexp.hpp" + +namespace libsnark { + +template +qap_instance::qap_instance(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const std::vector > &A_in_Lagrange_basis, + const std::vector > &B_in_Lagrange_basis, + const std::vector > &C_in_Lagrange_basis) : + num_variables_(num_variables), + degree_(degree), + num_inputs_(num_inputs), + domain(domain), + A_in_Lagrange_basis(A_in_Lagrange_basis), + B_in_Lagrange_basis(B_in_Lagrange_basis), + C_in_Lagrange_basis(C_in_Lagrange_basis) +{ +} + +template +qap_instance::qap_instance(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + std::vector > &&A_in_Lagrange_basis, + std::vector > &&B_in_Lagrange_basis, + std::vector > &&C_in_Lagrange_basis) : + num_variables_(num_variables), + degree_(degree), + num_inputs_(num_inputs), + domain(domain), + A_in_Lagrange_basis(std::move(A_in_Lagrange_basis)), + B_in_Lagrange_basis(std::move(B_in_Lagrange_basis)), + C_in_Lagrange_basis(std::move(C_in_Lagrange_basis)) +{ +} + +template +size_t qap_instance::num_variables() const +{ + return num_variables_; +} + +template +size_t qap_instance::degree() const +{ + return degree_; +} + +template +size_t qap_instance::num_inputs() const +{ + return num_inputs_; +} + +template +bool qap_instance::is_satisfied(const qap_witness &witness) const +{ + const FieldT t = FieldT::random_element(); + + std::vector At(this->num_variables()+1, FieldT::zero()); + std::vector Bt(this->num_variables()+1, FieldT::zero()); + std::vector Ct(this->num_variables()+1, FieldT::zero()); + std::vector Ht(this->degree()+1); + + const FieldT Zt = this->domain->compute_Z(t); + + const std::vector u = this->domain->lagrange_coeffs(t); + + for (size_t i = 0; i < this->num_variables()+1; ++i) + { + for (auto &el : A_in_Lagrange_basis[i]) + { + At[i] += u[el.first] * el.second; + } + + for (auto &el : B_in_Lagrange_basis[i]) + { + Bt[i] += u[el.first] * el.second; + } + + for (auto &el : C_in_Lagrange_basis[i]) + { + Ct[i] += u[el.first] * el.second; + } + } + + FieldT ti = FieldT::one(); + for (size_t i = 0; i < this->degree()+1; ++i) + { + Ht[i] = ti; + ti *= t; + } + + const qap_instance_evaluation eval_qap_inst(this->domain, + this->num_variables(), + this->degree(), + this->num_inputs(), + t, + std::move(At), + std::move(Bt), + std::move(Ct), + std::move(Ht), + Zt); + return eval_qap_inst.is_satisfied(witness); +} + +template +qap_instance_evaluation::qap_instance_evaluation(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &t, + const std::vector &At, + const std::vector &Bt, + const std::vector &Ct, + const std::vector &Ht, + const FieldT &Zt) : + num_variables_(num_variables), + degree_(degree), + num_inputs_(num_inputs), + domain(domain), + t(t), + At(At), + Bt(Bt), + Ct(Ct), + Ht(Ht), + Zt(Zt) +{ +} + +template +qap_instance_evaluation::qap_instance_evaluation(const std::shared_ptr > &domain, + const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &t, + std::vector &&At, + std::vector &&Bt, + std::vector &&Ct, + 
std::vector &&Ht, + const FieldT &Zt) : + num_variables_(num_variables), + degree_(degree), + num_inputs_(num_inputs), + domain(domain), + t(t), + At(std::move(At)), + Bt(std::move(Bt)), + Ct(std::move(Ct)), + Ht(std::move(Ht)), + Zt(Zt) +{ +} + +template +size_t qap_instance_evaluation::num_variables() const +{ + return num_variables_; +} + +template +size_t qap_instance_evaluation::degree() const +{ + return degree_; +} + +template +size_t qap_instance_evaluation::num_inputs() const +{ + return num_inputs_; +} + +template +bool qap_instance_evaluation::is_satisfied(const qap_witness &witness) const +{ + + if (this->num_variables() != witness.num_variables()) + { + return false; + } + + if (this->degree() != witness.degree()) + { + return false; + } + + if (this->num_inputs() != witness.num_inputs()) + { + return false; + } + + if (this->num_variables() != witness.coefficients_for_ABCs.size()) + { + return false; + } + + if (this->degree()+1 != witness.coefficients_for_H.size()) + { + return false; + } + + if (this->At.size() != this->num_variables()+1 || this->Bt.size() != this->num_variables()+1 || this->Ct.size() != this->num_variables()+1) + { + return false; + } + + if (this->Ht.size() != this->degree()+1) + { + return false; + } + + if (this->Zt != this->domain->compute_Z(this->t)) + { + return false; + } + + FieldT ans_A = this->At[0] + witness.d1*this->Zt; + FieldT ans_B = this->Bt[0] + witness.d2*this->Zt; + FieldT ans_C = this->Ct[0] + witness.d3*this->Zt; + FieldT ans_H = FieldT::zero(); + + ans_A = ans_A + naive_plain_exp(this->At.begin()+1, this->At.begin()+1+this->num_variables(), + witness.coefficients_for_ABCs.begin(), witness.coefficients_for_ABCs.begin()+this->num_variables()); + ans_B = ans_B + naive_plain_exp(this->Bt.begin()+1, this->Bt.begin()+1+this->num_variables(), + witness.coefficients_for_ABCs.begin(), witness.coefficients_for_ABCs.begin()+this->num_variables()); + ans_C = ans_C + naive_plain_exp(this->Ct.begin()+1, this->Ct.begin()+1+this->num_variables(), + witness.coefficients_for_ABCs.begin(), witness.coefficients_for_ABCs.begin()+this->num_variables()); + ans_H = ans_H + naive_plain_exp(this->Ht.begin(), this->Ht.begin()+this->degree()+1, + witness.coefficients_for_H.begin(), witness.coefficients_for_H.begin()+this->degree()+1); + + if (ans_A * ans_B - ans_C != ans_H * this->Zt) + { + return false; + } + + return true; +} + +template +qap_witness::qap_witness(const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &d1, + const FieldT &d2, + const FieldT &d3, + const std::vector &coefficients_for_ABCs, + const std::vector &coefficients_for_H) : + num_variables_(num_variables), + degree_(degree), + num_inputs_(num_inputs), + d1(d1), + d2(d2), + d3(d3), + coefficients_for_ABCs(coefficients_for_ABCs), + coefficients_for_H(coefficients_for_H) +{ +} + +template +qap_witness::qap_witness(const size_t num_variables, + const size_t degree, + const size_t num_inputs, + const FieldT &d1, + const FieldT &d2, + const FieldT &d3, + const std::vector &coefficients_for_ABCs, + std::vector &&coefficients_for_H) : + num_variables_(num_variables), + degree_(degree), + num_inputs_(num_inputs), + d1(d1), + d2(d2), + d3(d3), + coefficients_for_ABCs(coefficients_for_ABCs), + coefficients_for_H(std::move(coefficients_for_H)) +{ +} + + +template +size_t qap_witness::num_variables() const +{ + return num_variables_; +} + +template +size_t qap_witness::degree() const +{ + return degree_; +} + +template +size_t qap_witness::num_inputs() const +{ + 
return num_inputs_; +} + + +} // libsnark + +#endif // QAP_TCC_ diff --git a/src/relations/arithmetic_programs/qap/tests/test_qap.cpp b/src/relations/arithmetic_programs/qap/tests/test_qap.cpp new file mode 100644 index 000000000..d8aaddaa7 --- /dev/null +++ b/src/relations/arithmetic_programs/qap/tests/test_qap.cpp @@ -0,0 +1,115 @@ +/** + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp" +#include "algebra/fields/field_utils.hpp" +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "reductions/r1cs_to_qap/r1cs_to_qap.hpp" +#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp" + +using namespace libsnark; + +template +void test_qap(const size_t qap_degree, const size_t num_inputs, const bool binary_input) +{ + /* + We construct an instance where the QAP degree is qap_degree. + So we generate an instance of R1CS where the number of constraints qap_degree - num_inputs - 1. + See the transformation from R1CS to QAP for why this is the case. + So we need that qap_degree >= num_inputs + 1. + */ + assert(num_inputs + 1 <= qap_degree); + enter_block("Call to test_qap"); + + const size_t num_constraints = qap_degree - num_inputs - 1; + + print_indent(); printf("* QAP degree: %zu\n", qap_degree); + print_indent(); printf("* Number of inputs: %zu\n", num_inputs); + print_indent(); printf("* Number of R1CS constraints: %zu\n", num_constraints); + print_indent(); printf("* Input type: %s\n", binary_input ? 
"binary" : "field"); + + enter_block("Generate constraint system and assignment"); + r1cs_example example; + if (binary_input) + { + example = generate_r1cs_example_with_binary_input(num_constraints, num_inputs); + } + else + { + example = generate_r1cs_example_with_field_input(num_constraints, num_inputs); + } + leave_block("Generate constraint system and assignment"); + + enter_block("Check satisfiability of constraint system"); + assert(example.constraint_system.is_satisfied(example.primary_input, example.auxiliary_input)); + leave_block("Check satisfiability of constraint system"); + + const FieldT t = FieldT::random_element(), + d1 = FieldT::random_element(), + d2 = FieldT::random_element(), + d3 = FieldT::random_element(); + + enter_block("Compute QAP instance 1"); + qap_instance qap_inst_1 = r1cs_to_qap_instance_map(example.constraint_system); + leave_block("Compute QAP instance 1"); + + enter_block("Compute QAP instance 2"); + qap_instance_evaluation qap_inst_2 = r1cs_to_qap_instance_map_with_evaluation(example.constraint_system, t); + leave_block("Compute QAP instance 2"); + + enter_block("Compute QAP witness"); + qap_witness qap_wit = r1cs_to_qap_witness_map(example.constraint_system, example.primary_input, example.auxiliary_input, d1, d2, d3); + leave_block("Compute QAP witness"); + + enter_block("Check satisfiability of QAP instance 1"); + assert(qap_inst_1.is_satisfied(qap_wit)); + leave_block("Check satisfiability of QAP instance 1"); + + enter_block("Check satisfiability of QAP instance 2"); + assert(qap_inst_2.is_satisfied(qap_wit)); + leave_block("Check satisfiability of QAP instance 2"); + + leave_block("Call to test_qap"); +} + +int main() +{ + start_profiling(); + + mnt6_pp::init_public_params(); + + const size_t num_inputs = 10; + + const size_t basic_domain_size = 1ul< >(basic_domain_size, num_inputs, true); + test_qap >(step_domain_size, num_inputs, true); + test_qap >(extended_domain_size, num_inputs, true); + test_qap >(extended_domain_size_special, num_inputs, true); + + leave_block("Test QAP with binary input"); + + enter_block("Test QAP with field input"); + + test_qap >(basic_domain_size, num_inputs, false); + test_qap >(step_domain_size, num_inputs, false); + test_qap >(extended_domain_size, num_inputs, false); + test_qap >(extended_domain_size_special, num_inputs, false); + + leave_block("Test QAP with field input"); +} diff --git a/src/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp b/src/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp new file mode 100644 index 000000000..47003e959 --- /dev/null +++ b/src/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp @@ -0,0 +1,73 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a R1CS example, as well as functions to sample + R1CS examples with prescribed parameters (according to some distribution). + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_EXAMPLES_HPP_ +#define R1CS_EXAMPLES_HPP_ + +#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp" + +namespace libsnark { + +/** + * A R1CS example comprises a R1CS constraint system, R1CS input, and R1CS witness. 
+ */ +template +struct r1cs_example { + r1cs_constraint_system constraint_system; + r1cs_primary_input primary_input; + r1cs_auxiliary_input auxiliary_input; + + r1cs_example() = default; + r1cs_example(const r1cs_example &other) = default; + r1cs_example(const r1cs_constraint_system &constraint_system, + const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input) : + constraint_system(constraint_system), + primary_input(primary_input), + auxiliary_input(auxiliary_input) + {}; + r1cs_example(r1cs_constraint_system &&constraint_system, + r1cs_primary_input &&primary_input, + r1cs_auxiliary_input &&auxiliary_input) : + constraint_system(std::move(constraint_system)), + primary_input(std::move(primary_input)), + auxiliary_input(std::move(auxiliary_input)) + {}; +}; + +/** + * Generate a R1CS example such that: + * - the number of constraints of the R1CS constraint system is num_constraints; + * - the number of variables of the R1CS constraint system is (approximately) num_constraints; + * - the number of inputs of the R1CS constraint system is num_inputs; + * - the R1CS input consists of ``full'' field elements (typically require the whole log|Field| bits to represent). + */ +template +r1cs_example generate_r1cs_example_with_field_input(const size_t num_constraints, + const size_t num_inputs); + +/** + * Generate a R1CS example such that: + * - the number of constraints of the R1CS constraint system is num_constraints; + * - the number of variables of the R1CS constraint system is (approximately) num_constraints; + * - the number of inputs of the R1CS constraint system is num_inputs; + * - the R1CS input consists of binary values (as opposed to ``full'' field elements). + */ +template +r1cs_example generate_r1cs_example_with_binary_input(const size_t num_constraints, + const size_t num_inputs); + +} // libsnark + +#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.tcc" + +#endif // R1CS_EXAMPLES_HPP_ diff --git a/src/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.tcc b/src/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.tcc new file mode 100644 index 000000000..defa07721 --- /dev/null +++ b/src/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.tcc @@ -0,0 +1,164 @@ +/** @file + ***************************************************************************** + + Implementation of functions to sample R1CS examples with prescribed parameters + (according to some distribution). + + See r1cs_examples.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_EXAMPLES_TCC_ +#define R1CS_EXAMPLES_TCC_ + +#include + +#include "common/utils.hpp" + +namespace libsnark { + +template +r1cs_example generate_r1cs_example_with_field_input(const size_t num_constraints, + const size_t num_inputs) +{ + enter_block("Call to generate_r1cs_example_with_field_input"); + + assert(num_inputs <= num_constraints + 2); + + r1cs_constraint_system cs; + cs.primary_input_size = num_inputs; + cs.auxiliary_input_size = 2 + num_constraints - num_inputs; // TODO: explain this + + r1cs_variable_assignment full_variable_assignment; + FieldT a = FieldT::random_element(); + FieldT b = FieldT::random_element(); + full_variable_assignment.push_back(a); + full_variable_assignment.push_back(b); + + for (size_t i = 0; i < num_constraints-1; ++i) + { + linear_combination A, B, C; + + if (i % 2) + { + // a * b = c + A.add_term(i+1, 1); + B.add_term(i+2, 1); + C.add_term(i+3, 1); + FieldT tmp = a*b; + full_variable_assignment.push_back(tmp); + a = b; b = tmp; + } + else + { + // a + b = c + B.add_term(0, 1); + A.add_term(i+1, 1); + A.add_term(i+2, 1); + C.add_term(i+3, 1); + FieldT tmp = a+b; + full_variable_assignment.push_back(tmp); + a = b; b = tmp; + } + + cs.add_constraint(r1cs_constraint(A, B, C)); + } + + linear_combination A, B, C; + FieldT fin = FieldT::zero(); + for (size_t i = 1; i < cs.num_variables(); ++i) + { + A.add_term(i, 1); + B.add_term(i, 1); + fin = fin + full_variable_assignment[i-1]; + } + C.add_term(cs.num_variables(), 1); + cs.add_constraint(r1cs_constraint(A, B, C)); + full_variable_assignment.push_back(fin.squared()); + + /* split variable assignment */ + r1cs_primary_input primary_input(full_variable_assignment.begin(), full_variable_assignment.begin() + num_inputs); + r1cs_primary_input auxiliary_input(full_variable_assignment.begin() + num_inputs, full_variable_assignment.end()); + + /* sanity checks */ + assert(cs.num_variables() == full_variable_assignment.size()); + assert(cs.num_variables() >= num_inputs); + assert(cs.num_inputs() == num_inputs); + assert(cs.num_constraints() == num_constraints); + assert(cs.is_satisfied(primary_input, auxiliary_input)); + + leave_block("Call to generate_r1cs_example_with_field_input"); + + return r1cs_example(std::move(cs), std::move(primary_input), std::move(auxiliary_input)); +} + +template +r1cs_example generate_r1cs_example_with_binary_input(const size_t num_constraints, + const size_t num_inputs) +{ + enter_block("Call to generate_r1cs_example_with_binary_input"); + + assert(num_inputs >= 1); + + r1cs_constraint_system cs; + cs.primary_input_size = num_inputs; + cs.auxiliary_input_size = num_constraints; /* we will add one auxiliary variable per constraint */ + + r1cs_variable_assignment full_variable_assignment; + for (size_t i = 0; i < num_inputs; ++i) + { + full_variable_assignment.push_back(FieldT(std::rand() % 2)); + } + + size_t lastvar = num_inputs-1; + for (size_t i = 0; i < num_constraints; ++i) + { + ++lastvar; + const size_t u = (i == 0 ? std::rand() % num_inputs : std::rand() % i); + const size_t v = (i == 0 ? 
std::rand() % num_inputs : std::rand() % i); + + /* chose two random bits and XOR them together: + res = u + v - 2 * u * v + 2 * u * v = u + v - res + */ + linear_combination A, B, C; + A.add_term(u+1, 2); + B.add_term(v+1, 1); + if (u == v) + { + C.add_term(u+1, 2); + } + else + { + C.add_term(u+1, 1); + C.add_term(v+1, 1); + } + C.add_term(lastvar+1, -FieldT::one()); + + cs.add_constraint(r1cs_constraint(A, B, C)); + full_variable_assignment.push_back(full_variable_assignment[u] + full_variable_assignment[v] - full_variable_assignment[u] * full_variable_assignment[v] - full_variable_assignment[u] * full_variable_assignment[v]); + } + + /* split variable assignment */ + r1cs_primary_input primary_input(full_variable_assignment.begin(), full_variable_assignment.begin() + num_inputs); + r1cs_primary_input auxiliary_input(full_variable_assignment.begin() + num_inputs, full_variable_assignment.end()); + + /* sanity checks */ + assert(cs.num_variables() == full_variable_assignment.size()); + assert(cs.num_variables() >= num_inputs); + assert(cs.num_inputs() == num_inputs); + assert(cs.num_constraints() == num_constraints); + assert(cs.is_satisfied(primary_input, auxiliary_input)); + + leave_block("Call to generate_r1cs_example_with_binary_input"); + + return r1cs_example(std::move(cs), std::move(primary_input), std::move(auxiliary_input)); +} + +} // libsnark + +#endif // R1CS_EXAMPLES_TCC diff --git a/src/relations/constraint_satisfaction_problems/r1cs/r1cs.hpp b/src/relations/constraint_satisfaction_problems/r1cs/r1cs.hpp new file mode 100644 index 000000000..ca3acb3a9 --- /dev/null +++ b/src/relations/constraint_satisfaction_problems/r1cs/r1cs.hpp @@ -0,0 +1,153 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for: + - a R1CS constraint, + - a R1CS variable assignment, and + - a R1CS constraint system. + + Above, R1CS stands for "Rank-1 Constraint System". + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_HPP_ +#define R1CS_HPP_ + +#include +#include +#include +#include +#include + +#include "relations/variable.hpp" + +namespace libsnark { + +/************************* R1CS constraint ***********************************/ + +template +class r1cs_constraint; + +template +std::ostream& operator<<(std::ostream &out, const r1cs_constraint &c); + +template +std::istream& operator>>(std::istream &in, r1cs_constraint &c); + +/** + * A R1CS constraint is a formal expression of the form + * + * < A , X > * < B , X > = < C , X > , + * + * where X = (x_0,x_1,...,x_m) is a vector of formal variables and A,B,C each + * consist of 1+m elements in . + * + * A R1CS constraint is used to construct a R1CS constraint system (see below). 
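The XOR gadget built just above is a minimal instance of this constraint form: with A = 2*u, B = v and C = u + v - res, the equation <A,X> * <B,X> = <C,X> forces res = u XOR v whenever u and v are bits. A standalone sketch (editor's illustration, using plain integers rather than field elements) checking this over the whole truth table:

#include <cassert>

int main()
{
    for (int u = 0; u <= 1; ++u)
    {
        for (int v = 0; v <= 1; ++v)
        {
            const int res = u ^ v;               // the witness value pushed by the generator
            assert((2 * u) * v == u + v - res);  // <A,X> * <B,X> = <C,X>
        }
    }
    return 0;
}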
+ */ +template +class r1cs_constraint { +public: + + linear_combination a, b, c; + + r1cs_constraint() {}; + r1cs_constraint(const linear_combination &a, + const linear_combination &b, + const linear_combination &c); + + r1cs_constraint(const std::initializer_list > &A, + const std::initializer_list > &B, + const std::initializer_list > &C); + + bool operator==(const r1cs_constraint &other) const; + + friend std::ostream& operator<< (std::ostream &out, const r1cs_constraint &c); + friend std::istream& operator>> (std::istream &in, r1cs_constraint &c); +}; + +/************************* R1CS variable assignment **************************/ + +/** + * A R1CS variable assignment is a vector of elements that represents + * a candidate solution to a R1CS constraint system (see below). + */ + +/* TODO: specify that it does *NOT* include the constant 1 */ +template +using r1cs_primary_input = std::vector; + +template +using r1cs_auxiliary_input = std::vector; + +template +using r1cs_variable_assignment = std::vector; /* note the changed name! (TODO: remove this comment after primary_input transition is complete) */ + +/************************* R1CS constraint system ****************************/ + +template +class r1cs_constraint_system; + +template +std::ostream& operator<<(std::ostream &out, const r1cs_constraint_system &cs); + +template +std::istream& operator>>(std::istream &in, r1cs_constraint_system &cs); + +/** + * A system of R1CS constraints looks like + * + * { < A_k , X > * < B_k , X > = < C_k , X > }_{k=1}^{n} . + * + * In other words, the system is satisfied if and only if there exist a + * USCS variable assignment for which each R1CS constraint is satisfied. + * + * NOTE: + * The 0-th variable (i.e., "x_{0}") always represents the constant 1. + * Thus, the 0-th variable is not included in num_variables. + */ +template +class r1cs_constraint_system { +public: + size_t primary_input_size; + size_t auxiliary_input_size; + + std::vector > constraints; + + r1cs_constraint_system() : primary_input_size(0), auxiliary_input_size(0) {} + + size_t num_inputs() const; + size_t num_variables() const; + size_t num_constraints() const; + +#ifdef DEBUG + std::map constraint_annotations; + std::map variable_annotations; +#endif + + bool is_valid() const; + bool is_satisfied(const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input) const; + + void add_constraint(const r1cs_constraint &c); + void add_constraint(const r1cs_constraint &c, const std::string &annotation); + + void swap_AB_if_beneficial(); + + bool operator==(const r1cs_constraint_system &other) const; + + friend std::ostream& operator<< (std::ostream &out, const r1cs_constraint_system &cs); + friend std::istream& operator>> (std::istream &in, r1cs_constraint_system &cs); + + void report_linear_constraint_statistics() const; +}; + + +} // libsnark + +#include "relations/constraint_satisfaction_problems/r1cs/r1cs.tcc" + +#endif // R1CS_HPP_ diff --git a/src/relations/constraint_satisfaction_problems/r1cs/r1cs.tcc b/src/relations/constraint_satisfaction_problems/r1cs/r1cs.tcc new file mode 100644 index 000000000..0faa56a87 --- /dev/null +++ b/src/relations/constraint_satisfaction_problems/r1cs/r1cs.tcc @@ -0,0 +1,310 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for: + - a R1CS constraint, + - a R1CS variable assignment, and + - a R1CS constraint system. + + See r1cs.hpp . 
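Because index 0 always denotes the constant 1, constants enter a constraint as coefficients on x_0; the field-input example generator above uses exactly this in its "a + b = c" constraints via B.add_term(0, 1). The sketch below (editor's illustration; constant_one_demo is a hypothetical helper name) encodes x1 + 2 = x2 as (2*x_0 + x1) * (x_0) = (x2):

#include <cassert>
#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"

template<typename FieldT>
bool constant_one_demo()
{
    r1cs_constraint_system<FieldT> cs;
    cs.primary_input_size = 1;    // x1
    cs.auxiliary_input_size = 1;  // x2

    linear_combination<FieldT> A, B, C;
    A.add_term(0, 2);  // 2 * x_0, i.e. the constant 2
    A.add_term(1, 1);  // + x1
    B.add_term(0, 1);  // x_0 = 1
    C.add_term(2, 1);  // x2
    cs.add_constraint(r1cs_constraint<FieldT>(A, B, C));

    // x1 = 3, x2 = 5 satisfies (2 + 3) * 1 = 5; note that the assignment
    // vectors start at x_1 -- the constant x_0 is never stored explicitly.
    const r1cs_primary_input<FieldT> primary = { FieldT(3) };
    const r1cs_auxiliary_input<FieldT> auxiliary = { FieldT(5) };
    return cs.is_satisfied(primary, auxiliary);
}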
+ + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_TCC_ +#define R1CS_TCC_ + +#include +#include +#include +#include "common/utils.hpp" +#include "common/profiling.hpp" +#include "algebra/fields/bigint.hpp" + +namespace libsnark { + +template +r1cs_constraint::r1cs_constraint(const linear_combination &a, + const linear_combination &b, + const linear_combination &c) : + a(a), b(b), c(c) +{ +} + +template +r1cs_constraint::r1cs_constraint(const std::initializer_list > &A, + const std::initializer_list > &B, + const std::initializer_list > &C) +{ + for (auto lc_A : A) + { + a.terms.insert(a.terms.end(), lc_A.terms.begin(), lc_A.terms.end()); + } + for (auto lc_B : B) + { + b.terms.insert(b.terms.end(), lc_B.terms.begin(), lc_B.terms.end()); + } + for (auto lc_C : C) + { + c.terms.insert(c.terms.end(), lc_C.terms.begin(), lc_C.terms.end()); + } +} + +template +bool r1cs_constraint::operator==(const r1cs_constraint &other) const +{ + return (this->a == other.a && + this->b == other.b && + this->c == other.c); +} + +template +std::ostream& operator<<(std::ostream &out, const r1cs_constraint &c) +{ + out << c.a; + out << c.b; + out << c.c; + + return out; +} + +template +std::istream& operator>>(std::istream &in, r1cs_constraint &c) +{ + in >> c.a; + in >> c.b; + in >> c.c; + + return in; +} + +template +size_t r1cs_constraint_system::num_inputs() const +{ + return primary_input_size; +} + +template +size_t r1cs_constraint_system::num_variables() const +{ + return primary_input_size + auxiliary_input_size; +} + + +template +size_t r1cs_constraint_system::num_constraints() const +{ + return constraints.size(); +} + +template +bool r1cs_constraint_system::is_valid() const +{ + if (this->num_inputs() > this->num_variables()) return false; + + for (size_t c = 0; c < constraints.size(); ++c) + { + if (!(constraints[c].a.is_valid(this->num_variables()) && + constraints[c].b.is_valid(this->num_variables()) && + constraints[c].c.is_valid(this->num_variables()))) + { + return false; + } + } + + return true; +} + +template +void dump_r1cs_constraint(const r1cs_constraint &constraint, + const r1cs_variable_assignment &full_variable_assignment, + const std::map &variable_annotations) +{ + printf("terms for a:\n"); constraint.a.print_with_assignment(full_variable_assignment, variable_annotations); + printf("terms for b:\n"); constraint.b.print_with_assignment(full_variable_assignment, variable_annotations); + printf("terms for c:\n"); constraint.c.print_with_assignment(full_variable_assignment, variable_annotations); +} + +template +bool r1cs_constraint_system::is_satisfied(const r1cs_primary_input &primary_input, + const r1cs_auxiliary_input &auxiliary_input) const +{ + assert(primary_input.size() == num_inputs()); + assert(primary_input.size() + auxiliary_input.size() == num_variables()); + + r1cs_variable_assignment full_variable_assignment = primary_input; + full_variable_assignment.insert(full_variable_assignment.end(), auxiliary_input.begin(), auxiliary_input.end()); + + for (size_t c = 0; c < constraints.size(); ++c) + { + const FieldT ares = constraints[c].a.evaluate(full_variable_assignment); + const FieldT bres = constraints[c].b.evaluate(full_variable_assignment); + const FieldT cres = 
constraints[c].c.evaluate(full_variable_assignment); + + if (!(ares*bres == cres)) + { +#ifdef DEBUG + auto it = constraint_annotations.find(c); + printf("constraint %zu (%s) unsatisfied\n", c, (it == constraint_annotations.end() ? "no annotation" : it->second.c_str())); + printf(" = "); ares.print(); + printf(" = "); bres.print(); + printf(" = "); cres.print(); + printf("constraint was:\n"); + dump_r1cs_constraint(constraints[c], full_variable_assignment, variable_annotations); +#endif // DEBUG + return false; + } + } + + return true; +} + +template +void r1cs_constraint_system::add_constraint(const r1cs_constraint &c) +{ + constraints.emplace_back(c); +} + +template +void r1cs_constraint_system::add_constraint(const r1cs_constraint &c, const std::string &annotation) +{ +#ifdef DEBUG + constraint_annotations[constraints.size()] = annotation; +#endif + constraints.emplace_back(c); +} + +template +void r1cs_constraint_system::swap_AB_if_beneficial() +{ + enter_block("Call to r1cs_constraint_system::swap_AB_if_beneficial"); + + enter_block("Estimate densities"); + bit_vector touched_by_A(this->num_variables() + 1, false), touched_by_B(this->num_variables() + 1, false); + + for (size_t i = 0; i < this->constraints.size(); ++i) + { + for (size_t j = 0; j < this->constraints[i].a.terms.size(); ++j) + { + touched_by_A[this->constraints[i].a.terms[j].index] = true; + } + + for (size_t j = 0; j < this->constraints[i].b.terms.size(); ++j) + { + touched_by_B[this->constraints[i].b.terms[j].index] = true; + } + } + + size_t non_zero_A_count = 0, non_zero_B_count = 0; + for (size_t i = 0; i < this->num_variables() + 1; ++i) + { + non_zero_A_count += touched_by_A[i] ? 1 : 0; + non_zero_B_count += touched_by_B[i] ? 1 : 0; + } + + if (!inhibit_profiling_info) + { + print_indent(); printf("* Non-zero A-count (estimate): %zu\n", non_zero_A_count); + print_indent(); printf("* Non-zero B-count (estimate): %zu\n", non_zero_B_count); + } + leave_block("Estimate densities"); + + if (non_zero_B_count > non_zero_A_count) + { + enter_block("Perform the swap"); + for (size_t i = 0; i < this->constraints.size(); ++i) + { + std::swap(this->constraints[i].a, this->constraints[i].b); + } + leave_block("Perform the swap"); + } + else + { + print_indent(); printf("Swap is not beneficial, not performing\n"); + } + + leave_block("Call to r1cs_constraint_system::swap_AB_if_beneficial"); +} + +template +bool r1cs_constraint_system::operator==(const r1cs_constraint_system &other) const +{ + return (this->constraints == other.constraints && + this->primary_input_size == other.primary_input_size && + this->auxiliary_input_size == other.auxiliary_input_size); +} + +template +std::ostream& operator<<(std::ostream &out, const r1cs_constraint_system &cs) +{ + out << cs.primary_input_size << "\n"; + out << cs.auxiliary_input_size << "\n"; + + out << cs.num_constraints() << "\n"; + for (const r1cs_constraint& c : cs.constraints) + { + out << c; + } + + return out; +} + +template +std::istream& operator>>(std::istream &in, r1cs_constraint_system &cs) +{ + in >> cs.primary_input_size; + in >> cs.auxiliary_input_size; + + cs.constraints.clear(); + + size_t s; + in >> s; + + char b; + in.read(&b, 1); + + cs.constraints.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + r1cs_constraint c; + in >> c; + cs.constraints.emplace_back(c); + } + + return in; +} + +template +void r1cs_constraint_system::report_linear_constraint_statistics() const +{ +#ifdef DEBUG + for (size_t i = 0; i < constraints.size(); ++i) + { + auto &constr = 
constraints[i]; + bool a_is_const = true; + for (auto &t : constr.a.terms) + { + a_is_const = a_is_const && (t.index == 0); + } + + bool b_is_const = true; + for (auto &t : constr.b.terms) + { + b_is_const = b_is_const && (t.index == 0); + } + + if (a_is_const || b_is_const) + { + auto it = constraint_annotations.find(i); + printf("%s\n", (it == constraint_annotations.end() ? FORMAT("", "constraint_%zu", i) : it->second).c_str()); + } + } +#endif +} + +} // libsnark +#endif // R1CS_TCC_ diff --git a/src/relations/variable.hpp b/src/relations/variable.hpp new file mode 100644 index 000000000..a9a1449b8 --- /dev/null +++ b/src/relations/variable.hpp @@ -0,0 +1,213 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for: + - a variable (i.e., x_i), + - a linear term (i.e., a_i * x_i), and + - a linear combination (i.e., sum_i a_i * x_i). + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef VARIABLE_HPP_ +#define VARIABLE_HPP_ + +#include +#include +#include +#include + +namespace libsnark { + +/** + * Mnemonic typedefs. + */ +typedef size_t var_index_t; +typedef long integer_coeff_t; + +/** + * Forward declaration. + */ +template +class linear_term; + +/** + * Forward declaration. + */ +template +class linear_combination; + +/********************************* Variable **********************************/ + +/** + * A variable represents a formal expresison of the form "x_{index}". + */ +template +class variable { +public: + + var_index_t index; + + variable(const var_index_t index = 0) : index(index) {}; + + linear_term operator*(const integer_coeff_t int_coeff) const; + linear_term operator*(const FieldT &field_coeff) const; + + linear_combination operator+(const linear_combination &other) const; + linear_combination operator-(const linear_combination &other) const; + + linear_term operator-() const; + + bool operator==(const variable &other) const; +}; + +template +linear_term operator*(const integer_coeff_t int_coeff, const variable &var); + +template +linear_term operator*(const FieldT &field_coeff, const variable &var); + +template +linear_combination operator+(const integer_coeff_t int_coeff, const variable &var); + +template +linear_combination operator+(const FieldT &field_coeff, const variable &var); + +template +linear_combination operator-(const integer_coeff_t int_coeff, const variable &var); + +template +linear_combination operator-(const FieldT &field_coeff, const variable &var); + + +/****************************** Linear term **********************************/ + +/** + * A linear term represents a formal expression of the form "coeff * x_{index}". 
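The operator overloads declared in this header let these formal expressions be composed directly in C++. A short sketch (editor's illustration; linear_combination_demo is a hypothetical name) builds 3*x1 + x2 - 5 and evaluates it, where assignment[i-1] supplies the value of x_i and x_0 is implicitly 1:

#include <cassert>
#include <vector>
#include "relations/variable.hpp"

template<typename FieldT>
void linear_combination_demo()
{
    const variable<FieldT> x1(1), x2(2);

    // lc = 3*x1 + x2 - 5; the constant term is attached to x_0 internally.
    const linear_combination<FieldT> lc = 3 * x1 + x2 - FieldT(5);

    const std::vector<FieldT> assignment = { FieldT(4), FieldT(7) };  // x1 = 4, x2 = 7
    assert(lc.evaluate(assignment) == FieldT(14));                    // 3*4 + 7 - 5
}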
+ */ +template +class linear_term { +public: + + var_index_t index = 0; + FieldT coeff; + + linear_term() {}; + linear_term(const variable &var); + linear_term(const variable &var, const integer_coeff_t int_coeff); + linear_term(const variable &var, const FieldT &field_coeff); + + linear_term operator*(const integer_coeff_t int_coeff) const; + linear_term operator*(const FieldT &field_coeff) const; + + linear_combination operator+(const linear_combination &other) const; + linear_combination operator-(const linear_combination &other) const; + + linear_term operator-() const; + + bool operator==(const linear_term &other) const; +}; + +template +linear_term operator*(const integer_coeff_t int_coeff, const linear_term <); + +template +linear_term operator*(const FieldT &field_coeff, const linear_term <); + +template +linear_combination operator+(const integer_coeff_t int_coeff, const linear_term <); + +template +linear_combination operator+(const FieldT &field_coeff, const linear_term <); + +template +linear_combination operator-(const integer_coeff_t int_coeff, const linear_term <); + +template +linear_combination operator-(const FieldT &field_coeff, const linear_term <); + + +/***************************** Linear combination ****************************/ + +template +class linear_combination; + +template +std::ostream& operator<<(std::ostream &out, const linear_combination &lc); + +template +std::istream& operator>>(std::istream &in, linear_combination &lc); + +/** + * A linear combination represents a formal expression of the form "sum_i coeff_i * x_{index_i}". + */ +template +class linear_combination { +public: + + std::vector > terms; + + linear_combination() {}; + linear_combination(const integer_coeff_t int_coeff); + linear_combination(const FieldT &field_coeff); + linear_combination(const variable &var); + linear_combination(const linear_term <); + linear_combination(const std::vector > &all_terms); + + /* for supporting range-based for loops over linear_combination */ + typename std::vector >::const_iterator begin() const; + typename std::vector >::const_iterator end() const; + + void add_term(const variable &var); + void add_term(const variable &var, const integer_coeff_t int_coeff); + void add_term(const variable &var, const FieldT &field_coeff); + + void add_term(const linear_term <); + + FieldT evaluate(const std::vector &assignment) const; + + linear_combination operator*(const integer_coeff_t int_coeff) const; + linear_combination operator*(const FieldT &field_coeff) const; + + linear_combination operator+(const linear_combination &other) const; + + linear_combination operator-(const linear_combination &other) const; + linear_combination operator-() const; + + bool operator==(const linear_combination &other) const; + + bool is_valid(const size_t num_variables) const; + + void print(const std::map &variable_annotations = std::map()) const; + void print_with_assignment(const std::vector &full_assignment, const std::map &variable_annotations = std::map()) const; + + friend std::ostream& operator<< (std::ostream &out, const linear_combination &lc); + friend std::istream& operator>> (std::istream &in, linear_combination &lc); +}; + +template +linear_combination operator*(const integer_coeff_t int_coeff, const linear_combination &lc); + +template +linear_combination operator*(const FieldT &field_coeff, const linear_combination &lc); + +template +linear_combination operator+(const integer_coeff_t int_coeff, const linear_combination &lc); + +template +linear_combination operator+(const 
FieldT &field_coeff, const linear_combination &lc); + +template +linear_combination operator-(const integer_coeff_t int_coeff, const linear_combination &lc); + +template +linear_combination operator-(const FieldT &field_coeff, const linear_combination &lc); + +} // libsnark + +#include "relations/variable.tcc" + +#endif // VARIABLE_HPP_ diff --git a/src/relations/variable.tcc b/src/relations/variable.tcc new file mode 100644 index 000000000..4c4cab97f --- /dev/null +++ b/src/relations/variable.tcc @@ -0,0 +1,512 @@ +/** @file + ***************************************************************************** + + Implementation of interfaces for: + - a variable (i.e., x_i), + - a linear term (i.e., a_i * x_i), and + - a linear combination (i.e., sum_i a_i * x_i). + + See variabe.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef VARIABLE_TCC_ +#define VARIABLE_TCC_ + +#include +#include + +#include "algebra/fields/bigint.hpp" + +namespace libsnark { + +template +linear_term variable::operator*(const integer_coeff_t int_coeff) const +{ + return linear_term(*this, int_coeff); +} + +template +linear_term variable::operator*(const FieldT &field_coeff) const +{ + return linear_term(*this, field_coeff); +} + +template +linear_combination variable::operator+(const linear_combination &other) const +{ + linear_combination result; + + result.add_term(*this); + result.terms.insert(result.terms.begin(), other.terms.begin(), other.terms.end()); + + return result; +} + +template +linear_combination variable::operator-(const linear_combination &other) const +{ + return (*this) + (-other); +} + +template +linear_term variable::operator-() const +{ + return linear_term(*this, -FieldT::one()); +} + +template +bool variable::operator==(const variable &other) const +{ + return (this->index == other.index); +} + +template +linear_term operator*(const integer_coeff_t int_coeff, const variable &var) +{ + return linear_term(var, int_coeff); +} + +template +linear_term operator*(const FieldT &field_coeff, const variable &var) +{ + return linear_term(var, field_coeff); +} + +template +linear_combination operator+(const integer_coeff_t int_coeff, const variable &var) +{ + return linear_combination(int_coeff) + var; +} + +template +linear_combination operator+(const FieldT &field_coeff, const variable &var) +{ + return linear_combination(field_coeff) + var; +} + +template +linear_combination operator-(const integer_coeff_t int_coeff, const variable &var) +{ + return linear_combination(int_coeff) - var; +} + +template +linear_combination operator-(const FieldT &field_coeff, const variable &var) +{ + return linear_combination(field_coeff) - var; +} + +template +linear_term::linear_term(const variable &var) : + index(var.index), coeff(FieldT::one()) +{ +} + +template +linear_term::linear_term(const variable &var, const integer_coeff_t int_coeff) : + index(var.index), coeff(FieldT(int_coeff)) +{ +} + +template +linear_term::linear_term(const variable &var, const FieldT &coeff) : + index(var.index), coeff(coeff) +{ +} + +template +linear_term linear_term::operator*(const integer_coeff_t int_coeff) const +{ + return (this->operator*(FieldT(int_coeff))); +} + +template +linear_term linear_term::operator*(const FieldT &field_coeff) const +{ + return 
linear_term(this->index, field_coeff * this->coeff); +} + +template +linear_combination operator+(const integer_coeff_t int_coeff, const linear_term <) +{ + return linear_combination(int_coeff) + lt; +} + +template +linear_combination operator+(const FieldT &field_coeff, const linear_term <) +{ + return linear_combination(field_coeff) + lt; +} + +template +linear_combination operator-(const integer_coeff_t int_coeff, const linear_term <) +{ + return linear_combination(int_coeff) - lt; +} + +template +linear_combination operator-(const FieldT &field_coeff, const linear_term <) +{ + return linear_combination(field_coeff) - lt; +} + +template +linear_combination linear_term::operator+(const linear_combination &other) const +{ + return linear_combination(*this) + other; +} + +template +linear_combination linear_term::operator-(const linear_combination &other) const +{ + return (*this) + (-other); +} + +template +linear_term linear_term::operator-() const +{ + return linear_term(this->index, -this->coeff); +} + +template +bool linear_term::operator==(const linear_term &other) const +{ + return (this->index == other.index && + this->coeff == other.coeff); +} + +template +linear_term operator*(const integer_coeff_t int_coeff, const linear_term <) +{ + return FieldT(int_coeff) * lt; +} + +template +linear_term operator*(const FieldT &field_coeff, const linear_term <) +{ + return linear_term(lt.index, field_coeff * lt.coeff); +} + +template +linear_combination::linear_combination(const integer_coeff_t int_coeff) +{ + this->add_term(linear_term(0, int_coeff)); +} + +template +linear_combination::linear_combination(const FieldT &field_coeff) +{ + this->add_term(linear_term(0, field_coeff)); +} + +template +linear_combination::linear_combination(const variable &var) +{ + this->add_term(var); +} + +template +linear_combination::linear_combination(const linear_term <) +{ + this->add_term(lt); +} + +template +typename std::vector >::const_iterator linear_combination::begin() const +{ + return terms.begin(); +} + +template +typename std::vector >::const_iterator linear_combination::end() const +{ + return terms.end(); +} + +template +void linear_combination::add_term(const variable &var) +{ + this->terms.emplace_back(linear_term(var.index, FieldT::one())); +} + +template +void linear_combination::add_term(const variable &var, const integer_coeff_t int_coeff) +{ + this->terms.emplace_back(linear_term(var.index, int_coeff)); +} + +template +void linear_combination::add_term(const variable &var, const FieldT &coeff) +{ + this->terms.emplace_back(linear_term(var.index, coeff)); +} + +template +void linear_combination::add_term(const linear_term &other) +{ + this->terms.emplace_back(other); +} + +template +linear_combination linear_combination::operator*(const integer_coeff_t int_coeff) const +{ + return (*this) * FieldT(int_coeff); +} + +template +FieldT linear_combination::evaluate(const std::vector &assignment) const +{ + FieldT acc = FieldT::zero(); + for (auto < : terms) + { + acc += (lt.index == 0 ? 
FieldT::one() : assignment[lt.index-1]) * lt.coeff; + } + return acc; +} + +template +linear_combination linear_combination::operator*(const FieldT &field_coeff) const +{ + linear_combination result; + result.terms.reserve(this->terms.size()); + for (const linear_term < : this->terms) + { + result.terms.emplace_back(lt * field_coeff); + } + return result; +} + +template +linear_combination linear_combination::operator+(const linear_combination &other) const +{ + linear_combination result; + + auto it1 = this->terms.begin(); + auto it2 = other.terms.begin(); + + /* invariant: it1 and it2 always point to unprocessed items in the corresponding linear combinations */ + while (it1 != this->terms.end() && it2 != other.terms.end()) + { + if (it1->index < it2->index) + { + result.terms.emplace_back(*it1); + ++it1; + } + else if (it1->index > it2->index) + { + result.terms.emplace_back(*it2); + ++it2; + } + else + { + /* it1->index == it2->index */ + result.terms.emplace_back(linear_term(variable(it1->index), it1->coeff + it2->coeff)); + ++it1; + ++it2; + } + } + + if (it1 != this->terms.end()) + { + result.terms.insert(result.terms.end(), it1, this->terms.end()); + } + else + { + result.terms.insert(result.terms.end(), it2, other.terms.end()); + } + + return result; +} + +template +linear_combination linear_combination::operator-(const linear_combination &other) const +{ + return (*this) + (-other); +} + +template +linear_combination linear_combination::operator-() const +{ + return (*this) * (-FieldT::one()); +} + +template +bool linear_combination::operator==(const linear_combination &other) const +{ + return (this->terms == other.terms); +} + +template +bool linear_combination::is_valid(const size_t num_variables) const +{ + /* check that all terms in linear combination are sorted */ + for (size_t i = 1; i < terms.size(); ++i) + { + if (terms[i-1].index >= terms[i].index) + { + return false; + } + } + + /* check that the variables are in proper range. as the variables + are sorted, it suffices to check the last term */ + if ((--terms.end())->index >= num_variables) + { + return false; + } + + return true; +} + +template +void linear_combination::print(const std::map &variable_annotations) const +{ + for (auto < : terms) + { + if (lt.index == 0) + { + printf(" 1 * "); + lt.coeff.print(); + } + else + { + auto it = variable_annotations.find(lt.index); + printf(" x_%zu (%s) * ", lt.index, (it == variable_annotations.end() ? "no annotation" : it->second.c_str())); + lt.coeff.print(); + } + } +} + +template +void linear_combination::print_with_assignment(const std::vector &full_assignment, const std::map &variable_annotations) const +{ + for (auto < : terms) + { + if (lt.index == 0) + { + printf(" 1 * "); + lt.coeff.print(); + } + else + { + printf(" x_%zu * ", lt.index); + lt.coeff.print(); + + auto it = variable_annotations.find(lt.index); + printf(" where x_%zu (%s) was assigned value ", lt.index, + (it == variable_annotations.end() ? "no annotation" : it->second.c_str())); + full_assignment[lt.index-1].print(); + printf(" i.e. 
negative of "); + (-full_assignment[lt.index-1]).print(); + } + } +} + +template +std::ostream& operator<<(std::ostream &out, const linear_combination &lc) +{ + out << lc.terms.size() << "\n"; + for (const linear_term& lt : lc.terms) + { + out << lt.index << "\n"; + out << lt.coeff << OUTPUT_NEWLINE; + } + + return out; +} + +template +std::istream& operator>>(std::istream &in, linear_combination &lc) +{ + lc.terms.clear(); + + size_t s; + in >> s; + + consume_newline(in); + + lc.terms.reserve(s); + + for (size_t i = 0; i < s; ++i) + { + linear_term lt; + in >> lt.index; + consume_newline(in); + in >> lt.coeff; + consume_OUTPUT_NEWLINE(in); + lc.terms.emplace_back(lt); + } + + return in; +} + +template +linear_combination operator*(const integer_coeff_t int_coeff, const linear_combination &lc) +{ + return lc * int_coeff; +} + +template +linear_combination operator*(const FieldT &field_coeff, const linear_combination &lc) +{ + return lc * field_coeff; +} + +template +linear_combination operator+(const integer_coeff_t int_coeff, const linear_combination &lc) +{ + return linear_combination(int_coeff) + lc; +} + +template +linear_combination operator+(const FieldT &field_coeff, const linear_combination &lc) +{ + return linear_combination(field_coeff) + lc; +} + +template +linear_combination operator-(const integer_coeff_t int_coeff, const linear_combination &lc) +{ + return linear_combination(int_coeff) - lc; +} + +template +linear_combination operator-(const FieldT &field_coeff, const linear_combination &lc) +{ + return linear_combination(field_coeff) - lc; +} + +template +linear_combination::linear_combination(const std::vector > &all_terms) +{ + if (all_terms.empty()) + { + return; + } + + terms = all_terms; + std::sort(terms.begin(), terms.end(), [](linear_term a, linear_term b) { return a.index < b.index; }); + + auto result_it = terms.begin(); + for (auto it = ++terms.begin(); it != terms.end(); ++it) + { + if (it->index == result_it->index) + { + result_it->coeff += it->coeff; + } + else + { + *(++result_it) = *it; + } + } + terms.resize((result_it - terms.begin()) + 1); +} + +} // libsnark + +#endif // VARIABLE_TCC diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp new file mode 100644 index 000000000..fcd28abf3 --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp @@ -0,0 +1,35 @@ +/** @file + ***************************************************************************** + + Declaration of functionality that runs the R1CS ppzkSNARK for + a given R1CS example. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef RUN_R1CS_PPZKSNARK_HPP_ +#define RUN_R1CS_PPZKSNARK_HPP_ + +#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp" + +namespace libsnark { + +/** + * Runs the ppzkSNARK (generator, prover, and verifier) for a given + * R1CS example (specified by a constraint system, input, and witness). + * + * Optionally, also test the serialization routines for keys and proofs. + * (This takes additional time.) 
+ */ +template +bool run_r1cs_ppzksnark(const r1cs_example > &example, + const bool test_serialization); + +} // libsnark + +#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.tcc" + +#endif // RUN_R1CS_PPZKSNARK_HPP_ diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.tcc b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.tcc new file mode 100644 index 000000000..9bc875869 --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.tcc @@ -0,0 +1,114 @@ +/** @file + ***************************************************************************** + + Implementation of functionality that runs the R1CS ppzkSNARK for + a given R1CS example. + + See run_r1cs_ppzksnark.hpp . + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef RUN_R1CS_PPZKSNARK_TCC_ +#define RUN_R1CS_PPZKSNARK_TCC_ + +#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp" + +#include +#include + +#include "common/profiling.hpp" + +namespace libsnark { + +template +typename std::enable_if::type +test_affine_verifier(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof, + const bool expected_answer) +{ + print_header("R1CS ppzkSNARK Affine Verifier"); + const bool answer = r1cs_ppzksnark_affine_verifier_weak_IC(vk, primary_input, proof); + assert(answer == expected_answer); +} + +template +typename std::enable_if::type +test_affine_verifier(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof, + const bool expected_answer) +{ + UNUSED(vk, primary_input, proof, expected_answer); + print_header("R1CS ppzkSNARK Affine Verifier"); + printf("Affine verifier is not supported; not testing anything.\n"); +} + +/** + * The code below provides an example of all stages of running a R1CS ppzkSNARK. + * + * Of course, in a real-life scenario, we would have three distinct entities, + * mangled into one in the demonstration below. The three entities are as follows. + * (1) The "generator", which runs the ppzkSNARK generator on input a given + * constraint system CS to create a proving and a verification key for CS. + * (2) The "prover", which runs the ppzkSNARK prover on input the proving key, + * a primary input for CS, and an auxiliary input for CS. + * (3) The "verifier", which runs the ppzkSNARK verifier on input the verification key, + * a primary input for CS, and a proof. 
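Separated into the three roles just listed, the calls look as follows. This is a sketch only: setup, prove and verify are hypothetical wrapper names, and the function and type names are those used by the implementation below, except r1cs_ppzksnark_auxiliary_input, which is assumed to come from the included r1cs_ppzksnark_params.hpp.

#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp"

// (1) Generator: one-time trusted setup for a fixed constraint system CS.
template<typename ppT>
r1cs_ppzksnark_keypair<ppT> setup(const r1cs_constraint_system<Fr<ppT> > &cs)
{
    return r1cs_ppzksnark_generator<ppT>(cs);
}

// (2) Prover: needs the proving key plus the primary and auxiliary inputs.
template<typename ppT>
r1cs_ppzksnark_proof<ppT> prove(const r1cs_ppzksnark_proving_key<ppT> &pk,
                                const r1cs_ppzksnark_primary_input<ppT> &primary,
                                const r1cs_ppzksnark_auxiliary_input<ppT> &aux)
{
    return r1cs_ppzksnark_prover<ppT>(pk, primary, aux);
}

// (3) Verifier: needs only the verification key, the primary input and the proof.
template<typename ppT>
bool verify(const r1cs_ppzksnark_verification_key<ppT> &vk,
            const r1cs_ppzksnark_primary_input<ppT> &primary,
            const r1cs_ppzksnark_proof<ppT> &proof)
{
    return r1cs_ppzksnark_verifier_strong_IC<ppT>(vk, primary, proof);
}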
+ */ +template +bool run_r1cs_ppzksnark(const r1cs_example > &example, + const bool test_serialization) +{ + enter_block("Call to run_r1cs_ppzksnark"); + + print_header("R1CS ppzkSNARK Generator"); + r1cs_ppzksnark_keypair keypair = r1cs_ppzksnark_generator(example.constraint_system); + printf("\n"); print_indent(); print_mem("after generator"); + + print_header("Preprocess verification key"); + r1cs_ppzksnark_processed_verification_key pvk = r1cs_ppzksnark_verifier_process_vk(keypair.vk); + + if (test_serialization) + { + enter_block("Test serialization of keys"); + keypair.pk = reserialize >(keypair.pk); + keypair.vk = reserialize >(keypair.vk); + pvk = reserialize >(pvk); + leave_block("Test serialization of keys"); + } + + print_header("R1CS ppzkSNARK Prover"); + r1cs_ppzksnark_proof proof = r1cs_ppzksnark_prover(keypair.pk, example.primary_input, example.auxiliary_input); + printf("\n"); print_indent(); print_mem("after prover"); + + if (test_serialization) + { + enter_block("Test serialization of proof"); + proof = reserialize >(proof); + leave_block("Test serialization of proof"); + } + + print_header("R1CS ppzkSNARK Verifier"); + const bool ans = r1cs_ppzksnark_verifier_strong_IC(keypair.vk, example.primary_input, proof); + printf("\n"); print_indent(); print_mem("after verifier"); + printf("* The verification result is: %s\n", (ans ? "PASS" : "FAIL")); + + print_header("R1CS ppzkSNARK Online Verifier"); + const bool ans2 = r1cs_ppzksnark_online_verifier_strong_IC(pvk, example.primary_input, proof); + assert(ans == ans2); + + test_affine_verifier(keypair.vk, example.primary_input, proof, ans); + + leave_block("Call to run_r1cs_ppzksnark"); + + return ans; +} + +} // libsnark + +#endif // RUN_R1CS_PPZKSNARK_TCC_ diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark.cpp b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark.cpp new file mode 100644 index 000000000..5c5415028 --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark.cpp @@ -0,0 +1,71 @@ +/** @file + ***************************************************************************** + Profiling program that exercises the ppzkSNARK (first generator, then prover, + then verifier) on a synthetic R1CS instance. + + The command + + $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 Fr + + exercises the ppzkSNARK (first generator, then prover, then verifier) on an R1CS instance with 1000 equations and an input consisting of 10 field elements. + + (If you get the error `zmInit ERR:can't protect`, see the discussion [above](#elliptic-curve-choices).) + + The command + + $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 bytes + + does the same but now the input consists of 10 bytes. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#include +#include + +#include "common/default_types/r1cs_ppzksnark_pp.hpp" +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp" +#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp" + +using namespace libsnark; + +int main(int argc, const char * argv[]) +{ + default_r1cs_ppzksnark_pp::init_public_params(); + start_profiling(); + + if (argc == 2 && strcmp(argv[1], "-v") == 0) + { + print_compilation_info(); + return 0; + } + + if (argc != 3 && argc != 4) + { + printf("usage: %s num_constraints input_size [Fr|bytes]\n", argv[0]); + return 1; + } + const int num_constraints = atoi(argv[1]); + int input_size = atoi(argv[2]); + if (argc == 4) + { + assert(strcmp(argv[3], "Fr") == 0 || strcmp(argv[3], "bytes") == 0); + if (strcmp(argv[3], "bytes") == 0) + { + input_size = div_ceil(8 * input_size, Fr::capacity()); + } + } + + enter_block("Generate R1CS example"); + r1cs_example > example = generate_r1cs_example_with_field_input >(num_constraints, input_size); + leave_block("Generate R1CS example"); + + print_header("(enter) Profile R1CS ppzkSNARK"); + const bool test_serialization = true; + run_r1cs_ppzksnark(example, test_serialization); + print_header("(leave) Profile R1CS ppzkSNARK"); +} diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp new file mode 100644 index 000000000..a068b09fd --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp @@ -0,0 +1,479 @@ +/** @file + ***************************************************************************** + + Declaration of interfaces for a ppzkSNARK for R1CS. + + This includes: + - class for proving key + - class for verification key + - class for processed verification key + - class for key pair (proving key & verification key) + - class for proof + - generator algorithm + - prover algorithm + - verifier algorithm (with strong or weak input consistency) + - online verifier algorithm (with strong or weak input consistency) + + The implementation instantiates (a modification of) the protocol of \[PGHR13], + by following extending, and optimizing the approach described in \[BCTV14]. + + + Acronyms: + + - R1CS = "Rank-1 Constraint Systems" + - ppzkSNARK = "PreProcessing Zero-Knowledge Succinct Non-interactive ARgument of Knowledge" + + References: + + \[BCTV14]: + "Succinct Non-Interactive Zero Knowledge for a von Neumann Architecture", + Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza, + USENIX Security 2014, + + + \[PGHR13]: + "Pinocchio: Nearly practical verifiable computation", + Bryan Parno, Craig Gentry, Jon Howell, Mariana Raykova, + IEEE S&P 2013, + + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). 
+ * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_PPZKSNARK_HPP_ +#define R1CS_PPZKSNARK_HPP_ + +#include + +#include "algebra/curves/public_params.hpp" +#include "common/data_structures/accumulation_vector.hpp" +#include "algebra/knowledge_commitment/knowledge_commitment.hpp" +#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp" +#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark_params.hpp" + +namespace libsnark { + +/******************************** Proving key ********************************/ + +template +class r1cs_ppzksnark_proving_key; + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proving_key &pk); + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proving_key &pk); + +/** + * A proving key for the R1CS ppzkSNARK. + */ +template +class r1cs_ppzksnark_proving_key { +public: + knowledge_commitment_vector, G1 > A_query; + knowledge_commitment_vector, G1 > B_query; + knowledge_commitment_vector, G1 > C_query; + G1_vector H_query; + G1_vector K_query; + + r1cs_ppzksnark_proving_key() {}; + r1cs_ppzksnark_proving_key& operator=(const r1cs_ppzksnark_proving_key &other) = default; + r1cs_ppzksnark_proving_key(const r1cs_ppzksnark_proving_key &other) = default; + r1cs_ppzksnark_proving_key(r1cs_ppzksnark_proving_key &&other) = default; + r1cs_ppzksnark_proving_key(knowledge_commitment_vector, G1 > &&A_query, + knowledge_commitment_vector, G1 > &&B_query, + knowledge_commitment_vector, G1 > &&C_query, + G1_vector &&H_query, + G1_vector &&K_query) : + A_query(std::move(A_query)), + B_query(std::move(B_query)), + C_query(std::move(C_query)), + H_query(std::move(H_query)), + K_query(std::move(K_query)) + {}; + + size_t G1_size() const + { + return 2*(A_query.domain_size() + C_query.domain_size()) + B_query.domain_size() + H_query.size() + K_query.size(); + } + + size_t G2_size() const + { + return B_query.domain_size(); + } + + size_t G1_sparse_size() const + { + return 2*(A_query.size() + C_query.size()) + B_query.size() + H_query.size() + K_query.size(); + } + + size_t G2_sparse_size() const + { + return B_query.size(); + } + + size_t size_in_bits() const + { + return A_query.size_in_bits() + B_query.size_in_bits() + C_query.size_in_bits() + libsnark::size_in_bits(H_query) + libsnark::size_in_bits(K_query); + } + + void print_size() const + { + print_indent(); printf("* G1 elements in PK: %zu\n", this->G1_size()); + print_indent(); printf("* Non-zero G1 elements in PK: %zu\n", this->G1_sparse_size()); + print_indent(); printf("* G2 elements in PK: %zu\n", this->G2_size()); + print_indent(); printf("* Non-zero G2 elements in PK: %zu\n", this->G2_sparse_size()); + print_indent(); printf("* PK size in bits: %zu\n", this->size_in_bits()); + } + + bool operator==(const r1cs_ppzksnark_proving_key &other) const; + friend std::ostream& operator<< (std::ostream &out, const r1cs_ppzksnark_proving_key &pk); + friend std::istream& operator>> (std::istream &in, r1cs_ppzksnark_proving_key &pk); +}; + + +/******************************* Verification key ****************************/ + +template +class r1cs_ppzksnark_verification_key; + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_verification_key &vk); + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_verification_key &vk); + +/** + * A verification key for the R1CS ppzkSNARK. 
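+ *
+ * Illustrative sketch (editorial addition, not in the upstream comment): the
+ * verification key round-trips through the stream operators declared above,
+ * which is how it would typically be shipped to a verifier. Assuming <sstream>,
+ * <cassert> and a keypair obtained from the generator:
+ *
+ *   std::stringstream ss;
+ *   ss << keypair.vk;
+ *   r1cs_ppzksnark_verification_key<ppT> vk2;
+ *   ss >> vk2;
+ *   assert(vk2 == keypair.vk);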
+ */ +template +class r1cs_ppzksnark_verification_key { +public: + G2 alphaA_g2; + G1 alphaB_g1; + G2 alphaC_g2; + G2 gamma_g2; + G1 gamma_beta_g1; + G2 gamma_beta_g2; + G2 rC_Z_g2; + + accumulation_vector > encoded_IC_query; + + r1cs_ppzksnark_verification_key() = default; + r1cs_ppzksnark_verification_key(const G2 &alphaA_g2, + const G1 &alphaB_g1, + const G2 &alphaC_g2, + const G2 &gamma_g2, + const G1 &gamma_beta_g1, + const G2 &gamma_beta_g2, + const G2 &rC_Z_g2, + const accumulation_vector > &eIC) : + alphaA_g2(alphaA_g2), + alphaB_g1(alphaB_g1), + alphaC_g2(alphaC_g2), + gamma_g2(gamma_g2), + gamma_beta_g1(gamma_beta_g1), + gamma_beta_g2(gamma_beta_g2), + rC_Z_g2(rC_Z_g2), + encoded_IC_query(eIC) + {}; + + size_t G1_size() const + { + return 2 + encoded_IC_query.size(); + } + + size_t G2_size() const + { + return 5; + } + + size_t size_in_bits() const + { + return (2 * G1::size_in_bits() + encoded_IC_query.size_in_bits() + 5 * G2::size_in_bits()); + } + + void print_size() const + { + print_indent(); printf("* G1 elements in VK: %zu\n", this->G1_size()); + print_indent(); printf("* G2 elements in VK: %zu\n", this->G2_size()); + print_indent(); printf("* VK size in bits: %zu\n", this->size_in_bits()); + } + + bool operator==(const r1cs_ppzksnark_verification_key &other) const; + friend std::ostream& operator<< (std::ostream &out, const r1cs_ppzksnark_verification_key &vk); + friend std::istream& operator>> (std::istream &in, r1cs_ppzksnark_verification_key &vk); + + static r1cs_ppzksnark_verification_key dummy_verification_key(const size_t input_size); +}; + + +/************************ Processed verification key *************************/ + +template +class r1cs_ppzksnark_processed_verification_key; + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_processed_verification_key &pvk); + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_processed_verification_key &pvk); + +/** + * A processed verification key for the R1CS ppzkSNARK. + * + * Compared to a (non-processed) verification key, a processed verification key + * contains a small constant amount of additional pre-computed information that + * enables a faster verification time. + */ +template +class r1cs_ppzksnark_processed_verification_key { +public: + G2_precomp pp_G2_one_precomp; + G2_precomp vk_alphaA_g2_precomp; + G1_precomp vk_alphaB_g1_precomp; + G2_precomp vk_alphaC_g2_precomp; + G2_precomp vk_rC_Z_g2_precomp; + G2_precomp vk_gamma_g2_precomp; + G1_precomp vk_gamma_beta_g1_precomp; + G2_precomp vk_gamma_beta_g2_precomp; + + accumulation_vector > encoded_IC_query; + + bool operator==(const r1cs_ppzksnark_processed_verification_key &other) const; + friend std::ostream& operator<< (std::ostream &out, const r1cs_ppzksnark_processed_verification_key &pvk); + friend std::istream& operator>> (std::istream &in, r1cs_ppzksnark_processed_verification_key &pvk); +}; + + +/********************************** Key pair *********************************/ + +/** + * A key pair for the R1CS ppzkSNARK, which consists of a proving key and a verification key. 
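+ *
+ * Illustrative sketch (editorial addition): the generator returns a key pair
+ * whose two halves are handed to different parties; the verifier may also
+ * preprocess its half once to speed up later verifications. Here `cs` stands
+ * for an r1cs_ppzksnark_constraint_system<ppT> built elsewhere:
+ *
+ *   r1cs_ppzksnark_keypair<ppT> keypair = r1cs_ppzksnark_generator<ppT>(cs);
+ *   // keypair.pk goes to the prover, keypair.vk to the verifier(s)
+ *   r1cs_ppzksnark_processed_verification_key<ppT> pvk = r1cs_ppzksnark_verifier_process_vk<ppT>(keypair.vk);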
+ */ +template +class r1cs_ppzksnark_keypair { +public: + r1cs_ppzksnark_proving_key pk; + r1cs_ppzksnark_verification_key vk; + + r1cs_ppzksnark_keypair() = default; + r1cs_ppzksnark_keypair(const r1cs_ppzksnark_keypair &other) = default; + r1cs_ppzksnark_keypair(r1cs_ppzksnark_proving_key &&pk, + r1cs_ppzksnark_verification_key &&vk) : + pk(std::move(pk)), + vk(std::move(vk)) + {} + + r1cs_ppzksnark_keypair(r1cs_ppzksnark_keypair &&other) = default; +}; + + +/*********************************** Proof ***********************************/ + +template +class r1cs_ppzksnark_proof; + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proof &proof); + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proof &proof); + +/** + * A proof for the R1CS ppzkSNARK. + * + * While the proof has a structure, externally one merely opaquely produces, + * seralizes/deserializes, and verifies proofs. We only expose some information + * about the structure for statistics purposes. + */ +template +class r1cs_ppzksnark_proof { +public: + knowledge_commitment, G1 > g_A; + knowledge_commitment, G1 > g_B; + knowledge_commitment, G1 > g_C; + G1 g_H; + G1 g_K; + + r1cs_ppzksnark_proof() + { + // invalid proof with valid curve points + this->g_A.g = G1 ::one(); + this->g_A.h = G1::one(); + this->g_B.g = G2 ::one(); + this->g_B.h = G1::one(); + this->g_C.g = G1 ::one(); + this->g_C.h = G1::one(); + this->g_H = G1::one(); + this->g_K = G1::one(); + } + r1cs_ppzksnark_proof(knowledge_commitment, G1 > &&g_A, + knowledge_commitment, G1 > &&g_B, + knowledge_commitment, G1 > &&g_C, + G1 &&g_H, + G1 &&g_K) : + g_A(std::move(g_A)), + g_B(std::move(g_B)), + g_C(std::move(g_C)), + g_H(std::move(g_H)), + g_K(std::move(g_K)) + {}; + + size_t G1_size() const + { + return 7; + } + + size_t G2_size() const + { + return 1; + } + + size_t size_in_bits() const + { + return G1_size() * G1::size_in_bits() + G2_size() * G2::size_in_bits(); + } + + void print_size() const + { + print_indent(); printf("* G1 elements in proof: %zu\n", this->G1_size()); + print_indent(); printf("* G2 elements in proof: %zu\n", this->G2_size()); + print_indent(); printf("* Proof size in bits: %zu\n", this->size_in_bits()); + } + + bool is_well_formed() const + { + return (g_A.g.is_well_formed() && g_A.h.is_well_formed() && + g_B.g.is_well_formed() && g_B.h.is_well_formed() && + g_C.g.is_well_formed() && g_C.h.is_well_formed() && + g_H.is_well_formed() && + g_K.is_well_formed()); + } + + bool operator==(const r1cs_ppzksnark_proof &other) const; + friend std::ostream& operator<< (std::ostream &out, const r1cs_ppzksnark_proof &proof); + friend std::istream& operator>> (std::istream &in, r1cs_ppzksnark_proof &proof); +}; + + +/***************************** Main algorithms *******************************/ + +/** + * A generator algorithm for the R1CS ppzkSNARK. + * + * Given a R1CS constraint system CS, this algorithm produces proving and verification keys for CS. + */ +template +r1cs_ppzksnark_keypair r1cs_ppzksnark_generator(const r1cs_ppzksnark_constraint_system &cs); + +template +r1cs_ppzksnark_keypair r1cs_ppzksnark_generator( + const r1cs_ppzksnark_constraint_system &cs, + const Fr& t, + const Fr& alphaA, + const Fr& alphaB, + const Fr& alphaC, + const Fr& rA, + const Fr& rB, + const Fr& beta, + const Fr& gamma +); + +/** + * A prover algorithm for the R1CS ppzkSNARK. 
+ * + * Given a R1CS primary input X and a R1CS auxiliary input Y, this algorithm + * produces a proof (of knowledge) that attests to the following statement: + * ``there exists Y such that CS(X,Y)=0''. + * Above, CS is the R1CS constraint system that was given as input to the generator algorithm. + */ +template +r1cs_ppzksnark_proof r1cs_ppzksnark_prover(const r1cs_ppzksnark_proving_key &pk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_auxiliary_input &auxiliary_input); + +/* + Below are four variants of verifier algorithm for the R1CS ppzkSNARK. + + These are the four cases that arise from the following two choices: + + (1) The verifier accepts a (non-processed) verification key or, instead, a processed verification key. + In the latter case, we call the algorithm an "online verifier". + + (2) The verifier checks for "weak" input consistency or, instead, "strong" input consistency. + Strong input consistency requires that |primary_input| = CS.num_inputs, whereas + weak input consistency requires that |primary_input| <= CS.num_inputs (and + the primary input is implicitly padded with zeros up to length CS.num_inputs). + */ + +/** + * A verifier algorithm for the R1CS ppzkSNARK that: + * (1) accepts a non-processed verification key, and + * (2) has weak input consistency. + */ +template +bool r1cs_ppzksnark_verifier_weak_IC(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof); + +/** + * A verifier algorithm for the R1CS ppzkSNARK that: + * (1) accepts a non-processed verification key, and + * (2) has strong input consistency. + */ +template +bool r1cs_ppzksnark_verifier_strong_IC(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof); + +/** + * Convert a (non-processed) verification key into a processed verification key. + */ +template +r1cs_ppzksnark_processed_verification_key r1cs_ppzksnark_verifier_process_vk(const r1cs_ppzksnark_verification_key &vk); + +/** + * A verifier algorithm for the R1CS ppzkSNARK that: + * (1) accepts a processed verification key, and + * (2) has weak input consistency. + */ +template +bool r1cs_ppzksnark_online_verifier_weak_IC(const r1cs_ppzksnark_processed_verification_key &pvk, + const r1cs_ppzksnark_primary_input &input, + const r1cs_ppzksnark_proof &proof); + +/** + * A verifier algorithm for the R1CS ppzkSNARK that: + * (1) accepts a processed verification key, and + * (2) has strong input consistency. + */ +template +bool r1cs_ppzksnark_online_verifier_strong_IC(const r1cs_ppzksnark_processed_verification_key &pvk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof); + +/****************************** Miscellaneous ********************************/ + +/** + * For debugging purposes (of r1cs_ppzksnark_r1cs_ppzksnark_verifier_gadget): + * + * A verifier algorithm for the R1CS ppzkSNARK that: + * (1) accepts a non-processed verification key, + * (2) has weak input consistency, and + * (3) uses affine coordinates for elliptic-curve computations. 
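+ *
+ * Editorial sketch (not part of the upstream comment) contrasting the verifier
+ * variants in this header, assuming vk, pvk, primary_input and proof are
+ * already available:
+ *
+ *   bool b1 = r1cs_ppzksnark_verifier_weak_IC<ppT>(vk, primary_input, proof);
+ *   bool b2 = r1cs_ppzksnark_verifier_strong_IC<ppT>(vk, primary_input, proof);
+ *   bool b3 = r1cs_ppzksnark_online_verifier_weak_IC<ppT>(pvk, primary_input, proof);
+ *   bool b4 = r1cs_ppzksnark_online_verifier_strong_IC<ppT>(pvk, primary_input, proof);
+ *   bool b5 = r1cs_ppzksnark_affine_verifier_weak_IC<ppT>(vk, primary_input, proof);
+ *
+ *   // The weak-IC variants implicitly pad primary_input with zeros up to
+ *   // CS.num_inputs; the strong-IC variants require an exact length match.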
+ */ +template +bool r1cs_ppzksnark_affine_verifier_weak_IC(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof); + + +} // libsnark + +#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.tcc" + +#endif // R1CS_PPZKSNARK_HPP_ diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.tcc b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.tcc new file mode 100644 index 000000000..aeb2bbb85 --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.tcc @@ -0,0 +1,762 @@ +/** @file +***************************************************************************** + +Implementation of interfaces for a ppzkSNARK for R1CS. + +See r1cs_ppzksnark.hpp . + +***************************************************************************** +* @author This file is part of libsnark, developed by SCIPR Lab +* and contributors (see AUTHORS). +* @copyright MIT license (see LICENSE file) +*****************************************************************************/ + +#ifndef R1CS_PPZKSNARK_TCC_ +#define R1CS_PPZKSNARK_TCC_ + +#include +#include +#include +#include +#include + +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "algebra/scalar_multiplication/multiexp.hpp" +#include "algebra/scalar_multiplication/kc_multiexp.hpp" +#include "reductions/r1cs_to_qap/r1cs_to_qap.hpp" + +namespace libsnark { + +template +bool r1cs_ppzksnark_proving_key::operator==(const r1cs_ppzksnark_proving_key &other) const +{ + return (this->A_query == other.A_query && + this->B_query == other.B_query && + this->C_query == other.C_query && + this->H_query == other.H_query && + this->K_query == other.K_query); +} + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proving_key &pk) +{ + out << pk.A_query; + out << pk.B_query; + out << pk.C_query; + out << pk.H_query; + out << pk.K_query; + + return out; +} + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proving_key &pk) +{ + in >> pk.A_query; + in >> pk.B_query; + in >> pk.C_query; + in >> pk.H_query; + in >> pk.K_query; + + return in; +} + +template +bool r1cs_ppzksnark_verification_key::operator==(const r1cs_ppzksnark_verification_key &other) const +{ + return (this->alphaA_g2 == other.alphaA_g2 && + this->alphaB_g1 == other.alphaB_g1 && + this->alphaC_g2 == other.alphaC_g2 && + this->gamma_g2 == other.gamma_g2 && + this->gamma_beta_g1 == other.gamma_beta_g1 && + this->gamma_beta_g2 == other.gamma_beta_g2 && + this->rC_Z_g2 == other.rC_Z_g2 && + this->encoded_IC_query == other.encoded_IC_query); +} + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_verification_key &vk) +{ + out << vk.alphaA_g2 << OUTPUT_NEWLINE; + out << vk.alphaB_g1 << OUTPUT_NEWLINE; + out << vk.alphaC_g2 << OUTPUT_NEWLINE; + out << vk.gamma_g2 << OUTPUT_NEWLINE; + out << vk.gamma_beta_g1 << OUTPUT_NEWLINE; + out << vk.gamma_beta_g2 << OUTPUT_NEWLINE; + out << vk.rC_Z_g2 << OUTPUT_NEWLINE; + out << vk.encoded_IC_query << OUTPUT_NEWLINE; + + return out; +} + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_verification_key &vk) +{ + in >> vk.alphaA_g2; + consume_OUTPUT_NEWLINE(in); + in >> vk.alphaB_g1; + consume_OUTPUT_NEWLINE(in); + in >> vk.alphaC_g2; + consume_OUTPUT_NEWLINE(in); + in >> vk.gamma_g2; + consume_OUTPUT_NEWLINE(in); + in >> vk.gamma_beta_g1; + consume_OUTPUT_NEWLINE(in); + in >> vk.gamma_beta_g2; + consume_OUTPUT_NEWLINE(in); + in >> 
vk.rC_Z_g2; + consume_OUTPUT_NEWLINE(in); + in >> vk.encoded_IC_query; + consume_OUTPUT_NEWLINE(in); + + return in; +} + +template +bool r1cs_ppzksnark_processed_verification_key::operator==(const r1cs_ppzksnark_processed_verification_key &other) const +{ + return (this->pp_G2_one_precomp == other.pp_G2_one_precomp && + this->vk_alphaA_g2_precomp == other.vk_alphaA_g2_precomp && + this->vk_alphaB_g1_precomp == other.vk_alphaB_g1_precomp && + this->vk_alphaC_g2_precomp == other.vk_alphaC_g2_precomp && + this->vk_rC_Z_g2_precomp == other.vk_rC_Z_g2_precomp && + this->vk_gamma_g2_precomp == other.vk_gamma_g2_precomp && + this->vk_gamma_beta_g1_precomp == other.vk_gamma_beta_g1_precomp && + this->vk_gamma_beta_g2_precomp == other.vk_gamma_beta_g2_precomp && + this->encoded_IC_query == other.encoded_IC_query); +} + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_processed_verification_key &pvk) +{ + out << pvk.pp_G2_one_precomp << OUTPUT_NEWLINE; + out << pvk.vk_alphaA_g2_precomp << OUTPUT_NEWLINE; + out << pvk.vk_alphaB_g1_precomp << OUTPUT_NEWLINE; + out << pvk.vk_alphaC_g2_precomp << OUTPUT_NEWLINE; + out << pvk.vk_rC_Z_g2_precomp << OUTPUT_NEWLINE; + out << pvk.vk_gamma_g2_precomp << OUTPUT_NEWLINE; + out << pvk.vk_gamma_beta_g1_precomp << OUTPUT_NEWLINE; + out << pvk.vk_gamma_beta_g2_precomp << OUTPUT_NEWLINE; + out << pvk.encoded_IC_query << OUTPUT_NEWLINE; + + return out; +} + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_processed_verification_key &pvk) +{ + in >> pvk.pp_G2_one_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_alphaA_g2_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_alphaB_g1_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_alphaC_g2_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_rC_Z_g2_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_gamma_g2_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_gamma_beta_g1_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.vk_gamma_beta_g2_precomp; + consume_OUTPUT_NEWLINE(in); + in >> pvk.encoded_IC_query; + consume_OUTPUT_NEWLINE(in); + + return in; +} + +template +bool r1cs_ppzksnark_proof::operator==(const r1cs_ppzksnark_proof &other) const +{ + return (this->g_A == other.g_A && + this->g_B == other.g_B && + this->g_C == other.g_C && + this->g_H == other.g_H && + this->g_K == other.g_K); +} + +template +std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proof &proof) +{ + out << proof.g_A << OUTPUT_NEWLINE; + out << proof.g_B << OUTPUT_NEWLINE; + out << proof.g_C << OUTPUT_NEWLINE; + out << proof.g_H << OUTPUT_NEWLINE; + out << proof.g_K << OUTPUT_NEWLINE; + + return out; +} + +template +std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proof &proof) +{ + in >> proof.g_A; + consume_OUTPUT_NEWLINE(in); + in >> proof.g_B; + consume_OUTPUT_NEWLINE(in); + in >> proof.g_C; + consume_OUTPUT_NEWLINE(in); + in >> proof.g_H; + consume_OUTPUT_NEWLINE(in); + in >> proof.g_K; + consume_OUTPUT_NEWLINE(in); + + return in; +} + +template +r1cs_ppzksnark_verification_key r1cs_ppzksnark_verification_key::dummy_verification_key(const size_t input_size) +{ + r1cs_ppzksnark_verification_key result; + result.alphaA_g2 = Fr::random_element() * G2::one(); + result.alphaB_g1 = Fr::random_element() * G1::one(); + result.alphaC_g2 = Fr::random_element() * G2::one(); + result.gamma_g2 = Fr::random_element() * G2::one(); + result.gamma_beta_g1 = Fr::random_element() * G1::one(); + result.gamma_beta_g2 = Fr::random_element() * G2::one(); + result.rC_Z_g2 = 
Fr::random_element() * G2::one(); + + G1 base = Fr::random_element() * G1::one(); + G1_vector v; + for (size_t i = 0; i < input_size; ++i) + { + v.emplace_back(Fr::random_element() * G1::one()); + } + + result.encoded_IC_query = accumulation_vector >(std::move(base), std::move(v)); + + return result; +} + +template +r1cs_ppzksnark_keypair r1cs_ppzksnark_generator(const r1cs_ppzksnark_constraint_system &cs) +{ + /* draw random element at which the QAP is evaluated */ + const Fr t = Fr::random_element(); + + const Fr alphaA = Fr::random_element(), + alphaB = Fr::random_element(), + alphaC = Fr::random_element(), + rA = Fr::random_element(), + rB = Fr::random_element(), + beta = Fr::random_element(), + gamma = Fr::random_element(); + + return r1cs_ppzksnark_generator(cs, t, alphaA, alphaB, alphaC, rA, rB, beta, gamma); +} + +template +r1cs_ppzksnark_keypair r1cs_ppzksnark_generator( + const r1cs_ppzksnark_constraint_system &cs, + const Fr& t, + const Fr& alphaA, + const Fr& alphaB, + const Fr& alphaC, + const Fr& rA, + const Fr& rB, + const Fr& beta, + const Fr& gamma +) +{ + enter_block("Call to r1cs_ppzksnark_generator"); + + /* make the B_query "lighter" if possible */ + r1cs_ppzksnark_constraint_system cs_copy(cs); + cs_copy.swap_AB_if_beneficial(); + + qap_instance_evaluation > qap_inst = r1cs_to_qap_instance_map_with_evaluation(cs_copy, t); + + print_indent(); printf("* QAP number of variables: %zu\n", qap_inst.num_variables()); + print_indent(); printf("* QAP pre degree: %zu\n", cs_copy.constraints.size()); + print_indent(); printf("* QAP degree: %zu\n", qap_inst.degree()); + print_indent(); printf("* QAP number of input variables: %zu\n", qap_inst.num_inputs()); + + enter_block("Compute query densities"); + size_t non_zero_At = 0, non_zero_Bt = 0, non_zero_Ct = 0, non_zero_Ht = 0; + for (size_t i = 0; i < qap_inst.num_variables()+1; ++i) + { + if (!qap_inst.At[i].is_zero()) + { + ++non_zero_At; + } + if (!qap_inst.Bt[i].is_zero()) + { + ++non_zero_Bt; + } + if (!qap_inst.Ct[i].is_zero()) + { + ++non_zero_Ct; + } + } + for (size_t i = 0; i < qap_inst.degree()+1; ++i) + { + if (!qap_inst.Ht[i].is_zero()) + { + ++non_zero_Ht; + } + } + leave_block("Compute query densities"); + + Fr_vector At = std::move(qap_inst.At); // qap_inst.At is now in unspecified state, but we do not use it later + Fr_vector Bt = std::move(qap_inst.Bt); // qap_inst.Bt is now in unspecified state, but we do not use it later + Fr_vector Ct = std::move(qap_inst.Ct); // qap_inst.Ct is now in unspecified state, but we do not use it later + Fr_vector Ht = std::move(qap_inst.Ht); // qap_inst.Ht is now in unspecified state, but we do not use it later + + /* append Zt to At,Bt,Ct with */ + At.emplace_back(qap_inst.Zt); + Bt.emplace_back(qap_inst.Zt); + Ct.emplace_back(qap_inst.Zt); + + const Fr rC = rA * rB; + + // consrtuct the same-coefficient-check query (must happen before zeroing out the prefix of At) + Fr_vector Kt; + Kt.reserve(qap_inst.num_variables()+4); + for (size_t i = 0; i < qap_inst.num_variables()+1; ++i) + { + Kt.emplace_back( beta * (rA * At[i] + rB * Bt[i] + rC * Ct[i] ) ); + } + Kt.emplace_back(beta * rA * qap_inst.Zt); + Kt.emplace_back(beta * rB * qap_inst.Zt); + Kt.emplace_back(beta * rC * qap_inst.Zt); + + /* zero out prefix of At and stick it into IC coefficients */ + Fr_vector IC_coefficients; + IC_coefficients.reserve(qap_inst.num_inputs() + 1); + for (size_t i = 0; i < qap_inst.num_inputs() + 1; ++i) + { + IC_coefficients.emplace_back(At[i]); + assert(!IC_coefficients[i].is_zero()); + At[i] = 
Fr::zero(); + } + + const size_t g1_exp_count = 2*(non_zero_At - qap_inst.num_inputs() + non_zero_Ct) + non_zero_Bt + non_zero_Ht + Kt.size(); + const size_t g2_exp_count = non_zero_Bt; + + size_t g1_window = get_exp_window_size >(g1_exp_count); + size_t g2_window = get_exp_window_size >(g2_exp_count); + print_indent(); printf("* G1 window: %zu\n", g1_window); + print_indent(); printf("* G2 window: %zu\n", g2_window); + +#ifdef MULTICORE + const size_t chunks = omp_get_max_threads(); // to override, set OMP_NUM_THREADS env var or call omp_set_num_threads() +#else + const size_t chunks = 1; +#endif + + enter_block("Generating G1 multiexp table"); + window_table > g1_table = get_window_table(Fr::size_in_bits(), g1_window, G1::one()); + leave_block("Generating G1 multiexp table"); + + enter_block("Generating G2 multiexp table"); + window_table > g2_table = get_window_table(Fr::size_in_bits(), g2_window, G2::one()); + leave_block("Generating G2 multiexp table"); + + enter_block("Generate R1CS proving key"); + + enter_block("Generate knowledge commitments"); + enter_block("Compute the A-query", false); + knowledge_commitment_vector, G1 > A_query = kc_batch_exp(Fr::size_in_bits(), g1_window, g1_window, g1_table, g1_table, rA, rA*alphaA, At, chunks); + leave_block("Compute the A-query", false); + + enter_block("Compute the B-query", false); + knowledge_commitment_vector, G1 > B_query = kc_batch_exp(Fr::size_in_bits(), g2_window, g1_window, g2_table, g1_table, rB, rB*alphaB, Bt, chunks); + leave_block("Compute the B-query", false); + + enter_block("Compute the C-query", false); + knowledge_commitment_vector, G1 > C_query = kc_batch_exp(Fr::size_in_bits(), g1_window, g1_window, g1_table, g1_table, rC, rC*alphaC, Ct, chunks); + leave_block("Compute the C-query", false); + + enter_block("Compute the H-query", false); + G1_vector H_query = batch_exp(Fr::size_in_bits(), g1_window, g1_table, Ht); + leave_block("Compute the H-query", false); + + enter_block("Compute the K-query", false); + G1_vector K_query = batch_exp(Fr::size_in_bits(), g1_window, g1_table, Kt); +#ifdef USE_MIXED_ADDITION + batch_to_special >(K_query); +#endif + leave_block("Compute the K-query", false); + + leave_block("Generate knowledge commitments"); + + leave_block("Generate R1CS proving key"); + + enter_block("Generate R1CS verification key"); + G2 alphaA_g2 = alphaA * G2::one(); + G1 alphaB_g1 = alphaB * G1::one(); + G2 alphaC_g2 = alphaC * G2::one(); + G2 gamma_g2 = gamma * G2::one(); + G1 gamma_beta_g1 = (gamma * beta) * G1::one(); + G2 gamma_beta_g2 = (gamma * beta) * G2::one(); + G2 rC_Z_g2 = (rC * qap_inst.Zt) * G2::one(); + + enter_block("Encode IC query for R1CS verification key"); + G1 encoded_IC_base = (rA * IC_coefficients[0]) * G1::one(); + Fr_vector multiplied_IC_coefficients; + multiplied_IC_coefficients.reserve(qap_inst.num_inputs()); + for (size_t i = 1; i < qap_inst.num_inputs() + 1; ++i) + { + multiplied_IC_coefficients.emplace_back(rA * IC_coefficients[i]); + } + G1_vector encoded_IC_values = batch_exp(Fr::size_in_bits(), g1_window, g1_table, multiplied_IC_coefficients); + + leave_block("Encode IC query for R1CS verification key"); + leave_block("Generate R1CS verification key"); + + leave_block("Call to r1cs_ppzksnark_generator"); + + accumulation_vector > encoded_IC_query(std::move(encoded_IC_base), std::move(encoded_IC_values)); + + r1cs_ppzksnark_verification_key vk = r1cs_ppzksnark_verification_key(alphaA_g2, + alphaB_g1, + alphaC_g2, + gamma_g2, + gamma_beta_g1, + gamma_beta_g2, + rC_Z_g2, + 
encoded_IC_query); + r1cs_ppzksnark_proving_key pk = r1cs_ppzksnark_proving_key(std::move(A_query), + std::move(B_query), + std::move(C_query), + std::move(H_query), + std::move(K_query)); + + pk.print_size(); + vk.print_size(); + + return r1cs_ppzksnark_keypair(std::move(pk), std::move(vk)); +} + +template +r1cs_ppzksnark_proof r1cs_ppzksnark_prover(const r1cs_ppzksnark_proving_key &pk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_auxiliary_input &auxiliary_input, + const r1cs_ppzksnark_constraint_system &constraint_system) +{ + enter_block("Call to r1cs_ppzksnark_prover"); + +#ifdef DEBUG + assert(constraint_system.is_satisfied(primary_input, auxiliary_input)); +#endif + + const Fr d1 = Fr::random_element(), + d2 = Fr::random_element(), + d3 = Fr::random_element(); + + enter_block("Compute the polynomial H"); + const qap_witness > qap_wit = r1cs_to_qap_witness_map(constraint_system, primary_input, auxiliary_input, d1, d2, d3); + leave_block("Compute the polynomial H"); + +#ifdef DEBUG + const Fr t = Fr::random_element(); + qap_instance_evaluation > qap_inst = r1cs_to_qap_instance_map_with_evaluation(constraint_system, t); + assert(qap_inst.is_satisfied(qap_wit)); +#endif + + knowledge_commitment, G1 > g_A = pk.A_query[0] + qap_wit.d1*pk.A_query[qap_wit.num_variables()+1]; + knowledge_commitment, G1 > g_B = pk.B_query[0] + qap_wit.d2*pk.B_query[qap_wit.num_variables()+1]; + knowledge_commitment, G1 > g_C = pk.C_query[0] + qap_wit.d3*pk.C_query[qap_wit.num_variables()+1]; + + G1 g_H = G1::zero(); + G1 g_K = (pk.K_query[0] + + qap_wit.d1*pk.K_query[qap_wit.num_variables()+1] + + qap_wit.d2*pk.K_query[qap_wit.num_variables()+2] + + qap_wit.d3*pk.K_query[qap_wit.num_variables()+3]); + +#ifdef DEBUG + for (size_t i = 0; i < qap_wit.num_inputs() + 1; ++i) + { + assert(pk.A_query[i].g == G1::zero()); + } + assert(pk.A_query.domain_size() == qap_wit.num_variables()+2); + assert(pk.B_query.domain_size() == qap_wit.num_variables()+2); + assert(pk.C_query.domain_size() == qap_wit.num_variables()+2); + assert(pk.H_query.size() == qap_wit.degree()+1); + assert(pk.K_query.size() == qap_wit.num_variables()+4); +#endif + +#ifdef MULTICORE + const size_t chunks = omp_get_max_threads(); // to override, set OMP_NUM_THREADS env var or call omp_set_num_threads() +#else + const size_t chunks = 1; +#endif + + enter_block("Compute the proof"); + + enter_block("Compute answer to A-query", false); + g_A = g_A + kc_multi_exp_with_mixed_addition, G1, Fr >(pk.A_query, + 1, 1+qap_wit.num_variables(), + qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(), + chunks, true); + leave_block("Compute answer to A-query", false); + + enter_block("Compute answer to B-query", false); + g_B = g_B + kc_multi_exp_with_mixed_addition, G1, Fr >(pk.B_query, + 1, 1+qap_wit.num_variables(), + qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(), + chunks, true); + leave_block("Compute answer to B-query", false); + + enter_block("Compute answer to C-query", false); + g_C = g_C + kc_multi_exp_with_mixed_addition, G1, Fr >(pk.C_query, + 1, 1+qap_wit.num_variables(), + qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(), + chunks, true); + leave_block("Compute answer to C-query", false); + + enter_block("Compute answer to H-query", false); + g_H = g_H + multi_exp, Fr >(pk.H_query.begin(), pk.H_query.begin()+qap_wit.degree()+1, + qap_wit.coefficients_for_H.begin(), 
qap_wit.coefficients_for_H.begin()+qap_wit.degree()+1, + chunks, true); + leave_block("Compute answer to H-query", false); + + enter_block("Compute answer to K-query", false); + g_K = g_K + multi_exp_with_mixed_addition, Fr >(pk.K_query.begin()+1, pk.K_query.begin()+1+qap_wit.num_variables(), + qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(), + chunks, true); + leave_block("Compute answer to K-query", false); + + leave_block("Compute the proof"); + + leave_block("Call to r1cs_ppzksnark_prover"); + + r1cs_ppzksnark_proof proof = r1cs_ppzksnark_proof(std::move(g_A), std::move(g_B), std::move(g_C), std::move(g_H), std::move(g_K)); + //proof.print_size(); + + return proof; +} + +template +r1cs_ppzksnark_processed_verification_key r1cs_ppzksnark_verifier_process_vk(const r1cs_ppzksnark_verification_key &vk) +{ + enter_block("Call to r1cs_ppzksnark_verifier_process_vk"); + + r1cs_ppzksnark_processed_verification_key pvk; + pvk.pp_G2_one_precomp = ppT::precompute_G2(G2::one()); + pvk.vk_alphaA_g2_precomp = ppT::precompute_G2(vk.alphaA_g2); + pvk.vk_alphaB_g1_precomp = ppT::precompute_G1(vk.alphaB_g1); + pvk.vk_alphaC_g2_precomp = ppT::precompute_G2(vk.alphaC_g2); + pvk.vk_rC_Z_g2_precomp = ppT::precompute_G2(vk.rC_Z_g2); + pvk.vk_gamma_g2_precomp = ppT::precompute_G2(vk.gamma_g2); + pvk.vk_gamma_beta_g1_precomp = ppT::precompute_G1(vk.gamma_beta_g1); + pvk.vk_gamma_beta_g2_precomp = ppT::precompute_G2(vk.gamma_beta_g2); + + pvk.encoded_IC_query = vk.encoded_IC_query; + + leave_block("Call to r1cs_ppzksnark_verifier_process_vk"); + + return pvk; +} + +template +bool r1cs_ppzksnark_online_verifier_weak_IC(const r1cs_ppzksnark_processed_verification_key &pvk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof) +{ + assert(pvk.encoded_IC_query.domain_size() >= primary_input.size()); + + const accumulation_vector > accumulated_IC = pvk.encoded_IC_query.template accumulate_chunk >(primary_input.begin(), primary_input.end(), 0); + const G1 &acc = accumulated_IC.first; + + if (!proof.is_well_formed()) + { + return false; + } + + G1_precomp proof_g_A_g_precomp = ppT::precompute_G1(proof.g_A.g); + G1_precomp proof_g_A_h_precomp = ppT::precompute_G1(proof.g_A.h); + Fqk kc_A_1 = ppT::miller_loop(proof_g_A_g_precomp, pvk.vk_alphaA_g2_precomp); + Fqk kc_A_2 = ppT::miller_loop(proof_g_A_h_precomp, pvk.pp_G2_one_precomp); + GT kc_A = ppT::final_exponentiation(kc_A_1 * kc_A_2.unitary_inverse()); + if (kc_A != GT::one()) + { + return false; + } + + G2_precomp proof_g_B_g_precomp = ppT::precompute_G2(proof.g_B.g); + G1_precomp proof_g_B_h_precomp = ppT::precompute_G1(proof.g_B.h); + Fqk kc_B_1 = ppT::miller_loop(pvk.vk_alphaB_g1_precomp, proof_g_B_g_precomp); + Fqk kc_B_2 = ppT::miller_loop(proof_g_B_h_precomp, pvk.pp_G2_one_precomp); + GT kc_B = ppT::final_exponentiation(kc_B_1 * kc_B_2.unitary_inverse()); + if (kc_B != GT::one()) + { + return false; + } + + G1_precomp proof_g_C_g_precomp = ppT::precompute_G1(proof.g_C.g); + G1_precomp proof_g_C_h_precomp = ppT::precompute_G1(proof.g_C.h); + Fqk kc_C_1 = ppT::miller_loop(proof_g_C_g_precomp, pvk.vk_alphaC_g2_precomp); + Fqk kc_C_2 = ppT::miller_loop(proof_g_C_h_precomp, pvk.pp_G2_one_precomp); + GT kc_C = ppT::final_exponentiation(kc_C_1 * kc_C_2.unitary_inverse()); + if (kc_C != GT::one()) + { + return false; + } + + // check that g^((A+acc)*B)=g^(H*\Prod(t-\sigma)+C) + // equivalently, via pairings, that e(g^(A+acc), g^B) = e(g^H, g^Z) + e(g^C, g^1) + G1_precomp 
proof_g_A_g_acc_precomp = ppT::precompute_G1(proof.g_A.g + acc); + G1_precomp proof_g_H_precomp = ppT::precompute_G1(proof.g_H); + Fqk QAP_1 = ppT::miller_loop(proof_g_A_g_acc_precomp, proof_g_B_g_precomp); + Fqk QAP_23 = ppT::double_miller_loop(proof_g_H_precomp, pvk.vk_rC_Z_g2_precomp, proof_g_C_g_precomp, pvk.pp_G2_one_precomp); + GT QAP = ppT::final_exponentiation(QAP_1 * QAP_23.unitary_inverse()); + if (QAP != GT::one()) + { + return false; + } + + G1_precomp proof_g_K_precomp = ppT::precompute_G1(proof.g_K); + G1_precomp proof_g_A_g_acc_C_precomp = ppT::precompute_G1((proof.g_A.g + acc) + proof.g_C.g); + Fqk K_1 = ppT::miller_loop(proof_g_K_precomp, pvk.vk_gamma_g2_precomp); + Fqk K_23 = ppT::double_miller_loop(proof_g_A_g_acc_C_precomp, pvk.vk_gamma_beta_g2_precomp, pvk.vk_gamma_beta_g1_precomp, proof_g_B_g_precomp); + GT K = ppT::final_exponentiation(K_1 * K_23.unitary_inverse()); + if (K != GT::one()) + { + return false; + } + + return true; +} + +template +bool r1cs_ppzksnark_verifier_weak_IC(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof) +{ + enter_block("Call to r1cs_ppzksnark_verifier_weak_IC"); + r1cs_ppzksnark_processed_verification_key pvk = r1cs_ppzksnark_verifier_process_vk(vk); + bool result = r1cs_ppzksnark_online_verifier_weak_IC(pvk, primary_input, proof); + leave_block("Call to r1cs_ppzksnark_verifier_weak_IC"); + return result; +} + +template +bool r1cs_ppzksnark_online_verifier_strong_IC(const r1cs_ppzksnark_processed_verification_key &pvk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof) +{ + bool result = true; + enter_block("Call to r1cs_ppzksnark_online_verifier_strong_IC"); + + if (pvk.encoded_IC_query.domain_size() != primary_input.size()) + { + print_indent(); printf("Input length differs from expected (got %zu, expected %zu).\n", primary_input.size(), pvk.encoded_IC_query.domain_size()); + result = false; + } + else + { + result = r1cs_ppzksnark_online_verifier_weak_IC(pvk, primary_input, proof); + } + + leave_block("Call to r1cs_ppzksnark_online_verifier_strong_IC"); + return result; +} + +template +bool r1cs_ppzksnark_verifier_strong_IC(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof) +{ + enter_block("Call to r1cs_ppzksnark_verifier_strong_IC"); + r1cs_ppzksnark_processed_verification_key pvk = r1cs_ppzksnark_verifier_process_vk(vk); + bool result = r1cs_ppzksnark_online_verifier_strong_IC(pvk, primary_input, proof); + leave_block("Call to r1cs_ppzksnark_verifier_strong_IC"); + return result; +} + +template +bool r1cs_ppzksnark_affine_verifier_weak_IC(const r1cs_ppzksnark_verification_key &vk, + const r1cs_ppzksnark_primary_input &primary_input, + const r1cs_ppzksnark_proof &proof) +{ + enter_block("Call to r1cs_ppzksnark_affine_verifier_weak_IC"); + assert(vk.encoded_IC_query.domain_size() >= primary_input.size()); + + affine_ate_G2_precomp pvk_pp_G2_one_precomp = ppT::affine_ate_precompute_G2(G2::one()); + affine_ate_G2_precomp pvk_vk_alphaA_g2_precomp = ppT::affine_ate_precompute_G2(vk.alphaA_g2); + affine_ate_G1_precomp pvk_vk_alphaB_g1_precomp = ppT::affine_ate_precompute_G1(vk.alphaB_g1); + affine_ate_G2_precomp pvk_vk_alphaC_g2_precomp = ppT::affine_ate_precompute_G2(vk.alphaC_g2); + affine_ate_G2_precomp pvk_vk_rC_Z_g2_precomp = ppT::affine_ate_precompute_G2(vk.rC_Z_g2); + affine_ate_G2_precomp pvk_vk_gamma_g2_precomp = 
ppT::affine_ate_precompute_G2(vk.gamma_g2); + affine_ate_G1_precomp pvk_vk_gamma_beta_g1_precomp = ppT::affine_ate_precompute_G1(vk.gamma_beta_g1); + affine_ate_G2_precomp pvk_vk_gamma_beta_g2_precomp = ppT::affine_ate_precompute_G2(vk.gamma_beta_g2); + + enter_block("Compute input-dependent part of A"); + const accumulation_vector > accumulated_IC = vk.encoded_IC_query.template accumulate_chunk >(primary_input.begin(), primary_input.end(), 0); + assert(accumulated_IC.is_fully_accumulated()); + const G1 &acc = accumulated_IC.first; + leave_block("Compute input-dependent part of A"); + + bool result = true; + enter_block("Check knowledge commitment for A is valid"); + affine_ate_G1_precomp proof_g_A_g_precomp = ppT::affine_ate_precompute_G1(proof.g_A.g); + affine_ate_G1_precomp proof_g_A_h_precomp = ppT::affine_ate_precompute_G1(proof.g_A.h); + Fqk kc_A_miller = ppT::affine_ate_e_over_e_miller_loop(proof_g_A_g_precomp, pvk_vk_alphaA_g2_precomp, proof_g_A_h_precomp, pvk_pp_G2_one_precomp); + GT kc_A = ppT::final_exponentiation(kc_A_miller); + + if (kc_A != GT::one()) + { + print_indent(); printf("Knowledge commitment for A query incorrect.\n"); + result = false; + } + leave_block("Check knowledge commitment for A is valid"); + + enter_block("Check knowledge commitment for B is valid"); + affine_ate_G2_precomp proof_g_B_g_precomp = ppT::affine_ate_precompute_G2(proof.g_B.g); + affine_ate_G1_precomp proof_g_B_h_precomp = ppT::affine_ate_precompute_G1(proof.g_B.h); + Fqk kc_B_miller = ppT::affine_ate_e_over_e_miller_loop(pvk_vk_alphaB_g1_precomp, proof_g_B_g_precomp, proof_g_B_h_precomp, pvk_pp_G2_one_precomp); + GT kc_B = ppT::final_exponentiation(kc_B_miller); + if (kc_B != GT::one()) + { + print_indent(); printf("Knowledge commitment for B query incorrect.\n"); + result = false; + } + leave_block("Check knowledge commitment for B is valid"); + + enter_block("Check knowledge commitment for C is valid"); + affine_ate_G1_precomp proof_g_C_g_precomp = ppT::affine_ate_precompute_G1(proof.g_C.g); + affine_ate_G1_precomp proof_g_C_h_precomp = ppT::affine_ate_precompute_G1(proof.g_C.h); + Fqk kc_C_miller = ppT::affine_ate_e_over_e_miller_loop(proof_g_C_g_precomp, pvk_vk_alphaC_g2_precomp, proof_g_C_h_precomp, pvk_pp_G2_one_precomp); + GT kc_C = ppT::final_exponentiation(kc_C_miller); + if (kc_C != GT::one()) + { + print_indent(); printf("Knowledge commitment for C query incorrect.\n"); + result = false; + } + leave_block("Check knowledge commitment for C is valid"); + + enter_block("Check QAP divisibility"); + affine_ate_G1_precomp proof_g_A_g_acc_precomp = ppT::affine_ate_precompute_G1(proof.g_A.g + acc); + affine_ate_G1_precomp proof_g_H_precomp = ppT::affine_ate_precompute_G1(proof.g_H); + Fqk QAP_miller = ppT::affine_ate_e_times_e_over_e_miller_loop(proof_g_H_precomp, pvk_vk_rC_Z_g2_precomp, proof_g_C_g_precomp, pvk_pp_G2_one_precomp, proof_g_A_g_acc_precomp, proof_g_B_g_precomp); + GT QAP = ppT::final_exponentiation(QAP_miller); + if (QAP != GT::one()) + { + print_indent(); printf("QAP divisibility check failed.\n"); + result = false; + } + leave_block("Check QAP divisibility"); + + enter_block("Check same coefficients were used"); + affine_ate_G1_precomp proof_g_K_precomp = ppT::affine_ate_precompute_G1(proof.g_K); + affine_ate_G1_precomp proof_g_A_g_acc_C_precomp = ppT::affine_ate_precompute_G1((proof.g_A.g + acc) + proof.g_C.g); + Fqk K_miller = ppT::affine_ate_e_times_e_over_e_miller_loop(proof_g_A_g_acc_C_precomp, pvk_vk_gamma_beta_g2_precomp, pvk_vk_gamma_beta_g1_precomp, 
proof_g_B_g_precomp, proof_g_K_precomp, pvk_vk_gamma_g2_precomp); + GT K = ppT::final_exponentiation(K_miller); + if (K != GT::one()) + { + print_indent(); printf("Same-coefficient check failed.\n"); + result = false; + } + leave_block("Check same coefficients were used"); + + leave_block("Call to r1cs_ppzksnark_affine_verifier_weak_IC"); + + return result; +} + +} // libsnark +#endif // R1CS_PPZKSNARK_TCC_ diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark_params.hpp b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark_params.hpp new file mode 100644 index 000000000..4054b8e3b --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark_params.hpp @@ -0,0 +1,34 @@ +/** @file + ***************************************************************************** + + Declaration of public-parameter selector for the R1CS ppzkSNARK. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ + +#ifndef R1CS_PPZKSNARK_PARAMS_HPP_ +#define R1CS_PPZKSNARK_PARAMS_HPP_ + +#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp" + +namespace libsnark { + +/** + * Below are various template aliases (used for convenience). + */ + +template +using r1cs_ppzksnark_constraint_system = r1cs_constraint_system >; + +template +using r1cs_ppzksnark_primary_input = r1cs_primary_input >; + +template +using r1cs_ppzksnark_auxiliary_input = r1cs_auxiliary_input >; + +} // libsnark + +#endif // R1CS_PPZKSNARK_PARAMS_HPP_ diff --git a/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark.cpp b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark.cpp new file mode 100644 index 000000000..6f8b575f2 --- /dev/null +++ b/src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark.cpp @@ -0,0 +1,42 @@ +/** @file + ***************************************************************************** + Test program that exercises the ppzkSNARK (first generator, then + prover, then verifier) on a synthetic R1CS instance. + + ***************************************************************************** + * @author This file is part of libsnark, developed by SCIPR Lab + * and contributors (see AUTHORS). + * @copyright MIT license (see LICENSE file) + *****************************************************************************/ +#include +#include + +#include "common/default_types/r1cs_ppzksnark_pp.hpp" +#include "common/profiling.hpp" +#include "common/utils.hpp" +#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp" +#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp" + +using namespace libsnark; + +template +void test_r1cs_ppzksnark(size_t num_constraints, + size_t input_size) +{ + print_header("(enter) Test R1CS ppzkSNARK"); + + const bool test_serialization = true; + r1cs_example > example = generate_r1cs_example_with_binary_input >(num_constraints, input_size); + const bool bit = run_r1cs_ppzksnark(example, test_serialization); + assert(bit); + + print_header("(leave) Test R1CS ppzkSNARK"); +} + +int main() +{ + default_r1cs_ppzksnark_pp::init_public_params(); + start_profiling(); + + test_r1cs_ppzksnark(1000, 100); +}
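+
+/* Editorial sketch (not part of the upstream test): a complementary negative
+   check would tamper with a public input and confirm that verification then
+   fails. If the example, keypair and proof produced inside run_r1cs_ppzksnark
+   were available here, it could look like:
+
+       r1cs_ppzksnark_primary_input<ppT> wrong_input = example.primary_input;
+       wrong_input[0] = wrong_input[0] + Fr<ppT>::one();
+       assert(!r1cs_ppzksnark_verifier_strong_IC<ppT>(keypair.vk, wrong_input, proof));
+*/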