Auto merge of #4060 - str4d:remove-libsnark, r=daira

Remove libsnark

Closes #167. Closes #416. Closes #418. Closes #437.
Closes #521. Closes #743. Closes #750. Closes #894.
Closes #903. Closes #1125. Closes #1136. Closes #1240.
Closes #1264. Closes #1516. Closes #1517. Closes #1651.
Closes #2064. Closes #2158. Closes #3478. Closes #3652.
Closes #3744.
Homu 2019-09-26 11:21:40 -07:00
commit 961c0d58ec
209 changed files with 658 additions and 45832 deletions

View File

@ -46,8 +46,4 @@ following files in build-aux/m4 (see https://github.com/zcash/zcash/issues/2827)
* ax_check_compile_flag.m4
* ax_check_link_flag.m4
* ax_check_preproc_flag.m4
* ax_compiler_vendor.m4
* ax_gcc_archflag.m4
* ax_gcc_x86_cpuid.m4
* ax_openmp.m4
* ax_pthread.m4

View File

@ -1,117 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_COMPILER_VENDOR
#
# DESCRIPTION
#
# Determine the vendor of the C, C++ or Fortran compiler. The vendor is
# returned in the cache variable $ax_cv_c_compiler_vendor for C,
# $ax_cv_cxx_compiler_vendor for C++ or $ax_cv_fc_compiler_vendor for
# (modern) Fortran. The value is one of "intel", "ibm", "pathscale",
# "clang" (LLVM), "cray", "fujitsu", "sdcc", "sx", "portland" (PGI), "gnu"
# (GCC), "sun" (Oracle Developer Studio), "hp", "dec", "borland",
# "comeau", "kai", "lcc", "sgi", "microsoft", "metrowerks", "watcom",
# "tcc" (Tiny CC) or "unknown" (if the compiler cannot be determined).
#
# To check for a Fortran compiler, you must first call AC_FC_PP_SRCEXT
# with an appropriate preprocessor-enabled extension. For example:
#
# AC_LANG_PUSH([Fortran])
# AC_PROG_FC
# AC_FC_PP_SRCEXT([F])
# AX_COMPILER_VENDOR
# AC_LANG_POP([Fortran])
#
# LICENSE
#
# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
# Copyright (c) 2008 Matteo Frigo
# Copyright (c) 2018-19 John Zaitseff <J.Zaitseff@zap.org.au>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 30
AC_DEFUN([AX_COMPILER_VENDOR], [dnl
AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, [dnl
dnl If you modify this list of vendors, please add similar support
dnl to ax_compiler_version.m4 if at all possible.
dnl
dnl Note: Do NOT check for GCC first since some other compilers
dnl define __GNUC__ to remain compatible with it. Compilers that
dnl are very slow to start (such as Intel) are listed first.
vendors="
intel: __ICC,__ECC,__INTEL_COMPILER
ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__,__ibmxl__
pathscale: __PATHCC__,__PATHSCALE__
clang: __clang__
cray: _CRAYC
fujitsu: __FUJITSU
sdcc: SDCC,__SDCC
sx: _SX
portland: __PGI
gnu: __GNUC__
sun: __SUNPRO_C,__SUNPRO_CC,__SUNPRO_F90,__SUNPRO_F95
hp: __HP_cc,__HP_aCC
dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER
borland: __BORLANDC__,__CODEGEARC__,__TURBOC__
comeau: __COMO__
kai: __KCC
lcc: __LCC__
sgi: __sgi,sgi
microsoft: _MSC_VER
metrowerks: __MWERKS__
watcom: __WATCOMC__
tcc: __TINYC__
unknown: UNKNOWN
"
for ventest in $vendors; do
case $ventest in
*:)
vendor=$ventest
continue
;;
*)
vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")"
;;
esac
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [[
#if !($vencpp)
thisisanerror;
#endif
]])], [break])
done
ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1`
])
])dnl
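
For readers unfamiliar with the macro removed above, its detection step is just a compile probe per vendor: build a tiny program that only compiles when one of that vendor's predefined macros is set, and take the first vendor whose probe succeeds. A minimal Python sketch of the same idea (the vendor table, the `cc` compiler name, and the Unix assumptions are illustrative, not part of the macro):

```python
#!/usr/bin/env python
# Illustrative only: the probe logic of AX_COMPILER_VENDOR outside of autoconf.
# Assumes a Unix-like system with a C compiler on PATH as "cc"; the vendor
# table is a small subset of the macro's list above (vendors that define
# __GNUC__ for compatibility, such as clang, must come before "gnu").
import os
import subprocess
import tempfile

VENDORS = [
    ("intel", ["__ICC", "__ECC", "__INTEL_COMPILER"]),
    ("clang", ["__clang__"]),
    ("gnu", ["__GNUC__"]),
    ("microsoft", ["_MSC_VER"]),
]

def detect_vendor(cc="cc"):
    devnull = open(os.devnull, "w")
    try:
        for vendor, macros in VENDORS:
            cond = " || ".join("defined(%s)" % m for m in macros)
            # The probe only compiles when one of the vendor's macros is defined.
            src = "#if !(%s)\n#error wrong vendor\n#endif\nint main(void) { return 0; }\n" % cond
            with tempfile.NamedTemporaryFile(suffix=".c", delete=False) as f:
                f.write(src.encode())
                path = f.name
            try:
                ok = subprocess.call([cc, "-c", path, "-o", os.devnull],
                                     stdout=devnull, stderr=devnull) == 0
            finally:
                os.unlink(path)
            if ok:
                return vendor
    finally:
        devnull.close()
    return "unknown"

if __name__ == "__main__":
    print(detect_vendor())
```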

View File

@ -1,267 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_gcc_archflag.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_GCC_ARCHFLAG([PORTABLE?], [ACTION-SUCCESS], [ACTION-FAILURE])
#
# DESCRIPTION
#
# This macro tries to guess the "native" arch corresponding to the target
# architecture for use with gcc's -march=arch or -mtune=arch flags. If
# found, the cache variable $ax_cv_gcc_archflag is set to this flag and
# ACTION-SUCCESS is executed; otherwise $ax_cv_gcc_archflag is set to
# "unknown" and ACTION-FAILURE is executed. The default ACTION-SUCCESS is
# to add $ax_cv_gcc_archflag to the end of $CFLAGS.
#
# PORTABLE? should be either [yes] (default) or [no]. In the former case,
# the flag is set to -mtune (or equivalent) so that the architecture is
# only used for tuning, but the instruction set used is still portable. In
# the latter case, the flag is set to -march (or equivalent) so that
# architecture-specific instructions are enabled.
#
# The user can specify --with-gcc-arch=<arch> in order to override the
# macro's choice of architecture, or --without-gcc-arch to disable this.
#
# When cross-compiling, or if $CC is not gcc, then ACTION-FAILURE is
# called unless the user specified --with-gcc-arch manually.
#
# Requires macros: AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID
#
# (The main emphasis here is on recent CPUs, on the principle that doing
# high-performance computing on old hardware is uncommon.)
#
# LICENSE
#
# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
# Copyright (c) 2008 Matteo Frigo
# Copyright (c) 2014 Tsukasa Oi
# Copyright (c) 2017-2018 Alexey Kopytov
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 22
AC_DEFUN([AX_GCC_ARCHFLAG],
[AC_REQUIRE([AC_PROG_CC])
AC_REQUIRE([AC_CANONICAL_HOST])
AC_REQUIRE([AC_PROG_SED])
AC_REQUIRE([AX_COMPILER_VENDOR])
AC_ARG_WITH(gcc-arch, [AS_HELP_STRING([--with-gcc-arch=<arch>], [use architecture <arch> for gcc -march/-mtune, instead of guessing])],
ax_gcc_arch=$withval, ax_gcc_arch=yes)
AC_MSG_CHECKING([for gcc architecture flag])
AC_MSG_RESULT([])
AC_CACHE_VAL(ax_cv_gcc_archflag,
[
ax_cv_gcc_archflag="unknown"
if test "$GCC" = yes; then
if test "x$ax_gcc_arch" = xyes; then
ax_gcc_arch=""
if test "$cross_compiling" = no; then
case $host_cpu in
i[[3456]]86*|x86_64*|amd64*) # use cpuid codes
AX_GCC_X86_CPUID(0)
AX_GCC_X86_CPUID(1)
case $ax_cv_gcc_x86_cpuid_0 in
*:756e6547:6c65746e:49656e69) # Intel
case $ax_cv_gcc_x86_cpuid_1 in
*5[[4578]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;;
*5[[123]]?:*:*:*) ax_gcc_arch=pentium ;;
*0?61?:*:*:*|?61?:*:*:*|61?:*:*:*) ax_gcc_arch=pentiumpro ;;
*0?6[[356]]?:*:*:*|?6[[356]]?:*:*:*|6[[356]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;;
*0?6[[78ab]]?:*:*:*|?6[[78ab]]?:*:*:*|6[[78ab]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;;
*0?6[[9d]]?:*:*:*|?6[[9d]]?:*:*:*|6[[9d]]?:*:*:*|*1?65?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;;
*0?6e?:*:*:*|?6e?:*:*:*|6e?:*:*:*) ax_gcc_arch="yonah pentium-m pentium3 pentiumpro" ;;
*0?6f?:*:*:*|?6f?:*:*:*|6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;;
*1?6[[7d]]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;;
*1?6[[aef]]?:*:*:*|*2?6e?:*:*:*) ax_gcc_arch="nehalem corei7 core2 pentium-m pentium3 pentiumpro" ;;
*2?6[[5cf]]?:*:*:*) ax_gcc_arch="westmere corei7 core2 pentium-m pentium3 pentiumpro" ;;
*2?6[[ad]]?:*:*:*) ax_gcc_arch="sandybridge corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
*3?6[[ae]]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
*3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
*3?6d?:*:*:*|*4?6[[7f]]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
*1?6c?:*:*:*|*2?6[[67]]?:*:*:*|*3?6[[56]]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;;
*3?67?:*:*:*|*[[45]]?6[[ad]]?:*:*:*) ax_gcc_arch="silvermont atom core2 pentium-m pentium3 pentiumpro" ;;
*000?f[[012]]?:*:*:*|?f[[012]]?:*:*:*|f[[012]]?:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;;
*000?f[[346]]?:*:*:*|?f[[346]]?:*:*:*|f[[346]]?:*:*:*) ax_gcc_arch="nocona prescott pentium4 pentiumpro" ;;
# fallback
*5??:*:*:*) ax_gcc_arch=pentium ;;
*??6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;;
*6??:*:*:*) ax_gcc_arch=pentiumpro ;;
*00??f??:*:*:*|??f??:*:*:*|?f??:*:*:*|f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro" ;;
esac ;;
*:68747541:444d4163:69746e65) # AMD
case $ax_cv_gcc_x86_cpuid_1 in
*5[[67]]?:*:*:*) ax_gcc_arch=k6 ;;
*5[[8]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;;
*5[[9d]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;;
*6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;;
*6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;;
*6[[678a]]?:*:*:*) ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;;
*000?f[[4578bcef]]?:*:*:*|?f[[4578bcef]]?:*:*:*|f[[4578bcef]]?:*:*:*|*001?f[[4578bcf]]?:*:*:*|1?f[[4578bcf]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;;
*002?f[[13457bcf]]?:*:*:*|2?f[[13457bcf]]?:*:*:*|*004?f[[138bcf]]?:*:*:*|4?f[[138bcf]]?:*:*:*|*005?f[[df]]?:*:*:*|5?f[[df]]?:*:*:*|*006?f[[8bcf]]?:*:*:*|6?f[[8bcf]]?:*:*:*|*007?f[[cf]]?:*:*:*|7?f[[cf]]?:*:*:*|*00c?f1?:*:*:*|c?f1?:*:*:*|*020?f3?:*:*:*|20?f3?:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;;
*010?f[[245689a]]?:*:*:*|10?f[[245689a]]?:*:*:*|*030?f1?:*:*:*|30?f1?:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;;
*050?f[[12]]?:*:*:*|50?f[[12]]?:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;;
*060?f1?:*:*:*|60?f1?:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;;
*060?f2?:*:*:*|60?f2?:*:*:*|*061?f[[03]]?:*:*:*|61?f[[03]]?:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;;
*063?f0?:*:*:*|63?f0?:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;;
*07[[03]]?f0?:*:*:*|7[[03]]?f0?:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;;
# fallback
*0[[13]]??f??:*:*:*|[[13]]??f??:*:*:*) ax_gcc_arch="barcelona amdfam10 k8" ;;
*020?f??:*:*:*|20?f??:*:*:*) ax_gcc_arch="athlon64-sse3 k8-sse3 athlon64 k8" ;;
*05??f??:*:*:*|5??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;;
*060?f??:*:*:*|60?f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;;
*061?f??:*:*:*|61?f??:*:*:*) ax_gcc_arch="bdver2 bdver1 amdfam10 k8" ;;
*06??f??:*:*:*|6??f??:*:*:*) ax_gcc_arch="bdver3 bdver2 bdver1 amdfam10 k8" ;;
*070?f??:*:*:*|70?f??:*:*:*) ax_gcc_arch="btver2 btver1 amdfam10 k8" ;;
*???f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;;
esac ;;
*:746e6543:736c7561:48727561) # IDT / VIA (Centaur)
case $ax_cv_gcc_x86_cpuid_1 in
*54?:*:*:*) ax_gcc_arch=winchip-c6 ;;
*5[[89]]?:*:*:*) ax_gcc_arch=winchip2 ;;
*66?:*:*:*) ax_gcc_arch=winchip2 ;;
*6[[78]]?:*:*:*) ax_gcc_arch=c3 ;;
*6[[9adf]]?:*:*:*) ax_gcc_arch="c3-2 c3" ;;
esac ;;
esac
if test x"$ax_gcc_arch" = x; then # fallback
case $host_cpu in
i586*) ax_gcc_arch=pentium ;;
i686*) ax_gcc_arch=pentiumpro ;;
esac
fi
;;
sparc*)
AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/])
cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null`
cputype=`echo "$cputype" | tr -d ' -' | $SED 's/SPARCIIi/SPARCII/' |tr $as_cr_LETTERS $as_cr_letters`
case $cputype in
*ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;;
*ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;;
*ultrasparc*) ax_gcc_arch="ultrasparc v9" ;;
*supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;;
*hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;;
*cypress*) ax_gcc_arch=cypress ;;
esac ;;
alphaev5) ax_gcc_arch=ev5 ;;
alphaev56) ax_gcc_arch=ev56 ;;
alphapca56) ax_gcc_arch="pca56 ev56" ;;
alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;;
alphaev6) ax_gcc_arch=ev6 ;;
alphaev67) ax_gcc_arch=ev67 ;;
alphaev68) ax_gcc_arch="ev68 ev67" ;;
alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;;
alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;;
alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;;
powerpc*)
cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | $SED 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null`
cputype=`echo $cputype | $SED -e 's/ppc//g;s/ *//g'`
case $cputype in
*750*) ax_gcc_arch="750 G3" ;;
*740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;;
*74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;;
*74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;;
*970*) ax_gcc_arch="970 G5 power4";;
*POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";;
*POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";;
603ev|8240) ax_gcc_arch="$cputype 603e 603";;
*POWER7*) ax_gcc_arch="power7";;
*POWER8*) ax_gcc_arch="power8";;
*POWER9*) ax_gcc_arch="power9";;
*POWER10*) ax_gcc_arch="power10";;
*) ax_gcc_arch=$cputype ;;
esac
ax_gcc_arch="$ax_gcc_arch powerpc"
;;
aarch64)
cpuimpl=`grep 'CPU implementer' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1`
cpuarch=`grep 'CPU architecture' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1`
cpuvar=`grep 'CPU variant' /proc/cpuinfo 2> /dev/null | cut -d: -f2 | tr -d " " | head -n 1`
case $cpuimpl in
0x42) case $cpuarch in
8) case $cpuvar in
0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;;
esac
;;
esac
;;
0x43) case $cpuarch in
8) case $cpuvar in
0x0) ax_gcc_arch="thunderx armv8-a native" ;;
0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;;
esac
;;
esac
;;
esac
;;
esac
fi # not cross-compiling
fi # guess arch
if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then
if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code
flag_prefixes="-mtune="
if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then flag_prefixes="-march="; fi
# -mcpu=$arch and m$arch generate nonportable code on every arch except
# x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr.
case $host_cpu in i*86|x86_64*|amd64*) flag_prefixes="$flag_prefixes -mcpu= -m";; esac
else
flag_prefixes="-march= -mcpu= -m"
fi
for flag_prefix in $flag_prefixes; do
for arch in $ax_gcc_arch; do
flag="$flag_prefix$arch"
AX_CHECK_COMPILE_FLAG($flag, [if test "x$ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor" = xclang; then
if test "x[]m4_default([$1],yes)" = xyes; then
if test "x$flag" = "x-march=$arch"; then flag=-mtune=$arch; fi
fi
fi; ax_cv_gcc_archflag=$flag; break])
done
test "x$ax_cv_gcc_archflag" = xunknown || break
done
fi
fi # $GCC=yes
])
AC_MSG_CHECKING([for gcc architecture flag])
AC_MSG_RESULT($ax_cv_gcc_archflag)
if test "x$ax_cv_gcc_archflag" = xunknown; then
m4_default([$3],:)
else
m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"])
fi
])
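
The second half of the macro above is a mechanical probe: walk the candidate architecture names from most to least specific and keep the first `-mtune=`/`-march=` flag the compiler accepts. A hedged Python sketch of that loop (the candidate list is the Haswell chain from the Intel table above; `cc` and the use of `-Werror` to turn "unknown option" warnings into failures are assumptions for illustration):

```python
# Illustrative only: the flag-probing loop at the end of AX_GCC_ARCHFLAG.
import os
import subprocess
import tempfile

def first_accepted_flag(candidates, prefix="-mtune=", cc="cc"):
    with tempfile.NamedTemporaryFile(suffix=".c", delete=False) as f:
        f.write(b"int main(void) { return 0; }\n")
        src = f.name
    devnull = open(os.devnull, "w")
    try:
        for arch in candidates:
            flag = prefix + arch
            # -Werror makes a merely-warned-about flag count as rejected.
            if subprocess.call([cc, "-Werror", flag, "-c", src, "-o", os.devnull],
                               stdout=devnull, stderr=devnull) == 0:
                return flag
    finally:
        devnull.close()
        os.unlink(src)
    return None  # the macro would record "unknown" here

# The fallback chain the macro builds for a Haswell-class CPU:
print(first_accepted_flag(["haswell", "core-avx2", "corei7", "core2", "pentium-m"]))
```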

View File

@ -1,89 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_gcc_x86_cpuid.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_GCC_X86_CPUID(OP)
# AX_GCC_X86_CPUID_COUNT(OP, COUNT)
#
# DESCRIPTION
#
# On Pentium and later x86 processors, with gcc or a compiler that has a
# compatible syntax for inline assembly instructions, run a small program
# that executes the cpuid instruction with input OP. This can be used to
# detect the CPU type. AX_GCC_X86_CPUID_COUNT takes an additional COUNT
# parameter that gets passed into register ECX before calling cpuid.
#
# On output, the values of the eax, ebx, ecx, and edx registers are stored
# as hexadecimal strings as "eax:ebx:ecx:edx" in the cache variable
# ax_cv_gcc_x86_cpuid_OP.
#
# If the cpuid instruction fails (because you are running a
# cross-compiler, or because you are not using gcc, or because you are on
# a processor that doesn't have this instruction), ax_cv_gcc_x86_cpuid_OP
# is set to the string "unknown".
#
# This macro mainly exists to be used in AX_GCC_ARCHFLAG.
#
# LICENSE
#
# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
# Copyright (c) 2008 Matteo Frigo
# Copyright (c) 2015 Michael Petch <mpetch@capp-sysware.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 10
AC_DEFUN([AX_GCC_X86_CPUID],
[AX_GCC_X86_CPUID_COUNT($1, 0)
])
AC_DEFUN([AX_GCC_X86_CPUID_COUNT],
[AC_REQUIRE([AC_PROG_CC])
AC_LANG_PUSH([C])
AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1,
[AC_RUN_IFELSE([AC_LANG_PROGRAM([#include <stdio.h>], [
int op = $1, level = $2, eax, ebx, ecx, edx;
FILE *f;
__asm__ __volatile__ ("xchg %%ebx, %1\n"
"cpuid\n"
"xchg %%ebx, %1\n"
: "=a" (eax), "=r" (ebx), "=c" (ecx), "=d" (edx)
: "a" (op), "2" (level));
f = fopen("conftest_cpuid", "w"); if (!f) return 1;
fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
fclose(f);
return 0;
])],
[ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid],
[ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid],
[ax_cv_gcc_x86_cpuid_$1=unknown])])
AC_LANG_POP([C])
])
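
The deleted macro's test program is run, not just compiled, and its single output line becomes the cache value `eax:ebx:ecx:edx`. For reference, a hedged Python sketch of the same round trip; it leans on the `<cpuid.h>` helper shipped by GCC and Clang instead of the raw `xchg`/`cpuid` assembly above (an assumption; the assembly form also works with older compilers), and falls back to "unknown" on any failure, as the macro does:

```python
# Illustrative only: what AC_RUN_IFELSE does for this macro, driven from Python.
# Assumes an x86 host and a GCC/Clang-compatible "cc" that provides <cpuid.h>.
import os
import subprocess
import tempfile

C_SRC = r"""
#include <stdio.h>
#include <cpuid.h>
int main(void) {
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid(%(op)d, &eax, &ebx, &ecx, &edx))
        return 1;
    printf("%%x:%%x:%%x:%%x\n", eax, ebx, ecx, edx);
    return 0;
}
"""

def cpuid(op, cc="cc"):
    tmpdir = tempfile.mkdtemp()
    src = os.path.join(tmpdir, "conftest.c")
    exe = os.path.join(tmpdir, "conftest")
    with open(src, "w") as f:
        f.write(C_SRC % {"op": op})
    try:
        if subprocess.call([cc, src, "-o", exe]) != 0:
            return "unknown"
        return subprocess.check_output([exe]).decode().strip()
    except (OSError, subprocess.CalledProcessError):
        return "unknown"

print(cpuid(0))  # ends in "756e6547:6c65746e:49656e69" on Intel, the pattern matched by AX_GCC_ARCHFLAG
print(cpuid(1))  # family/model/stepping signature that AX_GCC_ARCHFLAG decodes
```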

View File

@ -1,123 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_openmp.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_OPENMP([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
#
# DESCRIPTION
#
# This macro tries to find out how to compile programs that use OpenMP a
# standard API and set of compiler directives for parallel programming
# (see http://www-unix.mcs/)
#
# On success, it sets the OPENMP_CFLAGS/OPENMP_CXXFLAGS/OPENMP_F77FLAGS
# output variable to the flag (e.g. -omp) used both to compile *and* link
# OpenMP programs in the current language.
#
# NOTE: You are assumed to not only compile your program with these flags,
# but also link it with them as well.
#
# If you want to compile everything with OpenMP, you should set:
#
# CFLAGS="$CFLAGS $OPENMP_CFLAGS"
# #OR# CXXFLAGS="$CXXFLAGS $OPENMP_CXXFLAGS"
# #OR# FFLAGS="$FFLAGS $OPENMP_FFLAGS"
#
# (depending on the selected language).
#
# The user can override the default choice by setting the corresponding
# environment variable (e.g. OPENMP_CFLAGS).
#
# ACTION-IF-FOUND is a list of shell commands to run if an OpenMP flag is
# found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it is
# not found. If ACTION-IF-FOUND is not specified, the default action will
# define HAVE_OPENMP.
#
# LICENSE
#
# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
# Copyright (c) 2015 John W. Peterson <jwpeterson@gmail.com>
# Copyright (c) 2016 Nick R. Papior <nickpapior@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 13
AC_DEFUN([AX_OPENMP], [
AC_PREREQ([2.69]) dnl for _AC_LANG_PREFIX
AC_CACHE_CHECK([for OpenMP flag of _AC_LANG compiler], ax_cv_[]_AC_LANG_ABBREV[]_openmp, [save[]_AC_LANG_PREFIX[]FLAGS=$[]_AC_LANG_PREFIX[]FLAGS
ax_cv_[]_AC_LANG_ABBREV[]_openmp=unknown
# Flags to try: -fopenmp (gcc), -mp (SGI & PGI),
# -qopenmp (icc>=15), -openmp (icc),
# -xopenmp (Sun), -omp (Tru64),
# -qsmp=omp (AIX),
# none
ax_openmp_flags="-fopenmp -openmp -qopenmp -mp -xopenmp -omp -qsmp=omp none"
if test "x$OPENMP_[]_AC_LANG_PREFIX[]FLAGS" != x; then
ax_openmp_flags="$OPENMP_[]_AC_LANG_PREFIX[]FLAGS $ax_openmp_flags"
fi
for ax_openmp_flag in $ax_openmp_flags; do
case $ax_openmp_flag in
none) []_AC_LANG_PREFIX[]FLAGS=$save[]_AC_LANG_PREFIX[] ;;
*) []_AC_LANG_PREFIX[]FLAGS="$save[]_AC_LANG_PREFIX[]FLAGS $ax_openmp_flag" ;;
esac
AC_LINK_IFELSE([AC_LANG_SOURCE([[
@%:@include <omp.h>
static void
parallel_fill(int * data, int n)
{
int i;
@%:@pragma omp parallel for
for (i = 0; i < n; ++i)
data[i] = i;
}
int
main()
{
int arr[100000];
omp_set_num_threads(2);
parallel_fill(arr, 100000);
return 0;
}
]])],[ax_cv_[]_AC_LANG_ABBREV[]_openmp=$ax_openmp_flag; break],[])
done
[]_AC_LANG_PREFIX[]FLAGS=$save[]_AC_LANG_PREFIX[]FLAGS
])
if test "x$ax_cv_[]_AC_LANG_ABBREV[]_openmp" = "xunknown"; then
m4_default([$2],:)
else
if test "x$ax_cv_[]_AC_LANG_ABBREV[]_openmp" != "xnone"; then
OPENMP_[]_AC_LANG_PREFIX[]FLAGS=$ax_cv_[]_AC_LANG_ABBREV[]_openmp
fi
m4_default([$1], [AC_DEFINE(HAVE_OPENMP,1,[Define if OpenMP is enabled])])
fi
])dnl AX_OPENMP
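
As with the other probes, OpenMP detection is just "try a list of flags until one both compiles and links the test program", which is why the NOTE above insists the flag is needed at link time too. A minimal Python sketch of that loop, using the same flag order as the macro (the `cc` name is an assumption; the result depends on which toolchain and OpenMP runtime are installed):

```python
# Illustrative only: the flag order AX_OPENMP tries, outside of autoconf.
# The candidate must build a full executable, not just an object file.
import os
import subprocess
import tempfile

FLAGS = ["-fopenmp", "-openmp", "-qopenmp", "-mp", "-xopenmp", "-omp", "-qsmp=omp", "none"]

OMP_SRC = b"""
#include <omp.h>
int main(void) { omp_set_num_threads(2); return 0; }
"""

def openmp_flag(cc="cc"):
    tmpdir = tempfile.mkdtemp()
    src = os.path.join(tmpdir, "omp.c")
    exe = os.path.join(tmpdir, "omp")
    with open(src, "wb") as f:
        f.write(OMP_SRC)
    devnull = open(os.devnull, "w")
    try:
        for flag in FLAGS:
            # "none" means the compiler enables OpenMP without any extra flag.
            cmd = [cc, src, "-o", exe] + ([] if flag == "none" else [flag])
            if subprocess.call(cmd, stdout=devnull, stderr=devnull) == 0:
                return flag
    finally:
        devnull.close()
    return "unknown"

print(openmp_flag())  # e.g. "-fopenmp" with GCC, or "unknown" if no OpenMP runtime is available
```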

View File

@ -693,7 +693,7 @@ if test x$use_pkgconfig = xyes; then
)
else
# BUG: Fix this:
echo 'BUG: configure does not yet check for the following dependencies if pkg-config is not on the system: libcrypto++, libgmp'
echo 'BUG: configure does not yet check for the following dependencies if pkg-config is not on the system: libcrypto++'
AC_CHECK_HEADER([openssl/crypto.h],,AC_MSG_ERROR(libcrypto headers missing))
AC_CHECK_LIB([crypto], [main],CRYPTO_LIBS=-lcrypto, AC_MSG_ERROR(libcrypto missing))
@ -733,14 +733,6 @@ else
fi
fi
# These packages don't provide pkgconfig config files across all
# platforms, so we use older autoconf detection mechanisms:
AC_CHECK_HEADER([gmp.h],,AC_MSG_ERROR(libgmp headers missing))
AC_CHECK_LIB([gmp],[[__gmpn_sub_n]],GMP_LIBS=-lgmp, [AC_MSG_ERROR(libgmp missing)])
AC_CHECK_HEADER([gmpxx.h],,AC_MSG_ERROR(libgmpxx headers missing))
AC_CHECK_LIB([gmpxx],[main],GMPXX_LIBS=-lgmpxx, [AC_MSG_ERROR(libgmpxx missing)])
RUST_LIBS="-lrustzcash"
case $host in
*mingw*)
@ -750,33 +742,12 @@ case $host in
;;
esac
dnl Check for OpenMP support
AX_OPENMP(
[AC_DEFINE(HAVE_OPENMP, 1, [Define if OpenMP is enabled])
AM_CONDITIONAL([HAVE_OPENMP], [true])
CPPFLAGS="$CPPFLAGS -DMULTICORE"
CXXFLAGS="$CXXFLAGS $OPENMP_CXXFLAGS"],
[AC_MSG_WARN([OpenMP not supported, disabling multithreading])
AC_DEFINE(HAVE_OPENMP, 0, [Define if OpenMP is enabled])
AM_CONDITIONAL([HAVE_OPENMP], [false])])
# Gitian uses a config.site that sets depends_prefix, and then sets --prefix=/
# build.sh just uses --prefix
if test x$depends_prefix != x; then
LIBSNARK_DEPINST="$depends_prefix"
else
LIBSNARK_DEPINST="$prefix"
fi
# Set optimization flags for libsnark
AX_GCC_ARCHFLAG([no], [LIBSNARK_OPTFLAGS="-O2 $ax_cv_gcc_archflag"], [LIBSNARK_OPTFLAGS="-O2"])
# Additional Zcash flags
AX_CHECK_COMPILE_FLAG([-fwrapv],[CXXFLAGS="$CXXFLAGS -fwrapv"])
AX_CHECK_COMPILE_FLAG([-fno-strict-aliasing],[CXXFLAGS="$CXXFLAGS -fno-strict-aliasing"])
AX_CHECK_COMPILE_FLAG([-Wno-builtin-declaration-mismatch],[CXXFLAGS="$CXXFLAGS -Wno-builtin-declaration-mismatch"],,[[$CXXFLAG_WERROR]])
LIBZCASH_LIBS="-lgmp -lgmpxx $BOOST_SYSTEM_LIB -lcrypto -lsodium $RUST_LIBS"
LIBZCASH_LIBS="$BOOST_SYSTEM_LIB -lcrypto -lsodium $RUST_LIBS"
AC_MSG_CHECKING([whether to build bitcoind])
AM_CONDITIONAL([BUILD_BITCOIND], [test x$build_bitcoind = xyes])
@ -906,10 +877,6 @@ AC_SUBST(SSL_LIBS)
AC_SUBST(EVENT_LIBS)
AC_SUBST(EVENT_PTHREADS_LIBS)
AC_SUBST(ZMQ_LIBS)
AC_SUBST(GMP_LIBS)
AC_SUBST(GMPXX_LIBS)
AC_SUBST(LIBSNARK_DEPINST)
AC_SUBST(LIBSNARK_OPTFLAGS)
AC_SUBST(LIBZCASH_LIBS)
AC_SUBST(PROTON_LIBS)
AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile src/test/buildenv.py])
@ -940,7 +907,7 @@ unset PKG_CONFIG_LIBDIR
PKG_CONFIG_LIBDIR="$PKGCONFIG_LIBDIR_TEMP"
ac_configure_args="${ac_configure_args} --disable-shared --with-pic --with-bignum=no --enable-module-recovery"
AC_CONFIG_SUBDIRS([src/secp256k1 src/snark src/univalue])
AC_CONFIG_SUBDIRS([src/secp256k1 src/univalue])
AC_OUTPUT

View File

@ -71,31 +71,6 @@ Copyright: 2008, Guido U. Draheim <guidod@gmx.de>
2011, Maarten Bosmans <mkbosmans@gmail.com>
License: GPLv3-or-later-with-Autoconf-exception
Files: build-aux/m4/ax_compiler_vendor.m4
Copyright: 2008, Steven G. Johnson <stevenj@alum.mit.edu>
2008, Matteo Frigo
2018-19, John Zaitseff <J.Zaitseff@zap.org.au>
License: GPLv3-or-later-with-Autoconf-exception
Files: build-aux/m4/ax_gcc_archflag.m4
Copyright: 2008, Steven G. Johnson <stevenj@alum.mit.edu>
2008, Matteo Frigo
2014, Tsukasa Oi
2017-2018, Alexey Kopytov
License: GPLv3-or-later-with-Autoconf-exception
Files: build-aux/m4/ax_gcc_x86_cpuid.m4
Copyright: 2008, Steven G. Johnson <stevenj@alum.mit.edu>
2008, Matteo Frigo
2015, Michael Petch <mpetch@capp-sysware.com>
License: GPLv3-or-later-with-Autoconf-exception
Files: build-aux/m4/ax_openmp.m4
Copyright: 2008, Steven G. Johnson <stevenj@alum.mit.edu>
2015, John W. Peterson <jwpeterson@gmail.com>
2016, Nick R. Papior <nickpapior@gmail.com>
License: GPLv3-or-later-with-Autoconf-exception
Files: build-aux/m4/ax_pthread.m4
Copyright: 2008, Steven G. Johnson <stevenj@alum.mit.edu>
2011, Daniel Richard G. <skunk@iSKUNK.ORG>
@ -109,10 +84,6 @@ Files: depends/sources/libsodium-*.tar.gz
Copyright: 2013-2016 Frank Denis
License: ISC
Files: depends/sources/gmp-*.tar.bz2
Copyright: 1991, 1996, 1999, 2000, 2007 Free Software Foundation, Inc.
License: LGPL
Files: depends/sources/boost_*.tar.gz
Copyright: 2008 Beman Dawes
License: Boost-Software-License-1.0
@ -162,11 +133,6 @@ Copyright: 2011, The LevelDB Authors
License: BSD-3clause-Google
Comment: The LevelDB Authors are listed in src/leveldb/AUTHORS.
Files src/snark/*
Copyright: 2012-2019, SCIPR Lab and contributors
License: Expat
Comment: The contributors are listed in src/snark/AUTHORS.
License: Boost-Software-License-1.0
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by

View File

@ -1,19 +0,0 @@
package=libgmp
$(package)_version=6.1.1
$(package)_download_path=https://gmplib.org/download/gmp/
$(package)_file_name=gmp-$($(package)_version).tar.bz2
$(package)_sha256_hash=a8109865f2893f1373b0a8ed5ff7429de8db696fc451b1036bd7bdf95bbeffd6
$(package)_dependencies=
$(package)_config_opts=--enable-cxx --disable-shared
define $(package)_config_cmds
$($(package)_autoconf) --host=$(host) --build=$(build)
endef
define $(package)_build_cmds
$(MAKE) CPPFLAGS='-fPIC'
endef
define $(package)_stage_cmds
$(MAKE) DESTDIR=$($(package)_staging_dir) install ; echo '=== staging find for $(package):' ; find $($(package)_staging_dir)
endef

View File

@ -34,7 +34,7 @@ rust_crates := \
crate_winapi_x86_64_pc_windows_gnu
rust_packages := rust $(rust_crates) librustzcash
proton_packages := proton
zcash_packages := libgmp libsodium
zcash_packages := libsodium
packages := boost openssl libevent zeromq $(zcash_packages) googletest
native_packages := native_ccache

View File

@ -4,3 +4,52 @@ release-notes at release time)
Notable changes
===============

Fake chain detection during initial block download
--------------------------------------------------

One of the mechanisms that `zcashd` uses to detect whether it is in "initial
block download" (IBD) mode is to compare the active chain's cumulative work
against a hard-coded "minimum chain work" value. This mechanism (inherited from
Bitcoin Core) means that once a node exits IBD mode, it is either on the main
chain, or a fake alternate chain with similar amounts of work. In the latter
case, the node has most likely become the victim of a 50% + 1 adversary.

Starting from this release, `zcashd` additionally hard-codes the block hashes
for the activation blocks of each past network upgrade (NU). During initial
chain synchronization, and after the active chain has reached "minimum chain
work", the node checks the blocks at each NU activation height against the
hard-coded hashes. If any of them do not match, the node will immediately alert
the user and **shut down for safety**.

Disabling old Sprout proofs
---------------------------

As part of our ongoing work to clean up the codebase and minimise the security
surface of `zcashd`, we are removing `libsnark` from the codebase, and dropping
support for creating and verifying old Sprout proofs. Funds stored in Sprout
addresses are not affected, as they are spent using the hybrid Sprout circuit
(built using `bellman`) that was deployed during the Sapling network upgrade.

This change has several implications:

- `zcashd` no longer verifies old Sprout proofs, and will instead assume they
are valid. This has a minor implication for nodes: during initial block
download, an adversary could feed the node fake blocks containing invalid old
Sprout proofs, and the node would accept the fake chain as valid. However,
as soon as the active chain contains at least as much work as the hard-coded
"minimum chain work" value, the node will detect this situation and shut down.
- Shielded transactions can no longer be created before Sapling has activated.
This does not affect Zcash itself, but will affect downstream codebases that
have not yet activated Sapling (or that start a new chain after this point and
do not activate Sapling from launch). Note that the old Sprout circuit is
[vulnerable to counterfeiting](https://z.cash/support/security/announcements/security-announcement-2019-02-05-cve-2019-7167/)
and should not be used in current deployments.
- Starting from this release, the circuit parameters from the original Sprout
MPC are no longer required to start `zcashd`, and will not be downloaded by
`fetch-params.sh`. They are not being automatically deleted at this time.

We would like to take a moment to thank the `libsnark` authors and contributors.
It was vital to the success of Zcash, and the development of zero-knowledge
proofs in general, to have this code available and usable.
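
The two mechanisms described above (minimum chain work plus hard-coded network-upgrade activation hashes) can be pictured with a short, hedged Python sketch; the height, hash, and `node` RPC wrapper below are placeholders for illustration and are not zcashd's actual values or code:

```python
# A minimal sketch of the check described in the release notes, not zcashd's implementation.
EXPECTED_ACTIVATION_HASHES = {
    # activation height -> expected block hash; placeholder values, not the real ones
    111111: "0000000000000000000000000000000000000000000000000000000000111111",
}

def check_activation_blocks(node, reached_minimum_chain_work):
    """`node.getblockhash(height)` is assumed to behave like the zcashd RPC of that name."""
    if not reached_minimum_chain_work:
        return  # the check only fires once the chain has enough cumulative work
    for height, expected in EXPECTED_ACTIVATION_HASHES.items():
        actual = node.getblockhash(height)
        if actual != expected:
            raise SystemExit(
                "Block at network upgrade activation height %d has hash %s, expected %s; "
                "this looks like a fake chain -- shutting down for safety."
                % (height, actual, expected))
```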

View File

@ -43,7 +43,6 @@ testScripts=(
'rest.py'
'mempool_spendcoinbase.py'
'mempool_reorg.py'
'mempool_tx_input_limit.py'
'mempool_nu_activation.py'
'mempool_tx_expiry.py'
'httpbasics.py'

View File

@ -66,7 +66,9 @@ class BIP65Test(ComparisonTestFramework):
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(1)
self.nodes[0].generate(100)
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
hashTip = self.nodes[0].getbestblockhash()
hashFinalSaplingRoot = int("0x" + self.nodes[0].getblock(hashTip)['finalsaplingroot'] + "L", 0)
self.tip = int ("0x" + hashTip + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
'''Check that the rules are enforced.'''
@ -83,7 +85,8 @@ class BIP65Test(ComparisonTestFramework):
self.block_bits = int("0x" + gbt["bits"], 0)
block = create_block(self.tip, create_coinbase(101),
self.block_time, self.block_bits)
self.block_time, self.block_bits,
hashFinalSaplingRoot)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()

View File

@ -73,7 +73,9 @@ class BIP66Test(ComparisonTestFramework):
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(1)
self.nodes[0].generate(100)
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
hashTip = self.nodes[0].getbestblockhash()
hashFinalSaplingRoot = int("0x" + self.nodes[0].getblock(hashTip)['finalsaplingroot'] + "L", 0)
self.tip = int ("0x" + hashTip + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
'''Check that the rules are enforced.'''
@ -90,7 +92,8 @@ class BIP66Test(ComparisonTestFramework):
self.block_bits = int("0x" + gbt["bits"], 0)
block = create_block(self.tip, create_coinbase(101),
self.block_time, self.block_bits)
self.block_time, self.block_bits,
hashFinalSaplingRoot)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()

View File

@ -30,8 +30,6 @@ class FinalSaplingRootTest(BitcoinTestFramework):
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, extra_args=[[
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:200', # Sapling
'-txindex' # Avoid JSONRPC error: No information available about transaction
]] * 4 )
connect_nodes_bi(self.nodes,0,1)
@ -42,7 +40,6 @@ class FinalSaplingRootTest(BitcoinTestFramework):
self.sync_all()
def run_test(self):
# Activate Overwinter and Sapling
self.nodes[0].generate(200)
self.sync_all()

View File

@ -180,8 +180,8 @@ class RawTransactionsTest(BitcoinTestFramework):
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
# 4-byte version + 4-byte versionGroupId + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:90] + "0100" + rawtx[92:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
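
For reference, the magic offsets in the updated comment and slice follow directly from the transaction layout it describes; a small sketch of that arithmetic (hex offsets are two characters per byte; the helper names are illustrative, not part of the test framework):

```python
def script_len_offset(overwintered):
    header = 4 + (4 if overwintered else 0)  # nVersion (+ nVersionGroupId for v3/v4 transactions)
    vin_count = 1                            # compact-size count for the single input
    prevout = 32 + 4                         # previous txid + output index
    return 2 * (header + vin_count + prevout)

assert script_len_offset(overwintered=False) == 82  # the old, pre-Overwinter slice
assert script_len_offset(overwintered=True) == 90   # the Sapling v4 slice used above

def corrupt_script_len(rawtx_hex, overwintered=True):
    # Mirrors the test: overwrite the empty scriptSig length with 0x01 plus a 0x00 byte.
    off = script_len_offset(overwintered)
    return rawtx_hex[:off] + "0100" + rawtx_hex[off + 2:]
```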

View File

@ -11,7 +11,6 @@ from test_framework.util import (
start_node, connect_nodes, wait_and_assert_operationid_status,
get_coinbase_address
)
from test_framework.authproxy import JSONRPCException
from decimal import Decimal
@ -22,9 +21,7 @@ class MempoolUpgradeActivationTest(BitcoinTestFramework):
def setup_network(self):
args = ["-checkmempool", "-debug=mempool", "-blockmaxsize=4000",
"-nuparams=5ba81b19:200", # Overwinter
"-nuparams=76b809bb:210", # Sapling
"-nuparams=2bb40e60:220", # Blossom
"-nuparams=2bb40e60:200", # Blossom
]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
@ -43,23 +40,24 @@ class MempoolUpgradeActivationTest(BitcoinTestFramework):
# Mine 97 blocks. After this, nodes[1] blocks
# 1 to 97 are spend-able.
self.nodes[0].generate(97)
self.nodes[0].generate(94)
self.sync_all()
# Shield some ZEC
node1_taddr = get_coinbase_address(self.nodes[1])
node0_zaddr = self.nodes[0].z_getnewaddress('sprout')
node0_zaddr = self.nodes[0].z_getnewaddress('sapling')
recipients = [{'address': node0_zaddr, 'amount': Decimal('10')}]
myopid = self.nodes[1].z_sendmany(node1_taddr, recipients, 1, Decimal('0'))
print wait_and_assert_operationid_status(self.nodes[1], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# Mempool checks for activation of upgrade Y at height H on base X
def nu_activation_checks():
# Mine block H - 2. After this, the mempool expects
# block H - 1, which is the last X block.
self.nodes[0].generate(1)
self.sync_all()
# Start at block H - 5. After this, the mempool expects block H - 4, which is
# the last height at which we can create transactions for X blocks (due to the
# expiring-soon restrictions).
# Mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
@ -67,39 +65,64 @@ class MempoolUpgradeActivationTest(BitcoinTestFramework):
# Check node 0 shielded balance
assert_equal(self.nodes[0].z_getbalance(node0_zaddr), Decimal('10'))
# Fill the mempool with twice as many transactions as can fit into blocks
# Fill the mempool with more transactions than can fit into 4 blocks
node0_taddr = self.nodes[0].getnewaddress()
x_txids = []
info = self.nodes[0].getblockchaininfo()
chaintip_branchid = info["consensus"]["chaintip"]
while self.nodes[1].getmempoolinfo()['bytes'] < 2 * 4000:
try:
x_txids.append(self.nodes[1].sendtoaddress(node0_taddr, Decimal('0.001')))
except JSONRPCException:
# This fails due to expiring soon threshold, which applies from Overwinter onwards.
upgrade_name = info["upgrades"][chaintip_branchid]["name"]
assert_true(upgrade_name in ("Overwinter", "Sapling"), upgrade_name)
break
while self.nodes[1].getmempoolinfo()['bytes'] < 8 * 4000:
x_txids.append(self.nodes[1].sendtoaddress(node0_taddr, Decimal('0.001')))
self.sync_all()
# Spends should be in the mempool
x_mempool = set(self.nodes[0].getrawmempool())
assert_equal(x_mempool, set(x_txids))
assert_equal(set(self.nodes[1].getrawmempool()), set(x_txids))
blocks = []
# Mine block H - 4. After this, the mempool expects
# block H - 3, which is an X block.
self.nodes[0].generate(1)
self.sync_all()
blocks.append(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx'])
# mempool should not be empty.
assert_true(len(set(self.nodes[0].getrawmempool())) > 0)
assert_true(len(set(self.nodes[1].getrawmempool())) > 0)
# Mine block H - 3. After this, the mempool expects
# block H - 2, which is an X block.
self.nodes[0].generate(1)
self.sync_all()
blocks.append(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx'])
# mempool should not be empty.
assert_true(len(set(self.nodes[0].getrawmempool())) > 0)
assert_true(len(set(self.nodes[1].getrawmempool())) > 0)
# Mine block H - 2. After this, the mempool expects
# block H - 1, which is an X block.
self.nodes[0].generate(1)
self.sync_all()
blocks.append(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx'])
# mempool should not be empty.
assert_true(len(set(self.nodes[0].getrawmempool())) > 0)
assert_true(len(set(self.nodes[1].getrawmempool())) > 0)
# Mine block H - 1. After this, the mempool expects
# block H, which is the first Y block.
self.nodes[0].generate(1)
self.sync_all()
blocks.append(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx'])
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
assert_equal(set(self.nodes[1].getrawmempool()), set())
# When transitioning from Sprout to Overwinter, where expiring soon threshold does not apply:
# Block H - 1 should contain a subset of the original mempool
# Blocks [H - 4..H - 1] should contain a subset of the original mempool
# (with all other transactions having been dropped)
block_txids = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx']
if chaintip_branchid is "00000000":
assert(len(block_txids) < len(x_txids))
assert(sum([len(block_txids) for block_txids in blocks]) < len(x_txids))
for block_txids in blocks:
for txid in block_txids[1:]: # Exclude coinbase
assert(txid in x_txids)
@ -117,6 +140,7 @@ class MempoolUpgradeActivationTest(BitcoinTestFramework):
# Spends should be in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), set(y_txids))
assert_equal(set(self.nodes[1].getrawmempool()), set(y_txids))
# Node 0 note should be unspendable
assert_equal(self.nodes[0].z_getbalance(node0_zaddr), Decimal('0'))
@ -135,6 +159,9 @@ class MempoolUpgradeActivationTest(BitcoinTestFramework):
#assert_equal(set(self.nodes[0].getrawmempool()), set(block_txids[1:]))
assert_equal(set(self.nodes[0].getrawmempool()), set())
# Node 1's mempool is unaffected because it still considers block H - 1 valid.
assert_equal(set(self.nodes[1].getrawmempool()), set(y_txids))
# Node 0 note should be spendable again
assert_equal(self.nodes[0].z_getbalance(node0_zaddr), Decimal('10'))
@ -146,26 +173,10 @@ class MempoolUpgradeActivationTest(BitcoinTestFramework):
self.nodes[1].generate(6)
self.sync_all()
print('Testing Sprout -> Overwinter activation boundary')
# Current height = 197
print('Testing Sapling -> Blossom activation boundary')
# Current height = 195
nu_activation_checks()
# Current height = 205
self.nodes[0].generate(2)
self.sync_all()
print('Testing Overwinter -> Sapling activation boundary')
# Current height = 207
nu_activation_checks()
# Current height = 215
self.nodes[0].generate(2)
self.sync_all()
print('Testing Sapling -> Blossom activation boundary')
# Current height = 217
nu_activation_checks()
# Current height = 225
if __name__ == '__main__':
MempoolUpgradeActivationTest().main()

View File

@ -60,9 +60,9 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 10})
# Set the time lock
# Set the time lock, ensuring we don't clobber the rest of the Sapling v4 tx format
timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = timelock_tx[:-38] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000" + timelock_tx[-30:]
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
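
The new slice bounds come from the tail of the Sapling v4 serialization for a fully transparent transaction (as produced by `createrawtransaction` here); a hedged Python sketch of the arithmetic, with the field sizes assumed from that format:

```python
# Field sizes assumed from the Sapling v4 format for a fully transparent tx:
#   nLockTime (4 bytes) + nExpiryHeight (4) + valueBalance (8)
#   + empty vShieldedSpend / vShieldedOutput / vJoinSplit counts (1 byte each)
TAIL_HEX = 2 * (4 + 4 + 8 + 1 + 1 + 1)    # 38 hex characters after the outputs
AFTER_LOCKTIME_HEX = TAIL_HEX - 2 * 4     # 30 hex characters following nLockTime

def set_locktime(rawtx_hex, height):
    assert height < 256, "this sketch, like the test, only fills in the low byte"
    locktime_le = "%02x" % height + "000000"  # 4-byte little-endian nLockTime
    return rawtx_hex[:-TAIL_HEX] + locktime_le + rawtx_hex[-AFTER_LOCKTIME_HEX:]
```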

View File

@ -4,7 +4,7 @@
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
#
# Test proper expiry for transactions >= version 3
# Test proper expiry for transactions >= version 4
#
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
@ -17,7 +17,6 @@ from test_framework.util import assert_equal, \
from decimal import Decimal
SAPLING_ACTIVATION_HEIGHT = 300
TX_EXPIRING_SOON_THRESHOLD = 3
TX_EXPIRY_DELTA = 10
@ -26,8 +25,6 @@ class MempoolTxExpiryTest(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir,
[[
"-nuparams=5ba81b19:205", # Overwinter
"-nuparams=76b809bb:%d" % SAPLING_ACTIVATION_HEIGHT, # Sapling
"-txexpirydelta=%d" % TX_EXPIRY_DELTA,
"-debug=mempool"
]] * 4)
@ -40,22 +37,13 @@ class MempoolTxExpiryTest(BitcoinTestFramework):
bob = self.nodes[2].getnewaddress()
z_bob = self.nodes[2].z_getnewaddress('sapling')
# When Overwinter not yet activated, no expiryheight in tx
tx = self.nodes[0].sendtoaddress(bob, 0.01)
rawtx = self.nodes[0].getrawtransaction(tx, 1)
assert_equal(rawtx["overwintered"], False)
assert("expiryheight" not in rawtx)
self.nodes[0].generate(6)
self.sync_all()
print "Splitting network..."
self.split_network()
# When Overwinter is activated, test dependent txs
# Test dependent txs
firstTx = self.nodes[0].sendtoaddress(alice, 0.1)
firstTxInfo = self.nodes[0].getrawtransaction(firstTx, 1)
assert_equal(firstTxInfo["version"], 3)
assert_equal(firstTxInfo["version"], 4)
assert_equal(firstTxInfo["overwintered"], True)
assert("expiryheight" in firstTxInfo)
print "First tx expiry height:", firstTxInfo['expiryheight']
@ -88,12 +76,6 @@ class MempoolTxExpiryTest(BitcoinTestFramework):
assert_equal(set(self.nodes[0].getrawmempool()), set())
assert_equal(set(self.nodes[2].getrawmempool()), set())
# Activate Sapling
n = SAPLING_ACTIVATION_HEIGHT - self.nodes[0].getblockcount()
assert(n > 0)
self.nodes[0].generate(n)
self.sync_all()
## Shield one of Alice's coinbase funds to her zaddr
res = self.nodes[0].z_shieldcoinbase("*", z_alice, 0.0001, 1)
wait_and_assert_operationid_status(self.nodes[0], res['opid'])

View File

@ -1,139 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, initialize_chain_clean, \
start_node, connect_nodes, wait_and_assert_operationid_status, \
get_coinbase_address
from decimal import Decimal
# Test -mempooltxinputlimit
class MempoolTxInputLimitTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool", "-mempooltxinputlimit=2", "-nuparams=5ba81b19:110"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 2)
def call_z_sendmany(self, from_addr, to_addr, amount):
recipients = []
recipients.append({"address": to_addr, "amount": amount})
myopid = self.nodes[0].z_sendmany(from_addr, recipients)
return wait_and_assert_operationid_status(self.nodes[0], myopid)
def run_test(self):
self.nodes[0].generate(100)
self.sync_all()
# Mine three blocks. After this, nodes[0] blocks
# 1, 2, and 3 are spend-able.
self.nodes[1].generate(3)
self.sync_all()
# Check 1: z_sendmany is limited by -mempooltxinputlimit
# Add zaddr to node 0
node0_zaddr = self.nodes[0].z_getnewaddress('sprout')
# Send three inputs from node 0 taddr to zaddr to get out of coinbase
node0_taddr = get_coinbase_address(self.nodes[0])
recipients = []
recipients.append({"address":node0_zaddr, "amount":Decimal('30.0')-Decimal('0.0001')}) # utxo amount less fee
myopid = self.nodes[0].z_sendmany(node0_taddr, recipients)
# Spend should fail due to -mempooltxinputlimit
wait_and_assert_operationid_status(self.nodes[0], myopid, "failed", "Too many transparent inputs 3 > limit 2", 120)
# Mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
# Reduce amount to only use two inputs
spend_zaddr_amount = Decimal('20.0') - Decimal('0.0001')
spend_zaddr_id = self.call_z_sendmany(node0_taddr, node0_zaddr, spend_zaddr_amount) # utxo amount less fee
self.sync_all()
# Spend should be in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_zaddr_id ]))
self.nodes[0].generate(1)
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
# Check 2: sendfrom is limited by -mempooltxinputlimit
recipients = []
spend_taddr_amount = spend_zaddr_amount - Decimal('0.0001')
spend_taddr_output = Decimal('8')
# Create three outputs
recipients.append({"address":self.nodes[1].getnewaddress(), "amount": spend_taddr_output})
recipients.append({"address":self.nodes[1].getnewaddress(), "amount": spend_taddr_output})
recipients.append({"address":self.nodes[1].getnewaddress(), "amount": spend_taddr_amount - spend_taddr_output - spend_taddr_output})
myopid = self.nodes[0].z_sendmany(node0_zaddr, recipients)
wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# Should use three UTXOs and fail
try:
self.nodes[1].sendfrom("", node0_taddr, spend_taddr_amount - Decimal('1'))
assert(False)
except JSONRPCException,e:
msg = e.error['message']
assert_equal("Too many transparent inputs 3 > limit 2", msg)
# mempool should be empty.
assert_equal(set(self.nodes[1].getrawmempool()), set())
# Should use two UTXOs and succeed
spend_taddr_id2 = self.nodes[1].sendfrom("", node0_taddr, spend_taddr_output + spend_taddr_output - Decimal('1'))
# Spend should be in the mempool
assert_equal(set(self.nodes[1].getrawmempool()), set([ spend_taddr_id2 ]))
# Mine three blocks
self.nodes[1].generate(3)
self.sync_all()
# The next block to be mined, 109, is the last Sprout block
bci = self.nodes[0].getblockchaininfo()
assert_equal(bci['consensus']['chaintip'], '00000000')
assert_equal(bci['consensus']['nextblock'], '00000000')
# z_sendmany should be limited by -mempooltxinputlimit
recipients = []
recipients.append({"address":node0_zaddr, "amount":Decimal('30.0')-Decimal('0.0001')}) # utxo amount less fee
myopid = self.nodes[0].z_sendmany(node0_taddr, recipients)
wait_and_assert_operationid_status(self.nodes[0], myopid, 'failed', 'Too many transparent inputs 3 > limit 2')
# Mine one block
self.nodes[1].generate(1)
self.sync_all()
# The next block to be mined, 110, is the first Overwinter block
bci = self.nodes[0].getblockchaininfo()
assert_equal(bci['consensus']['chaintip'], '00000000')
assert_equal(bci['consensus']['nextblock'], '5ba81b19')
# z_sendmany should no longer be limited by -mempooltxinputlimit
myopid = self.nodes[0].z_sendmany(node0_taddr, recipients)
wait_and_assert_operationid_status(self.nodes[0], myopid)
if __name__ == '__main__':
MempoolTxInputLimitTest().main()

View File

@ -27,7 +27,7 @@ def assert_mergetoaddress_exception(expected_error_msg, merge_to_address_lambda)
class MergeToAddressHelper:
def __init__(self, addr_type, any_zaddr, utxos_to_generate, utxos_in_tx1, utxos_in_tx2, test_mempooltxinputlimit):
def __init__(self, addr_type, any_zaddr, utxos_to_generate, utxos_in_tx1, utxos_in_tx2):
self.addr_type = addr_type
self.any_zaddr = [any_zaddr]
self.any_zaddr_or_utxo = [any_zaddr, "ANY_TADDR"]
@ -35,7 +35,6 @@ class MergeToAddressHelper:
self.utxos_to_generate = utxos_to_generate
self.utxos_in_tx1 = utxos_in_tx1
self.utxos_in_tx2 = utxos_in_tx2
self.test_mempooltxinputlimit = test_mempooltxinputlimit
def setup_chain(self, test):
print("Initializing test directory "+test.options.tmpdir)
@ -282,16 +281,10 @@ class MergeToAddressHelper:
test.nodes[1].generate(1)
test.sync_all()
# Verify maximum number of UTXOs which node 2 can shield is limited by option -mempooltxinputlimit
# This option is used when the limit parameter is set to 0.
# -mempooltxinputlimit is not used after overwinter activation
if self.test_mempooltxinputlimit:
expected_to_merge = 7
expected_remaining = 13
else:
expected_to_merge = 20
expected_remaining = 0
# Verify maximum number of UTXOs which node 2 can shield is not limited
# when the limit parameter is set to 0.
expected_to_merge = 20
expected_remaining = 0
result = test.nodes[2].z_mergetoaddress([n2taddr], myzaddr, Decimal('0.0001'), 0)
assert_equal(result["mergingUTXOs"], expected_to_merge)

View File

@ -15,8 +15,6 @@ from mergetoaddress_helper import assert_mergetoaddress_exception
class MergeToAddressMixedNotes(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:100', # Sapling
'-experimentalfeatures', '-zmergetoaddress'
]] * 4)

View File

@ -12,16 +12,13 @@ from mergetoaddress_helper import MergeToAddressHelper
class MergeToAddressSapling (BitcoinTestFramework):
# 13505 would be the maximum number of utxos based on the transaction size limits for Sapling
# but testing this causes the test to take an indeterminately long time to run.
helper = MergeToAddressHelper('sapling', 'ANY_SAPLING', 800, 800, 0, False)
helper = MergeToAddressHelper('sapling', 'ANY_SAPLING', 800, 800, 0)
def setup_chain(self):
self.helper.setup_chain(self)
def setup_network(self, split=False):
self.helper.setup_network(self, [
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:100', # Sapling
])
self.helper.setup_network(self)
def run_test(self):
self.helper.run_test(self)

View File

@ -10,7 +10,9 @@ from mergetoaddress_helper import MergeToAddressHelper
class MergeToAddressSprout (BitcoinTestFramework):
helper = MergeToAddressHelper('sprout', 'ANY_SPROUT', 800, 662, 138, True)
# 13505 would be the maximum number of utxos based on the transaction size limits for Sapling
# but testing this causes the test to take an indeterminately long time to run.
helper = MergeToAddressHelper('sprout', 'ANY_SPROUT', 800, 800, 0)
def setup_chain(self):
self.helper.setup_chain(self)

View File

@ -6,7 +6,7 @@
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread, \
msg_filteradd, msg_filterclear, mininode_lock, SPROUT_PROTO_VERSION
msg_filteradd, msg_filterclear, mininode_lock, SAPLING_PROTO_VERSION
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import initialize_chain_clean, start_nodes, \
p2p_port, assert_equal
@ -74,10 +74,10 @@ class NodeBloomTest(BitcoinTestFramework):
# Verify mininodes are connected to zcashd nodes
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
peerinfo = self.nodes[1].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
# Mininodes send filterclear message to zcashd node.
nobf_node.send_message(msg_filterclear())
@ -88,10 +88,10 @@ class NodeBloomTest(BitcoinTestFramework):
# Verify mininodes are still connected to zcashd nodes
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
peerinfo = self.nodes[1].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
# Mininodes send filteradd message to zcashd node.
nobf_node.send_message(msg_filteradd())
@ -102,10 +102,10 @@ class NodeBloomTest(BitcoinTestFramework):
# Verify NoBF mininode has been dropped, and BF mininode is still connected.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
assert_equal(0, versions.count(SAPLING_PROTO_VERSION))
peerinfo = self.nodes[1].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
[ c.disconnect_node() for c in connections ]

View File

@ -7,7 +7,7 @@ import sys; assert sys.version_info < (3,), ur"This script does not run under Py
from test_framework.authproxy import JSONRPCException
from test_framework.mininode import NodeConn, NetworkThread, CInv, \
msg_mempool, msg_getdata, msg_tx, mininode_lock, OVERWINTER_PROTO_VERSION
msg_mempool, msg_getdata, msg_tx, mininode_lock, SAPLING_PROTO_VERSION
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, connect_nodes_bi, fail, \
initialize_chain_clean, p2p_port, start_nodes, sync_blocks, sync_mempools
@ -23,8 +23,7 @@ class TxExpiringSoonTest(BitcoinTestFramework):
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = start_nodes(3, self.options.tmpdir,
extra_args=[['-nuparams=5ba81b19:10']] * 3)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes, 0, 1)
# We don't connect node 2
@ -85,7 +84,7 @@ class TxExpiringSoonTest(BitcoinTestFramework):
testnode0 = TestNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
testnode0, "regtest", OVERWINTER_PROTO_VERSION))
testnode0, "regtest", SAPLING_PROTO_VERSION))
testnode0.add_connection(connections[0])
# Start up network handling in another thread
@ -95,7 +94,7 @@ class TxExpiringSoonTest(BitcoinTestFramework):
# Verify mininodes are connected to zcashd nodes
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
assert_equal(0, peerinfo[0]["banscore"])
# Mine some blocks so we can spend
@ -150,7 +149,7 @@ class TxExpiringSoonTest(BitcoinTestFramework):
# Set up test node for node 2
testnode2 = TestNode()
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2],
testnode2, "regtest", OVERWINTER_PROTO_VERSION))
testnode2, "regtest", SAPLING_PROTO_VERSION))
testnode2.add_connection(connections[-1])
# Verify block count

View File

@ -6,7 +6,7 @@
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.mininode import NodeConn, NetworkThread, \
msg_tx, OVERWINTER_PROTO_VERSION
msg_tx, SAPLING_PROTO_VERSION
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import initialize_chain_clean, start_nodes, \
p2p_port, assert_equal
@ -22,15 +22,14 @@ class TxExpiryDoSTest(BitcoinTestFramework):
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-nuparams=5ba81b19:10']])
self.nodes = start_nodes(1, self.options.tmpdir)
def run_test(self):
test_node = TestNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
test_node, "regtest", OVERWINTER_PROTO_VERSION))
test_node, "regtest", SAPLING_PROTO_VERSION))
test_node.add_connection(connections[0])
# Start up network handling in another thread
@ -41,7 +40,7 @@ class TxExpiryDoSTest(BitcoinTestFramework):
# Verify mininodes are connected to zcashd nodes
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
assert_equal(0, peerinfo[0]["banscore"])
coinbase_blocks = self.nodes[0].generate(1)
@ -62,7 +61,7 @@ class TxExpiryDoSTest(BitcoinTestFramework):
# and still has a banscore of 0.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
assert_equal(0, peerinfo[0]["banscore"])
# Mine a block and resend the transaction
@ -75,7 +74,7 @@ class TxExpiryDoSTest(BitcoinTestFramework):
# but has a banscore of 10.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(1, versions.count(SAPLING_PROTO_VERSION))
assert_equal(10, peerinfo[0]["banscore"])
[c.disconnect_node() for c in connections]

View File

@ -132,7 +132,7 @@ class RawTransactionsTest(BitcoinTestFramework):
break;
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.199 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)

View File

@ -6,16 +6,10 @@
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import start_nodes, wait_and_assert_operationid_status
from test_framework.util import wait_and_assert_operationid_status
class RegtestSignrawtransactionTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
"-nuparams=5ba81b19:200", # Overwinter
"-nuparams=76b809bb:206", # Sapling
]] * 4)
def run_test(self):
self.nodes[0].generate(1)
self.sync_all()
@ -26,7 +20,7 @@ class RegtestSignrawtransactionTest (BitcoinTestFramework):
self.nodes[0].generate(1)
self.sync_all()
# Create and sign Overwinter transaction.
# Create and sign Sapling transaction.
# If the incorrect consensus branch id is selected, there will be a signing error.
opid = self.nodes[1].z_sendmany(taddr,
[{'address': zaddr1, 'amount': 1}])

View File

@ -11,6 +11,8 @@ from test_framework.util import assert_equal, initialize_chain_clean, \
import time
FAKE_SPROUT = ['-nuparams=5ba81b19:210', '-nuparams=76b809bb:220']
FAKE_OVERWINTER = ['-nuparams=5ba81b19:10', '-nuparams=76b809bb:220']
class RewindBlockIndexTest (BitcoinTestFramework):
@ -22,7 +24,11 @@ class RewindBlockIndexTest (BitcoinTestFramework):
# Node 0 - Overwinter, then Sprout, then Overwinter again
# Node 1 - Sprout
# Node 2 - Overwinter
self.nodes = start_nodes(3, self.options.tmpdir, extra_args=[['-nuparams=5ba81b19:10'], [], ['-nuparams=5ba81b19:10']])
self.nodes = start_nodes(3, self.options.tmpdir, extra_args=[
FAKE_OVERWINTER,
FAKE_SPROUT,
FAKE_OVERWINTER,
])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
@ -52,7 +58,7 @@ class RewindBlockIndexTest (BitcoinTestFramework):
print("Switching node 0 from Overwinter to Sprout")
self.nodes[0].stop()
bitcoind_processes[0].wait()
self.nodes[0] = start_node(0,self.options.tmpdir)
self.nodes[0] = start_node(0, self.options.tmpdir, extra_args=FAKE_SPROUT)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
@ -69,7 +75,7 @@ class RewindBlockIndexTest (BitcoinTestFramework):
print("Switching node 0 from Sprout to Overwinter")
self.nodes[0].stop()
bitcoind_processes[0].wait()
self.nodes[0] = start_node(0,self.options.tmpdir, extra_args=['-nuparams=5ba81b19:10'])
self.nodes[0] = start_node(0, self.options.tmpdir, extra_args=FAKE_OVERWINTER)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)

View File

@ -13,7 +13,7 @@ class SignOfflineTest (BitcoinTestFramework):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = [ start_node(0, self.options.tmpdir, ["-nuparams=5ba81b19:10"]) ]
self.nodes = [ start_node(0, self.options.tmpdir, ["-nuparams=2bb40e60:10"]) ]
self.is_network_split = False
self.sync_all()
@ -22,7 +22,7 @@ class SignOfflineTest (BitcoinTestFramework):
print "Mining blocks..."
self.nodes[0].generate(101)
offline_node = start_node(1, self.options.tmpdir, ["-maxconnections=0", "-nuparams=5ba81b19:10"])
offline_node = start_node(1, self.options.tmpdir, ["-maxconnections=0", "-nuparams=2bb40e60:10"])
self.nodes.append(offline_node)
assert_equal(0, len(offline_node.getpeerinfo())) # make sure node 1 has no peers
@ -49,7 +49,7 @@ class SignOfflineTest (BitcoinTestFramework):
pass
# Passing in the consensus branch id resolves the issue for offline regtest nodes.
signed_tx = offline_node.signrawtransaction(create_hex, sign_inputs, privkeys, "ALL", "5ba81b19")
signed_tx = offline_node.signrawtransaction(create_hex, sign_inputs, privkeys, "ALL", "2bb40e60")
# If we return the transaction hash, then we have not thrown an error (success)
online_tx_hash = self.nodes[0].sendrawtransaction(signed_tx['hex'])
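
The pattern exercised above — passing the consensus branch ID explicitly because an offline node cannot infer it from its chain state — looks roughly like this in isolation (a hedged sketch: the txid, script and key values are placeholders, and offline_node stands for the node handle started earlier in the test):

    # Placeholders only; a real test supplies values from createrawtransaction/listunspent.
    sign_inputs = [{"txid": "00" * 32, "vout": 0,
                    "scriptPubKey": "76a914" + "00" * 20 + "88ac",
                    "amount": 10.0}]
    privkeys = ["<WIF-encoded key from dumpprivkey>"]
    # The fifth argument is the consensus branch ID; "2bb40e60" is the branch activated
    # at height 10 via -nuparams above, which the peerless offline node cannot see.
    signed_tx = offline_node.signrawtransaction(create_hex, sign_inputs, privkeys,
                                                "ALL", "2bb40e60")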

View File

@ -52,10 +52,7 @@ def check_migration_status(node, destination_address, migration_state):
class SproutSaplingMigration(BitcoinTestFramework):
def setup_nodes(self):
# Activate overwinter/sapling on all nodes
extra_args = [[
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:100', # Sapling
]] * 4
# Add migration parameters to nodes[0]
extra_args[0] = extra_args[0] + [
@ -63,8 +60,8 @@ class SproutSaplingMigration(BitcoinTestFramework):
'-migrationdestaddress=' + SAPLING_ADDR,
'-debug=zrpcunsafe'
]
assert_equal(5, len(extra_args[0]))
assert_equal(2, len(extra_args[1]))
assert_equal(3, len(extra_args[0]))
assert_equal(0, len(extra_args[1]))
return start_nodes(4, self.options.tmpdir, extra_args)
def setup_chain(self):

View File

@ -8,7 +8,7 @@ from mininode import CBlock, CTransaction, CTxIn, CTxOut, COutPoint
from script import CScript, OP_0, OP_EQUAL, OP_HASH160
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None, nBits=None):
def create_block(hashprev, coinbase, nTime=None, nBits=None, hashFinalSaplingRoot=None):
block = CBlock()
if nTime is None:
import time
@ -16,6 +16,8 @@ def create_block(hashprev, coinbase, nTime=None, nBits=None):
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
if hashFinalSaplingRoot is not None:
block.hashFinalSaplingRoot = hashFinalSaplingRoot
if nBits is None:
block.nBits = 0x200f0f0f # Will break after a difficulty adjustment...
else:
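
The new keyword argument can be passed straight through when a test needs a block whose header commits to a specific Sapling tree root; a minimal hedged sketch (prev_hash, coinbase_tx and sapling_root are placeholders a calling test would already have):

    # Sketch only: values come from the calling test, not from this helper.
    block = create_block(prev_hash, coinbase_tx, hashFinalSaplingRoot=sapling_root)
    # nTime still defaults to "now" and nBits to the regtest value, exactly as before.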

View File

@ -355,8 +355,72 @@ class CBlockLocator(object):
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
return "CBlockLocator(nVersion=%i vHave=%r)" \
% (self.nVersion, self.vHave)
class SpendDescription(object):
def __init__(self):
self.cv = None
self.anchor = None
self.nullifier = None
self.rk = None
self.zkproof = None
self.spendAuthSig = None
def deserialize(self, f):
self.cv = deser_uint256(f)
self.anchor = deser_uint256(f)
self.nullifier = deser_uint256(f)
self.rk = deser_uint256(f)
self.zkproof = f.read(192)
self.spendAuthSig = f.read(64)
def serialize(self):
r = ""
r += ser_uint256(self.cv)
r += ser_uint256(self.anchor)
r += ser_uint256(self.nullifier)
r += ser_uint256(self.rk)
r += self.zkproof
r += self.spendAuthSig
return r
def __repr__(self):
return "SpendDescription(cv=%064x, anchor=%064x, nullifier=%064x, rk=%064x, zkproof=%064x, spendAuthSig=%064x)" \
% (self.cv, self.anchor, self.nullifier, self.rk, self.zkproof, self.spendAuthSig)
class OutputDescription(object):
def __init__(self):
self.cv = None
self.cmu = None
self.ephemeralKey = None
self.encCiphertext = None
self.outCiphertext = None
self.zkproof = None
def deserialize(self, f):
self.cv = deser_uint256(f)
self.cmu = deser_uint256(f)
self.ephemeralKey = deser_uint256(f)
self.encCiphertext = f.read(580)
self.outCiphertext = f.read(80)
self.zkproof = f.read(192)
def serialize(self):
r = ""
r += ser_uint256(self.cv)
r += ser_uint256(self.cmu)
r += ser_uint256(self.ephemeralKey)
r += self.encCiphertext
r += self.outCiphertext
r += self.zkproof
return r
def __repr__(self):
return "OutputDescription(cv=%064x, cmu=%064x, ephemeralKey=%064x, encCiphertext=%064x, outCiphertext=%064x, zkproof=%064x)" \
% (self.cv, self.cmu, self.ephemeralKey, self.encCiphertext, self.outCiphertext, self.zkproof)
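
As a quick cross-check of the read lengths above, both description types have fixed serialized sizes:

    # Sizes implied by the deserialize() calls above (uint256 fields are 32 bytes each).
    SPEND_DESC_SIZE = 4 * 32 + 192 + 64          # cv, anchor, nullifier, rk + zkproof + spendAuthSig = 384
    OUTPUT_DESC_SIZE = 3 * 32 + 580 + 80 + 192   # cv, cmu, ephemeralKey + encCiphertext + outCiphertext + zkproof = 948
    assert (SPEND_DESC_SIZE, OUTPUT_DESC_SIZE) == (384, 948)
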
G1_PREFIX_MASK = 0x02
@ -412,11 +476,11 @@ class ZCProof(object):
return r
def __repr__(self):
return "ZCProof(g_A=%s g_A_prime=%s g_B=%s g_B_prime=%s g_C=%s g_C_prime=%s g_K=%s g_H=%s)" \
% (repr(self.g_A), repr(self.g_A_prime),
repr(self.g_B), repr(self.g_B_prime),
repr(self.g_C), repr(self.g_C_prime),
repr(self.g_K), repr(self.g_H))
return "ZCProof(g_A=%r g_A_prime=%r g_B=%r g_B_prime=%r g_C=%r g_C_prime=%r g_K=%r g_H=%r)" \
% (self.g_A, self.g_A_prime,
self.g_B, self.g_B_prime,
self.g_C, self.g_C_prime,
self.g_K, self.g_H)
ZC_NUM_JS_INPUTS = 2
@ -502,9 +566,9 @@ class JSDescription(object):
return r
def __repr__(self):
return "JSDescription(vpub_old=%i.%08i vpub_new=%i.%08i anchor=%064x onetimePubKey=%064x randomSeed=%064x proof=%s)" \
return "JSDescription(vpub_old=%i.%08i vpub_new=%i.%08i anchor=%064x onetimePubKey=%064x randomSeed=%064x proof=%r)" \
% (self.vpub_old, self.vpub_new, self.anchor,
self.onetimePubKey, self.randomSeed, repr(self.proof))
self.onetimePubKey, self.randomSeed, self.proof)
class COutPoint(object):
@ -549,8 +613,8 @@ class CTxIn(object):
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), binascii.hexlify(self.scriptSig),
return "CTxIn(prevout=%r scriptSig=%s nSequence=%i)" \
% (self.prevout, binascii.hexlify(self.scriptSig),
self.nSequence)
@ -578,16 +642,20 @@ class CTxOut(object):
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.fOverwintered = False
self.nVersion = 1
self.nVersionGroupId = 0
self.fOverwintered = True
self.nVersion = 4
self.nVersionGroupId = SAPLING_VERSION_GROUP_ID
self.vin = []
self.vout = []
self.nLockTime = 0
self.nExpiryHeight = 0
self.valueBalance = 0
self.shieldedSpends = []
self.shieldedOutputs = []
self.vJoinSplit = []
self.joinSplitPubKey = None
self.joinSplitSig = None
self.bindingSig = None
self.sha256 = None
self.hash = None
else:
@ -598,9 +666,13 @@ class CTransaction(object):
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.nExpiryHeight = tx.nExpiryHeight
self.valueBalance = tx.valueBalance
self.shieldedSpends = copy.deepcopy(tx.shieldedSpends)
self.shieldedOutputs = copy.deepcopy(tx.shieldedOutputs)
self.vJoinSplit = copy.deepcopy(tx.vJoinSplit)
self.joinSplitPubKey = tx.joinSplitPubKey
self.joinSplitSig = tx.joinSplitSig
self.bindingSig = tx.bindingSig
self.sha256 = None
self.hash = None
@ -614,19 +686,30 @@ class CTransaction(object):
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
isSaplingV4 = (self.fOverwintered and
self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and
self.nVersion == 4)
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
if isOverwinterV3:
if isOverwinterV3 or isSaplingV4:
self.nExpiryHeight = struct.unpack("<I", f.read(4))[0]
if isSaplingV4:
self.valueBalance = struct.unpack("<q", f.read(8))[0]
self.shieldedSpends = deser_vector(f, SpendDescription)
self.shieldedOutputs = deser_vector(f, OutputDescription)
if self.nVersion >= 2:
self.vJoinSplit = deser_vector(f, JSDescription)
if len(self.vJoinSplit) > 0:
self.joinSplitPubKey = deser_uint256(f)
self.joinSplitSig = f.read(64)
if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0):
self.bindingSig = f.read(64)
self.sha256 = None
self.hash = None
@ -635,6 +718,9 @@ class CTransaction(object):
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
isSaplingV4 = (self.fOverwintered and
self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and
self.nVersion == 4)
r = ""
r += struct.pack("<I", header)
@ -643,13 +729,19 @@ class CTransaction(object):
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
if isOverwinterV3:
if isOverwinterV3 or isSaplingV4:
r += struct.pack("<I", self.nExpiryHeight)
if isSaplingV4:
r += struct.pack("<q", self.valueBalance)
r += ser_vector(self.shieldedSpends)
r += ser_vector(self.shieldedOutputs)
if self.nVersion >= 2:
r += ser_vector(self.vJoinSplit)
if len(self.vJoinSplit) > 0:
r += ser_uint256(self.joinSplitPubKey)
r += self.joinSplitSig
if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0):
r += self.bindingSig
return r
def rehash(self):
@ -670,14 +762,18 @@ class CTransaction(object):
def __repr__(self):
r = ("CTransaction(fOverwintered=%r nVersion=%i nVersionGroupId=0x%08x "
"vin=%s vout=%s nLockTime=%i nExpiryHeight=%i"
"vin=%r vout=%r nLockTime=%i nExpiryHeight=%i "
"valueBalance=%i shieldedSpends=%r shieldedOutputs=%r"
% (self.fOverwintered, self.nVersion, self.nVersionGroupId,
repr(self.vin), repr(self.vout), self.nLockTime, self.nExpiryHeight))
self.vin, self.vout, self.nLockTime, self.nExpiryHeight,
self.valueBalance, self.shieldedSpends, self.shieldedOutputs))
if self.nVersion >= 2:
r += " vJoinSplit=%s" % repr(self.vJoinSplit)
r += " vJoinSplit=%r" % (self.vJoinSplit,)
if len(self.vJoinSplit) > 0:
r += " joinSplitPubKey=%064x joinSplitSig=%064x" \
% (self.joinSplitPubKey, self.joinSplitSig)
if len(self.shieldedSpends) > 0 or len(self.shieldedOutputs) > 0:
r += " bindingSig=%064x" % (self.bindingSig,)
r += ")"
return r
@ -690,7 +786,7 @@ class CBlockHeader(object):
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.hashReserved = header.hashReserved
self.hashFinalSaplingRoot = header.hashFinalSaplingRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
@ -703,7 +799,7 @@ class CBlockHeader(object):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.hashReserved = 0
self.hashFinalSaplingRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
@ -715,7 +811,7 @@ class CBlockHeader(object):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.hashReserved = deser_uint256(f)
self.hashFinalSaplingRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = deser_uint256(f)
@ -728,7 +824,7 @@ class CBlockHeader(object):
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += ser_uint256(self.hashReserved)
r += ser_uint256(self.hashFinalSaplingRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += ser_uint256(self.nNonce)
@ -741,7 +837,7 @@ class CBlockHeader(object):
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += ser_uint256(self.hashReserved)
r += ser_uint256(self.hashFinalSaplingRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += ser_uint256(self.nNonce)
@ -755,9 +851,9 @@ class CBlockHeader(object):
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashReserved=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.hashReserved,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.nSolution))
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashFinalSaplingRoot=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%r)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.hashFinalSaplingRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, self.nSolution)
class CBlock(CBlockHeader):
@ -827,10 +923,10 @@ class CBlock(CBlockHeader):
self.nNonce += 1
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashReserved=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%s vtx=%s)" \
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashFinalSaplingRoot=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%r vtx=%r)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
self.hashReserved, time.ctime(self.nTime), self.nBits,
self.nNonce, repr(self.nSolution), repr(self.vtx))
self.hashFinalSaplingRoot, time.ctime(self.nTime), self.nBits,
self.nNonce, self.nSolution, self.vtx)
class CUnsignedAlert(object):
@ -958,9 +1054,9 @@ class msg_version(object):
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%r addrFrom=%r nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.addrTo, self.addrFrom, self.nNonce,
self.strSubVer, self.nStartingHeight)
@ -993,7 +1089,7 @@ class msg_addr(object):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
return "msg_addr(addrs=%r)" % (self.addrs,)
class msg_alert(object):
@ -1012,7 +1108,7 @@ class msg_alert(object):
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
return "msg_alert(alert=%r)" % (self.alert,)
class msg_inv(object):
@ -1031,7 +1127,7 @@ class msg_inv(object):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
return "msg_inv(inv=%r)" % (self.inv,)
class msg_getdata(object):
@ -1047,7 +1143,7 @@ class msg_getdata(object):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
return "msg_getdata(inv=%r)" % (self.inv,)
class msg_notfound(object):
@ -1063,7 +1159,7 @@ class msg_notfound(object):
return ser_vector(self.inv)
def __repr__(self):
return "msg_notfound(inv=%s)" % (repr(self.inv))
return "msg_notfound(inv=%r)" % (self.inv,)
class msg_getblocks(object):
@ -1085,8 +1181,8 @@ class msg_getblocks(object):
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
return "msg_getblocks(locator=%r hashstop=%064x)" \
% (self.locator, self.hashstop)
class msg_tx(object):
@ -1102,7 +1198,7 @@ class msg_tx(object):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
return "msg_tx(tx=%r)" % (self.tx,)
class msg_block(object):
@ -1121,7 +1217,7 @@ class msg_block(object):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
return "msg_block(block=%r)" % (self.block,)
class msg_getaddr(object):
@ -1231,8 +1327,8 @@ class msg_getheaders(object):
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
return "msg_getheaders(locator=%r, stop=%064x)" \
% (self.locator, self.hashstop)
# headers message has
@ -1254,7 +1350,7 @@ class msg_headers(object):
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
return "msg_headers(headers=%r)" % (self.headers,)
class msg_reject(object):
@ -1299,7 +1395,7 @@ class msg_filteradd(object):
return ser_string(self.data)
def __repr__(self):
return "msg_filteradd(data=%s)" % (repr(self.data))
return "msg_filteradd(data=%r)" % (self.data,)
class msg_filterclear(object):
@ -1352,7 +1448,7 @@ class NodeConnCB(object):
try:
self.cbmap[message.command](conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
print "ERROR delivering %r (%s)" % (message,
sys.exc_info()[0])
def on_version(self, conn, message):
@ -1421,7 +1517,7 @@ class NodeConn(asyncore.dispatcher):
"regtest": "\xaa\xe8\x3f\x5f" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", protocol_version=SPROUT_PROTO_VERSION):
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", protocol_version=SAPLING_PROTO_VERSION):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
@ -1503,7 +1599,7 @@ class NodeConn(asyncore.dispatcher):
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
raise ValueError("got garbage %r" % (self.recvbuf,))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
@ -1526,7 +1622,7 @@ class NodeConn(asyncore.dispatcher):
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
raise ValueError("got bad checksum %r" % (self.recvbuf,))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = cStringIO.StringIO(msg)
@ -1534,13 +1630,12 @@ class NodeConn(asyncore.dispatcher):
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
self.show_debug_msg("Unknown command: '%s' %r" % (command, msg))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
self.show_debug_msg("Send %r" % (message,))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
@ -1562,7 +1657,7 @@ class NodeConn(asyncore.dispatcher):
self.messagemap['ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap['ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.show_debug_msg("Recv %r" % (message,))
self.cb.deliver(self, message)
def disconnect_node(self):

View File

@ -106,6 +106,10 @@ def initialize_chain(test_dir):
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
args.extend([
'-nuparams=5ba81b19:1', # Overwinter
'-nuparams=76b809bb:1', # Sapling
])
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
@ -191,6 +195,10 @@ def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=
if binary is None:
binary = os.getenv("BITCOIND", "bitcoind")
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
args.extend([
'-nuparams=5ba81b19:1', # Overwinter
'-nuparams=76b809bb:1', # Sapling
])
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
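
With Overwinter and Sapling now active from height 1 by default, individual tests only pass -nuparams when they need a different schedule. For reference, the branch IDs used throughout this diff map as follows (the helper below is illustrative, not part of the framework):

    # Consensus branch IDs as they appear in the -nuparams flags in these tests.
    NU_BRANCH_IDS = {
        'Overwinter': '5ba81b19',
        'Sapling':    '76b809bb',
        'Blossom':    '2bb40e60',
    }

    def nuparams(upgrade, height):
        # Builds the flag string used in extra_args lists, e.g. '-nuparams=76b809bb:1'.
        return '-nuparams=%s:%d' % (NU_BRANCH_IDS[upgrade], height)

    assert nuparams('Sapling', 1) == '-nuparams=76b809bb:1'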

View File

@ -40,8 +40,6 @@ from test_framework.util import (
)
from decimal import Decimal
NUPARAMS_ARGS = ['-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:101'] # Sapling
TURNSTILE_ARGS = ['-experimentalfeatures',
'-developersetpoolsizezero']
@ -52,8 +50,7 @@ class TurnstileTest (BitcoinTestFramework):
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir,
extra_args=[NUPARAMS_ARGS] * 3)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
self.is_network_split=False
@ -70,7 +67,7 @@ class TurnstileTest (BitcoinTestFramework):
# Helper method to start a single node with extra args and sync to the network
def start_and_sync_node(self, index, args=[]):
self.nodes[index] = start_node(index, self.options.tmpdir, extra_args=NUPARAMS_ARGS + args)
self.nodes[index] = start_node(index, self.options.tmpdir, extra_args=args)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)

View File

@ -92,7 +92,7 @@ class WalletTest (BitcoinTestFramework):
# Catch an attempt to send a transaction with an absurdly high fee.
# Send 1.0 from a utxo of value 10.0 but don't specify a change output, so then
# the change of 9.0 becomes the fee, which is greater than estimated fee of 0.0019.
# the change of 9.0 becomes the fee, which is greater than estimated fee of 0.0021.
inputs = []
outputs = {}
for utxo in node2utxos:
@ -108,7 +108,7 @@ class WalletTest (BitcoinTestFramework):
except JSONRPCException,e:
errorString = e.error['message']
assert("absurdly high fees" in errorString)
assert("900000000 > 190000" in errorString)
assert("900000000 > 210000" in errorString)
# create both transactions
txns_to_send = []
@ -286,12 +286,16 @@ class WalletTest (BitcoinTestFramework):
myzaddr = self.nodes[0].z_getnewaddress('sprout')
recipients = []
num_t_recipients = 3000
num_t_recipients = 1000
num_z_recipients = 2100
amount_per_recipient = Decimal('0.00000001')
errorString = ''
for i in xrange(0,num_t_recipients):
newtaddr = self.nodes[2].getnewaddress()
recipients.append({"address":newtaddr, "amount":amount_per_recipient})
for i in xrange(0,num_z_recipients):
newzaddr = self.nodes[2].z_getnewaddress('sprout')
recipients.append({"address":newzaddr, "amount":amount_per_recipient})
# Issue #2759 Workaround START
# HTTP connection to node 0 may fall into a state, during the few minutes it takes to process
@ -302,47 +306,12 @@ class WalletTest (BitcoinTestFramework):
self.nodes[0].getinfo()
# Issue #2759 Workaround END
try:
self.nodes[0].z_sendmany(myzaddr, recipients)
except JSONRPCException,e:
errorString = e.error['message']
assert("Too many outputs, size of raw transaction" in errorString)
recipients = []
num_t_recipients = 2000
num_z_recipients = 50
amount_per_recipient = Decimal('0.00000001')
errorString = ''
for i in xrange(0,num_t_recipients):
newtaddr = self.nodes[2].getnewaddress()
recipients.append({"address":newtaddr, "amount":amount_per_recipient})
for i in xrange(0,num_z_recipients):
newzaddr = self.nodes[2].z_getnewaddress('sprout')
recipients.append({"address":newzaddr, "amount":amount_per_recipient})
# Issue #2759 Workaround START
self.nodes[0].getinfo()
# Issue #2759 Workaround END
try:
self.nodes[0].z_sendmany(myzaddr, recipients)
except JSONRPCException,e:
errorString = e.error['message']
assert("size of raw transaction would be larger than limit" in errorString)
recipients = []
num_z_recipients = 100
amount_per_recipient = Decimal('0.00000001')
errorString = ''
for i in xrange(0,num_z_recipients):
newzaddr = self.nodes[2].z_getnewaddress('sprout')
recipients.append({"address":newzaddr, "amount":amount_per_recipient})
try:
self.nodes[0].z_sendmany(myzaddr, recipients)
except JSONRPCException,e:
errorString = e.error['message']
assert("Invalid parameter, too many zaddr outputs" in errorString)
# add zaddr to node 2
myzaddr = self.nodes[2].z_getnewaddress('sprout')

View File

@ -6,17 +6,11 @@
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, start_nodes
from test_framework.util import assert_equal
# Test wallet address behaviour across network upgrades
class WalletAddressesTest(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
'-nuparams=5ba81b19:202', # Overwinter
'-nuparams=76b809bb:204', # Sapling
]] * 4)
def run_test(self):
def addr_checks(default_type):
# Check default type, as well as explicit types
@ -38,41 +32,17 @@ class WalletAddressesTest(BitcoinTestFramework):
# Sanity-check the test harness
assert_equal(self.nodes[0].getblockcount(), 200)
# Current height = 200 -> Sprout
# Current height = 200 -> Sapling
# Default address type is Sapling
print "Testing height 200 (Sprout)"
print "Testing height 200 (Sapling)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 201 -> Sprout
# Current height = 201 -> Sapling
# Default address type is Sapling
print "Testing height 201 (Sprout)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 202 -> Overwinter
# Default address type is Sapling
print "Testing height 202 (Overwinter)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 203 -> Overwinter
# Default address type is Sapling
print "Testing height 203 (Overwinter)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 204 -> Sapling
# Default address type is Sapling
print "Testing height 204 (Sapling)"
print "Testing height 201 (Sapling)"
addr_checks('sapling')
if __name__ == '__main__':

View File

@ -9,7 +9,6 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_coinbase_address,
start_nodes,
wait_and_assert_operationid_status,
)
@ -18,29 +17,21 @@ from decimal import Decimal
# Test wallet z_listunspent behaviour across network upgrades
class WalletListNotes(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
'-nuparams=5ba81b19:202', # Overwinter
'-nuparams=76b809bb:214', # Sapling
]] * 4)
def run_test(self):
# Current height = 200 -> Sprout
# Current height = 200
assert_equal(200, self.nodes[0].getblockcount())
sproutzaddr = self.nodes[0].z_getnewaddress('sprout')
# test that we can create a sapling zaddr before sapling activates
saplingzaddr = self.nodes[0].z_getnewaddress('sapling')
# we've got lots of coinbase (taddr) but no shielded funds yet
assert_equal(0, Decimal(self.nodes[0].z_gettotalbalance()['private']))
# Set current height to 201 -> Sprout
# Set current height to 201
self.nodes[0].generate(1)
self.sync_all()
assert_equal(201, self.nodes[0].getblockcount())
# Shield coinbase funds (must be a multiple of 10, no change allowed pre-sapling)
# Shield coinbase funds (must be a multiple of 10, no change allowed)
receive_amount_10 = Decimal('10.0') - Decimal('0.0001')
recipients = [{"address":sproutzaddr, "amount":receive_amount_10}]
myopid = self.nodes[0].z_sendmany(get_coinbase_address(self.nodes[0]), recipients)
@ -70,8 +61,8 @@ class WalletListNotes(BitcoinTestFramework):
# Generate a block to confirm shield coinbase tx
self.nodes[0].generate(1)
self.sync_all()
# Current height = 202 -> Overwinter. Default address type remains Sprout
# Current height = 202
assert_equal(202, self.nodes[0].getblockcount())
# Send 1.0 (actually 0.9999) from sproutzaddr to a new zaddr
@ -108,12 +99,7 @@ class WalletListNotes(BitcoinTestFramework):
unspent_tx_filter = self.nodes[0].z_listunspent(0, 9999, False, [sproutzaddr])
assert_equal(1, len(unspent_tx_filter))
assert_equal(unspent_tx[1], unspent_tx_filter[0])
# Set current height to 204 -> Sapling
self.nodes[0].generate(12)
self.sync_all()
assert_equal(214, self.nodes[0].getblockcount())
# No funds in saplingzaddr yet
assert_equal(0, len(self.nodes[0].z_listunspent(0, 9999, False, [saplingzaddr])))

View File

@ -7,7 +7,7 @@ import sys; assert sys.version_info < (3,), ur"This script does not run under Py
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_true, assert_false
from test_framework.util import start_nodes, wait_and_assert_operationid_status
from test_framework.util import wait_and_assert_operationid_status
from decimal import Decimal
my_memo = 'c0ffee' # stay awake
@ -19,12 +19,6 @@ fee = Decimal('0.0001')
class ListReceivedTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
"-nuparams=5ba81b19:201", # Overwinter
"-nuparams=76b809bb:214", # Sapling
]] * 4)
def generate_and_sync(self, new_height):
current_height = self.nodes[0].getblockcount()
assert(new_height > current_height)

View File

@ -6,9 +6,15 @@
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, initialize_chain_clean, \
start_nodes, connect_nodes_bi, wait_and_assert_operationid_status, \
assert_greater_than, get_coinbase_address
from test_framework.util import (
assert_equal,
assert_greater_than,
connect_nodes_bi,
get_coinbase_address,
initialize_chain_clean,
start_nodes,
wait_and_assert_operationid_status,
)
from test_framework.authproxy import JSONRPCException
from decimal import Decimal
@ -20,7 +26,11 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, extra_args=[["-nuparams=5ba81b19:200", "-debug=zrpcunsafe", "-txindex"]] * 4 )
self.nodes = start_nodes(4, self.options.tmpdir, extra_args=[[
"-nuparams=2bb40e60:200",
"-debug=zrpcunsafe",
"-txindex",
]] * 4 )
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
@ -31,9 +41,9 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
def run_test (self):
self.nodes[0].generate(100)
self.sync_all()
self.nodes[1].generate(98)
self.nodes[1].generate(95)
self.sync_all()
# Node 0 has reward from blocks 1 to 98 which are spendable.
# Node 0 has reward from blocks 1 to 95 which are spendable.
taddr0 = get_coinbase_address(self.nodes[0])
taddr1 = self.nodes[1].getnewaddress()
@ -43,19 +53,12 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
zaddr3 = self.nodes[3].z_getnewaddress('sprout')
#
# Currently at block 198. The next block to be mined 199 is a Sprout block
# Currently at block 195. The next block to be mined 196 is a Sapling block
#
bci = self.nodes[0].getblockchaininfo()
assert_equal(bci['consensus']['chaintip'], '00000000')
assert_equal(bci['consensus']['nextblock'], '00000000')
assert_equal(bci['upgrades']['5ba81b19']['status'], 'pending')
# Cannot use the expiryheight parameter of createrawtransaction if Overwinter is not active in the next block
try:
self.nodes[0].createrawtransaction([], {}, 0, 99)
except JSONRPCException,e:
errorString = e.error['message']
assert_equal("Invalid parameter, expiryheight can only be used if Overwinter is active when the transaction is mined" in errorString, True)
assert_equal(bci['consensus']['chaintip'], '76b809bb')
assert_equal(bci['consensus']['nextblock'], '76b809bb')
assert_equal(bci['upgrades']['2bb40e60']['status'], 'pending')
# Node 0 sends transparent funds to Node 2
tsendamount = Decimal('1.0')
@ -75,8 +78,10 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
myopid = self.nodes[0].z_sendmany(taddr0, recipients)
txid_shielded = wait_and_assert_operationid_status(self.nodes[0], myopid)
# Skip over the three blocks prior to activation; no transactions can be mined
# in them due to the nearly-expiring restrictions.
self.sync_all()
self.nodes[0].generate(1)
self.nodes[0].generate(4)
self.sync_all()
# Verify balance
@ -84,26 +89,26 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
assert_equal(self.nodes[2].getbalance(), Decimal('0.4999'))
assert_equal(self.nodes[2].z_getbalance(zaddr2), zsendamount)
# Verify transaction versions are 1 or 2 (intended for Sprout)
# Verify transaction version is 4 (intended for Sapling+)
result = self.nodes[0].getrawtransaction(txid_transparent, 1)
assert_equal(result["version"], 1)
assert_equal(result["overwintered"], False)
assert_equal(result["version"], 4)
assert_equal(result["overwintered"], True)
result = self.nodes[0].getrawtransaction(txid_zsendmany, 1)
assert_equal(result["version"], 1)
assert_equal(result["overwintered"], False)
assert_equal(result["version"], 4)
assert_equal(result["overwintered"], True)
result = self.nodes[0].getrawtransaction(txid_shielded, 1)
assert_equal(result["version"], 2)
assert_equal(result["overwintered"], False)
assert_equal(result["version"], 4)
assert_equal(result["overwintered"], True)
#
# Currently at block 199. The next block to be mined 200 is an Overwinter block
# Currently at block 199. The next block to be mined 200 is a Blossom block
#
bci = self.nodes[0].getblockchaininfo()
assert_equal(bci['consensus']['chaintip'], '00000000')
assert_equal(bci['consensus']['nextblock'], '5ba81b19')
assert_equal(bci['upgrades']['5ba81b19']['status'], 'pending')
assert_equal(bci['consensus']['chaintip'], '76b809bb')
assert_equal(bci['consensus']['nextblock'], '2bb40e60')
assert_equal(bci['upgrades']['2bb40e60']['status'], 'pending')
# Test using expiryheight parameter of createrawtransaction when Overwinter is active in the next block
# Test using expiryheight parameter of createrawtransaction when Blossom is active in the next block
errorString = ""
try:
self.nodes[0].createrawtransaction([], {}, 0, 499999999)
@ -144,7 +149,7 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
myopid = self.nodes[0].z_sendmany(taddr0, recipients)
txid_shielded = wait_and_assert_operationid_status(self.nodes[0], myopid)
# Mine the first Overwinter block
# Mine the first Blossom block
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
@ -153,28 +158,28 @@ class WalletOverwinterTxTest (BitcoinTestFramework):
# size_on_disk should be > 0
assert_greater_than(bci['size_on_disk'], 0)
assert_equal(bci['consensus']['chaintip'], '5ba81b19')
assert_equal(bci['consensus']['nextblock'], '5ba81b19')
assert_equal(bci['upgrades']['5ba81b19']['status'], 'active')
assert_equal(bci['consensus']['chaintip'], '2bb40e60')
assert_equal(bci['consensus']['nextblock'], '2bb40e60')
assert_equal(bci['upgrades']['2bb40e60']['status'], 'active')
# Verify balance
assert_equal(self.nodes[1].z_getbalance(taddr1), Decimal('1.0'))
assert_equal(self.nodes[3].getbalance(), Decimal('0.4999'))
assert_equal(self.nodes[3].z_getbalance(zaddr3), zsendamount)
# Verify transaction version is 3 (intended for Overwinter)
# Verify transaction version is 4 (intended for Sapling+)
result = self.nodes[0].getrawtransaction(txid_transparent, 1)
assert_equal(result["version"], 3)
assert_equal(result["version"], 4)
assert_equal(result["overwintered"], True)
assert_equal(result["versiongroupid"], "03c48270")
assert_equal(result["versiongroupid"], "892f2085")
result = self.nodes[0].getrawtransaction(txid_zsendmany, 1)
assert_equal(result["version"], 3)
assert_equal(result["version"], 4)
assert_equal(result["overwintered"], True)
assert_equal(result["versiongroupid"], "03c48270")
assert_equal(result["versiongroupid"], "892f2085")
result = self.nodes[0].getrawtransaction(txid_shielded, 1)
assert_equal(result["version"], 3)
assert_equal(result["version"], 4)
assert_equal(result["overwintered"], True)
assert_equal(result["versiongroupid"], "03c48270")
assert_equal(result["versiongroupid"], "892f2085")
if __name__ == '__main__':
WalletOverwinterTxTest().main()

View File

@ -22,11 +22,7 @@ class WalletPersistenceTest (BitcoinTestFramework):
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir,
extra_args=[[
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:201', # Sapling
]] * 3)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
self.is_network_split=False
@ -38,7 +34,7 @@ class WalletPersistenceTest (BitcoinTestFramework):
assert_equal(self.nodes[0].getblockcount(), 200)
self.sync_all()
# Verify Sapling address is persisted in wallet (even when Sapling is not yet active)
# Verify Sapling address is persisted in wallet
sapling_addr = self.nodes[0].z_getnewaddress('sapling')
# Make sure the node has the address
@ -54,10 +50,6 @@ class WalletPersistenceTest (BitcoinTestFramework):
addresses = self.nodes[0].z_listaddresses()
assert_true(sapling_addr in addresses, "Should contain address after restart")
# Activate Sapling
self.nodes[0].generate(1)
self.sync_all()
# Node 0 shields funds to Sapling address
taddr0 = get_coinbase_address(self.nodes[0])
recipients = []

View File

@ -21,8 +21,6 @@ class WalletSaplingTest(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
'-nuparams=5ba81b19:201', # Overwinter
'-nuparams=76b809bb:203', # Sapling
'-experimentalfeatures', '-zmergetoaddress',
]] * 4)
@ -30,52 +28,6 @@ class WalletSaplingTest(BitcoinTestFramework):
# Sanity-check the test harness
assert_equal(self.nodes[0].getblockcount(), 200)
# Activate Overwinter
self.nodes[2].generate(1)
self.sync_all()
# Verify RPCs disallow Sapling value transfer if Sapling is not active
tmp_taddr = get_coinbase_address(self.nodes[3])
tmp_zaddr = self.nodes[3].z_getnewaddress('sapling')
try:
recipients = []
recipients.append({"address": tmp_zaddr, "amount": Decimal('10')})
self.nodes[3].z_sendmany(tmp_taddr, recipients, 1, 0)
raise AssertionError("Should have thrown an exception")
except JSONRPCException as e:
assert_equal("Invalid parameter, Sapling has not activated", e.error['message'])
try:
recipients = []
recipients.append({"address": tmp_taddr, "amount": Decimal('10')})
self.nodes[3].z_sendmany(tmp_zaddr, recipients, 1, 0)
raise AssertionError("Should have thrown an exception")
except JSONRPCException as e:
assert_equal("Invalid parameter, Sapling has not activated", e.error['message'])
try:
self.nodes[3].z_shieldcoinbase(tmp_taddr, tmp_zaddr)
raise AssertionError("Should have thrown an exception")
except JSONRPCException as e:
assert_equal("Invalid parameter, Sapling has not activated", e.error['message'])
# Verify z_mergetoaddress RPC does not support Sapling yet
try:
self.nodes[3].z_mergetoaddress([tmp_taddr], tmp_zaddr)
raise AssertionError("Should have thrown an exception")
except JSONRPCException as e:
assert_equal("Invalid parameter, Sapling has not activated", e.error['message'])
try:
self.nodes[3].z_mergetoaddress([tmp_zaddr], tmp_taddr)
raise AssertionError("Should have thrown an exception")
except JSONRPCException as e:
# When sending from a zaddr we check for sapling activation only if
# we find notes belonging to that address. Since sapling is not active
# none can be generated and none will be found.
assert_equal("Could not find any funds to merge.", e.error['message'])
# Activate Sapling
self.nodes[2].generate(2)
self.sync_all()
taddr1 = self.nodes[1].getnewaddress()
saplingAddr0 = self.nodes[0].z_getnewaddress('sapling')
saplingAddr1 = self.nodes[1].z_getnewaddress('sapling')

View File

@ -24,18 +24,10 @@ class WalletShieldCoinbaseTest (BitcoinTestFramework):
def setup_network(self, split=False):
args = ['-regtestprotectcoinbase', '-debug=zrpcunsafe']
args2 = ['-regtestprotectcoinbase', '-debug=zrpcunsafe', "-mempooltxinputlimit=7"]
if self.addr_type != 'sprout':
nu = [
'-nuparams=5ba81b19:0', # Overwinter
'-nuparams=76b809bb:1', # Sapling
]
args.extend(nu)
args2 = args
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
self.nodes.append(start_node(2, self.options.tmpdir, args2))
self.nodes.append(start_node(2, self.options.tmpdir, args))
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
@ -164,14 +156,8 @@ class WalletShieldCoinbaseTest (BitcoinTestFramework):
wait_and_assert_operationid_status(self.nodes[0], opid1)
wait_and_assert_operationid_status(self.nodes[0], opid2)
if self.addr_type == 'sprout':
# Shielding the 800 utxos will occur over two transactions, since max tx size is 100,000 bytes.
# We don't verify shieldingValue as utxos are not selected in any specific order, so value can change on each test run.
# We set an unrealistically high limit parameter of 99999, to verify that max tx size will constrain the number of utxos.
verify_locking('662', '138', 99999)
else:
# Shield the 800 utxos over two transactions
verify_locking('500', '300', 500)
# Shield the 800 utxos over two transactions
verify_locking('500', '300', 500)
# sync_all() invokes sync_mempool() but node 2's mempool limit will cause tx1 and tx2 to be rejected.
# So instead, we sync on blocks and mempool for node 0 and node 1, and after a new block is generated
@ -181,18 +167,6 @@ class WalletShieldCoinbaseTest (BitcoinTestFramework):
self.nodes[1].generate(1)
self.sync_all()
if self.addr_type == 'sprout':
# Verify maximum number of utxos which node 2 can shield is limited by option -mempooltxinputlimit
# This option is used when the limit parameter is set to 0.
mytaddr = get_coinbase_address(self.nodes[2], 20)
result = self.nodes[2].z_shieldcoinbase(mytaddr, myzaddr, Decimal('0.0001'), 0)
assert_equal(result["shieldingUTXOs"], Decimal('7'))
assert_equal(result["remainingUTXOs"], Decimal('13'))
wait_and_assert_operationid_status(self.nodes[2], result['opid'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# Verify maximum number of utxos which node 0 can shield is set by default limit parameter of 50
self.nodes[0].generate(200)
self.sync_all()

View File

@ -138,7 +138,6 @@ STAGES = [
'no-dot-so',
'util-test',
'secp256k1',
'libsnark',
'univalue',
'rpc',
]
@ -150,7 +149,6 @@ STAGE_COMMANDS = {
'no-dot-so': ensure_no_dot_so_in_depends,
'util-test': util_test,
'secp256k1': ['make', '-C', repofile('src/secp256k1'), 'check'],
'libsnark': ['make', '-C', repofile('src'), 'libsnark-tests'],
'univalue': ['make', '-C', repofile('src/univalue'), 'check'],
'rpc': [repofile('qa/pull-tester/rpc-tests.sh')],
}

View File

@ -30,8 +30,6 @@ BITCOIN_CONFIG_INCLUDES=-I$(builddir)/config
BITCOIN_INCLUDES=-I$(builddir) -I$(builddir)/obj $(BDB_CPPFLAGS) $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS) $(CRYPTO_CFLAGS) $(SSL_CFLAGS)
BITCOIN_INCLUDES += -I$(srcdir)/secp256k1/include
BITCOIN_INCLUDES += -I$(srcdir)/snark
BITCOIN_INCLUDES += -I$(srcdir)/snark/libsnark
BITCOIN_INCLUDES += -I$(srcdir)/univalue/include
LIBBITCOIN_SERVER=libbitcoin_server.a
@ -40,7 +38,6 @@ LIBBITCOIN_CLI=libbitcoin_cli.a
LIBBITCOIN_UTIL=libbitcoin_util.a
LIBBITCOIN_CRYPTO=crypto/libbitcoin_crypto.a
LIBSECP256K1=secp256k1/libsecp256k1.la
LIBSNARK=snark/libsnark.a
LIBUNIVALUE=univalue/libunivalue.la
LIBZCASH=libzcash.a
@ -60,21 +57,6 @@ endif
$(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
LIBSNARK_CXXFLAGS = $(AM_CXXFLAGS) $(PIC_FLAGS) -DBINARY_OUTPUT -DNO_PT_COMPRESSION=1 -fstack-protector-all
LIBSNARK_CONFIG_FLAGS = CURVE=ALT_BN128 NO_PROCPS=1 NO_DOCS=1 STATIC=1 NO_SUPERCOP=1 FEATUREFLAGS=-DMONTGOMERY_OUTPUT NO_COPY_DEPINST=1 NO_COMPILE_LIBGTEST=1
if HAVE_OPENMP
LIBSNARK_CONFIG_FLAGS += MULTICORE=1
endif
if TARGET_DARWIN
LIBSNARK_CONFIG_FLAGS += PLATFORM=darwin
endif
$(LIBSNARK): $(wildcard snark/src/*)
$(AM_V_at) CC="$(CC)" CXX="$(CXX)" AR="$(AR)" CXXFLAGS="$(LIBSNARK_CXXFLAGS)" $(MAKE) $(AM_MAKEFLAGS) -C snark/ DEPINST="$(LIBSNARK_DEPINST)" $(LIBSNARK_CONFIG_FLAGS) OPTFLAGS="$(LIBSNARK_OPTFLAGS)"
libsnark-tests: $(wildcard snark/src/*)
$(AM_V_at) CC="$(CC)" CXX="$(CXX)" AR="$(AR)" CXXFLAGS="$(LIBSNARK_CXXFLAGS)" $(MAKE) $(AM_MAKEFLAGS) -C snark/ check DEPINST="$(LIBSNARK_DEPINST)" $(LIBSNARK_CONFIG_FLAGS) OPTFLAGS="$(LIBSNARK_OPTFLAGS)"
$(LIBUNIVALUE): $(wildcard univalue/lib/*) $(wildcard univalue/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
@ -117,7 +99,7 @@ LIBZCASH_H = \
zcash/Zcash.h \
zcash/zip32.h
.PHONY: FORCE collate-libsnark check-symbols check-security
.PHONY: FORCE check-symbols check-security
# bitcoin core #
BITCOIN_CORE_H = \
addressindex.h \
@ -458,7 +440,6 @@ zcashd_LDADD = \
$(LIBBITCOIN_PROTON) \
$(LIBBITCOIN_CRYPTO) \
$(LIBZCASH) \
$(LIBSNARK) \
$(LIBLEVELDB) \
$(LIBMEMENV) \
$(LIBSECP256K1)
@ -494,7 +475,6 @@ zcash_cli_LDADD = \
$(CRYPTO_LIBS) \
$(EVENT_LIBS) \
$(LIBZCASH) \
$(LIBSNARK) \
$(LIBBITCOIN_CRYPTO) \
$(LIBZCASH_LIBS)
#
@ -516,7 +496,6 @@ zcash_tx_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBSECP256K1) \
$(LIBZCASH) \
$(LIBSNARK) \
$(LIBBITCOIN_CRYPTO) \
$(LIBZCASH_LIBS)
@ -533,13 +512,7 @@ libzcash_a_SOURCES = \
zcash/Note.cpp \
zcash/prf.cpp \
zcash/util.cpp \
zcash/zip32.cpp \
zcash/circuit/commitment.tcc \
zcash/circuit/gadget.tcc \
zcash/circuit/merkle.tcc \
zcash/circuit/note.tcc \
zcash/circuit/prfs.tcc \
zcash/circuit/utils.tcc
zcash/zip32.cpp
libzcash_a_CPPFLAGS = $(AM_CPPFLAGS) $(PIC_FLAGS) -DBINARY_OUTPUT -DCURVE_ALT_BN128 -DBOOST_SPIRIT_THREADSAFE -fvisibility=hidden -DSTATIC $(BITCOIN_INCLUDES)
libzcash_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
@ -582,12 +555,11 @@ CLEANFILES = leveldb/libleveldb.a leveldb/libmemenv.a *.gcda *.gcno */*.gcno wal
DISTCLEANFILES = obj/build.h
EXTRA_DIST = leveldb snark
EXTRA_DIST = leveldb
clean-local:
-$(MAKE) -C leveldb clean
-$(MAKE) -C secp256k1 clean
-$(MAKE) -C snark clean
-$(MAKE) -C univalue clean
rm -f leveldb/*/*.gcno leveldb/helpers/memenv/*.gcno
-rm -f config.h

View File

@ -38,10 +38,8 @@ zcash_gtest_SOURCES += \
gtest/test_transaction_builder.cpp \
gtest/test_upgrades.cpp \
gtest/test_validation.cpp \
gtest/test_circuit.cpp \
gtest/test_txid.cpp \
gtest/test_libzcash_utils.cpp \
gtest/test_proofs.cpp \
gtest/test_pedersen_hash.cpp \
gtest/test_checkblock.cpp \
gtest/test_zip32.cpp
@ -63,7 +61,7 @@ if ENABLE_WALLET
zcash_gtest_LDADD += $(LIBBITCOIN_WALLET)
endif
zcash_gtest_LDADD += $(LIBZCASH_CONSENSUS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(LIBZCASH) $(LIBSNARK) $(LIBZCASH_LIBS)
zcash_gtest_LDADD += $(LIBZCASH_CONSENSUS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS) $(LIBZCASH) $(LIBZCASH_LIBS)
if ENABLE_PROTON
zcash_gtest_LDADD += $(LIBBITCOIN_PROTON) $(PROTON_LIBS)

View File

@ -44,8 +44,6 @@ JSON_TEST_FILES = \
test/data/merkle_witness_serialization_sapling.json \
test/data/merkle_path_sapling.json \
test/data/merkle_commitments_sapling.json \
test/data/g1_compressed.json \
test/data/g2_compressed.json \
test/data/sapling_key_components.json
RAW_TEST_FILES = test/data/alertTests.raw
@ -126,7 +124,7 @@ test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_C
$(LIBLEVELDB) $(LIBMEMENV) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS)
test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_test_bitcoin_LDADD += $(LIBZCASH_CONSENSUS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(LIBZCASH) $(LIBSNARK) $(LIBZCASH_LIBS)
test_test_bitcoin_LDADD += $(LIBZCASH_CONSENSUS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(LIBZCASH) $(LIBZCASH_LIBS)
test_test_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) -static
if ENABLE_ZMQ

View File

@ -110,8 +110,12 @@ public:
Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT;
consensus.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion = 170005;
consensus.vUpgrades[Consensus::UPGRADE_OVERWINTER].nActivationHeight = 347500;
consensus.vUpgrades[Consensus::UPGRADE_OVERWINTER].hashActivationBlock =
uint256S("0000000003761c0d0c3974b54bdb425613bbb1eaadd6e70b764de82f195ea243");
consensus.vUpgrades[Consensus::UPGRADE_SAPLING].nProtocolVersion = 170007;
consensus.vUpgrades[Consensus::UPGRADE_SAPLING].nActivationHeight = 419200;
consensus.vUpgrades[Consensus::UPGRADE_SAPLING].hashActivationBlock =
uint256S("00000000025a57200d898ac7f21e26bf29028bbe96ec46e05b2c17cc9db9e4f3");
consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].nProtocolVersion = 170009;
consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].nActivationHeight =
Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT;
@ -303,13 +307,19 @@ public:
Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT;
consensus.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion = 170003;
consensus.vUpgrades[Consensus::UPGRADE_OVERWINTER].nActivationHeight = 207500;
consensus.vUpgrades[Consensus::UPGRADE_OVERWINTER].hashActivationBlock =
uint256S("0000257c4331b098045023fcfbfa2474681f4564ab483f84e4e1ad078e4acf44");
consensus.vUpgrades[Consensus::UPGRADE_SAPLING].nProtocolVersion = 170007;
consensus.vUpgrades[Consensus::UPGRADE_SAPLING].nActivationHeight = 280000;
consensus.vUpgrades[Consensus::UPGRADE_SAPLING].hashActivationBlock =
uint256S("000420e7fcc3a49d729479fb0b560dd7b8617b178a08e9e389620a9d1dd6361a");
consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].nProtocolVersion = 170008;
consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].nActivationHeight = 584000;
consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].hashActivationBlock =
uint256S("00367515ef2e781b8c9358b443b6329572599edd02c59e8af67db9785122f298");
// The best chain should have at least this much work.
consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000000000000000001d0c4d9cd");
consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000000000001dbb4c4224");
pchMessageStart[0] = 0xfa;
pchMessageStart[1] = 0x1a;

View File

@ -59,6 +59,18 @@ struct NetworkUpgrade {
* should remain disabled on mainnet.
*/
static constexpr int NO_ACTIVATION_HEIGHT = -1;
/**
* The hash of the block at height nActivationHeight, if known. This is set manually
* after a network upgrade activates.
*
* We use this in IsInitialBlockDownload to detect whether we are potentially being
* fed a fake alternate chain. We use NU activation blocks for this purpose instead of
* the checkpoint blocks, because network upgrades (should) have significantly more
* scrutiny than regular releases. nMinimumChainWork MUST be set to at least the chain
* work of this block, otherwise this detection will have false positives.
*/
boost::optional<uint256> hashActivationBlock;
};
/** ZIP208 block target interval in seconds. */
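
A hedged sketch of the consistency check described in the comment above, written in Python purely for illustration (the real logic lives in zcashd's C++ IsInitialBlockDownload path; the names below are placeholders, not the zcashd API):

    def activation_blocks_consistent(chain_hash_at, upgrades):
        # chain_hash_at(height) -> hash of the active chain's block at that height, or None.
        # upgrades: iterable of (activation_height, expected_hash or None) pairs.
        for height, expected_hash in upgrades:
            if expected_hash is None:
                continue                      # no activation hash pinned for this upgrade
            actual = chain_hash_at(height)
            if actual is not None and actual != expected_hash:
                return False                  # likely being fed a fake alternate chain
        return True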

View File

@ -5,9 +5,6 @@
#include "zcash/JoinSplit.hpp"
#include "util.h"
#include <libsnark/common/default_types/r1cs_ppzksnark_pp.hpp>
#include <libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp>
#include "librustzcash.h"
struct ECCryptoClosure
@ -23,12 +20,7 @@ int main(int argc, char **argv) {
assert(init_and_check_sodium() != -1);
ECC_Start();
libsnark::default_r1cs_ppzksnark_pp::init_public_params();
libsnark::inhibit_profiling_info = true;
libsnark::inhibit_profiling_counters = true;
boost::filesystem::path pk_path = ZC_GetParamsDir() / "sprout-proving.key";
boost::filesystem::path vk_path = ZC_GetParamsDir() / "sprout-verifying.key";
params = ZCJoinSplit::Prepared(vk_path.string(), pk_path.string());
params = ZCJoinSplit::Prepared();
boost::filesystem::path sapling_spend = ZC_GetParamsDir() / "sapling-spend.params";
boost::filesystem::path sapling_output = ZC_GetParamsDir() / "sapling-output.params";

View File

@ -748,7 +748,6 @@ TEST(checktransaction_tests, SaplingSproutInputSumsTooLarge) {
std::array<size_t, ZC_NUM_JS_OUTPUTS> outputMap;
auto jsdesc = JSDescription::Randomized(
true,
*params, joinSplitPubKey, rt,
inputs, outputs,
inputMap, outputMap,

View File

@ -1,183 +0,0 @@
#include <gtest/gtest.h>
#include "uint256.h"
#include "zcash/util.h"
#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <boost/optional.hpp>
#include <libsnark/common/default_types/r1cs_ppzksnark_pp.hpp>
#include <libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp>
#include "zcash/IncrementalMerkleTree.hpp"
using namespace libsnark;
using namespace libzcash;
#include "zcash/circuit/utils.tcc"
#include "zcash/circuit/merkle.tcc"
template<typename FieldT>
void test_value_equals(uint64_t i) {
protoboard<FieldT> pb;
pb_variable_array<FieldT> num;
num.allocate(pb, 64, "");
num.fill_with_bits(pb, uint64_to_bool_vector(i));
pb.add_r1cs_constraint(r1cs_constraint<FieldT>(
packed_addition(num),
FieldT::one(),
FieldT::one() * i
), "");
ASSERT_TRUE(pb.is_satisfied());
}
TEST(circuit, values)
{
typedef Fr<default_r1cs_ppzksnark_pp> FieldT;
test_value_equals<FieldT>(0);
test_value_equals<FieldT>(1);
test_value_equals<FieldT>(3);
test_value_equals<FieldT>(5391);
test_value_equals<FieldT>(883128374);
test_value_equals<FieldT>(173419028459);
test_value_equals<FieldT>(2205843009213693953);
}
TEST(circuit, endianness)
{
std::vector<unsigned char> before = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63
};
auto result = swap_endianness_u64(before);
std::vector<unsigned char> after = {
56, 57, 58, 59, 60, 61, 62, 63,
48, 49, 50, 51, 52, 53, 54, 55,
40, 41, 42, 43, 44, 45, 46, 47,
32, 33, 34, 35, 36, 37, 38, 39,
24, 25, 26, 27, 28, 29, 30, 31,
16, 17, 18, 19, 20, 21, 22, 23,
8, 9, 10, 11, 12, 13, 14, 15,
0, 1, 2, 3, 4, 5, 6, 7
};
EXPECT_EQ(after, result);
std::vector<unsigned char> bad = {0, 1, 2, 3};
ASSERT_THROW(swap_endianness_u64(bad), std::length_error);
}
template<typename FieldT>
bool test_merkle_gadget(
bool enforce_a,
bool enforce_b,
bool write_root_first
)
{
protoboard<FieldT> pb;
digest_variable<FieldT> root(pb, 256, "root");
pb.set_input_sizes(256);
digest_variable<FieldT> commitment1(pb, 256, "commitment1");
digest_variable<FieldT> commitment2(pb, 256, "commitment2");
pb_variable<FieldT> commitment1_read;
commitment1_read.allocate(pb);
pb_variable<FieldT> commitment2_read;
commitment2_read.allocate(pb);
merkle_tree_gadget<FieldT> mgadget1(pb, commitment1, root, commitment1_read);
merkle_tree_gadget<FieldT> mgadget2(pb, commitment2, root, commitment2_read);
commitment1.generate_r1cs_constraints();
commitment2.generate_r1cs_constraints();
root.generate_r1cs_constraints();
mgadget1.generate_r1cs_constraints();
mgadget2.generate_r1cs_constraints();
SproutMerkleTree tree;
uint256 commitment1_data = uint256S("54d626e08c1c802b305dad30b7e54a82f102390cc92c7d4db112048935236e9c");
uint256 commitment2_data = uint256S("59d2cde5e65c1414c32ba54f0fe4bdb3d67618125286e6a191317917c812c6d7");
tree.append(commitment1_data);
auto wit1 = tree.witness();
tree.append(commitment2_data);
wit1.append(commitment2_data);
auto wit2 = tree.witness();
auto expected_root = tree.root();
tree.append(uint256S("3e243c8798678570bb8d42616c23a536af44be15c4eef073490c2a44ae5f32c3"));
auto unexpected_root = tree.root();
tree.append(uint256S("26d9b20c7f1c3d2528bbcd43cd63344b0afd3b6a0a8ebd37ec51cba34907bec7"));
auto badwit1 = tree.witness();
tree.append(uint256S("02c2467c9cd15e0d150f74cd636505ed675b0b71b66a719f6f52fdb49a5937bb"));
auto badwit2 = tree.witness();
// Perform the test
pb.val(commitment1_read) = enforce_a ? FieldT::one() : FieldT::zero();
pb.val(commitment2_read) = enforce_b ? FieldT::one() : FieldT::zero();
commitment1.bits.fill_with_bits(pb, uint256_to_bool_vector(commitment1_data));
commitment2.bits.fill_with_bits(pb, uint256_to_bool_vector(commitment2_data));
if (write_root_first) {
root.bits.fill_with_bits(pb, uint256_to_bool_vector(expected_root));
}
mgadget1.generate_r1cs_witness(wit1.path());
mgadget2.generate_r1cs_witness(wit2.path());
// Overwrite with our expected root
root.bits.fill_with_bits(pb, uint256_to_bool_vector(expected_root));
return pb.is_satisfied();
}
TEST(circuit, merkle_tree_gadget_weirdness)
{
/*
The merkle tree gadget takes a leaf in the merkle tree (the Note commitment),
a merkle tree authentication path, and a root (anchor). It also takes a parameter
called read_success, which is used to determine if the commitment actually needs to
appear in the tree.
If two input notes use the same root (which our protocol does), and `read_success`
is disabled on the first note but enabled on the second note (i.e., the first note
has a value of zero and the second note has a nonzero value), then there is an edge case in
the witnessing behavior. The first witness will accidentally constrain the root to
equal null (the default value of the anchor) and the second witness will actually
copy the bits, violating the constraint system.
Notice that this edge case is not in the constraint system but in the witnessing
behavior.
*/
typedef Fr<default_r1cs_ppzksnark_pp> FieldT;
// Test the normal case
ASSERT_TRUE(test_merkle_gadget<FieldT>(true, true, false));
ASSERT_TRUE(test_merkle_gadget<FieldT>(true, true, true));
// Test the case where the first commitment is enforced but the second isn't
// Works because the first read is performed before the second one
ASSERT_TRUE(test_merkle_gadget<FieldT>(true, false, false));
ASSERT_TRUE(test_merkle_gadget<FieldT>(true, false, true));
// Test the case where the first commitment isn't enforced but the second is
// Doesn't work because the first multipacker witnesses the existing root (which
// is null)
ASSERT_TRUE(!test_merkle_gadget<FieldT>(false, true, false));
// Test the last again, except this time write the root first.
ASSERT_TRUE(test_merkle_gadget<FieldT>(false, true, true));
}

View File

@ -22,10 +22,9 @@ using namespace libzcash;
extern ZCJoinSplit* params;
typedef std::array<JSDescription, 2> SproutProofs;
// Make both the PHGR and Groth proof for a Sprout statement,
// and store the results in JSDescription objects.
SproutProofs makeSproutProofs(
// Make the Groth proof for a Sprout statement,
// and store the result in a JSDescription object.
JSDescription makeSproutProof(
ZCJoinSplit& js,
const std::array<JSInput, 2>& inputs,
const std::array<JSOutput, 2>& outputs,
@ -34,25 +33,17 @@ SproutProofs makeSproutProofs(
uint64_t vpub_new,
const uint256& rt
){
//Making the PHGR proof
JSDescription phgr(false, js, joinSplitPubKey, rt, inputs, outputs, vpub_old, vpub_new);
//Making the Groth proof
JSDescription groth(true, js, joinSplitPubKey, rt, inputs, outputs, vpub_old, vpub_new);
return {phgr, groth};
return JSDescription(js, joinSplitPubKey, rt, inputs, outputs, vpub_old, vpub_new);
}
bool verifySproutProofs(
bool verifySproutProof(
ZCJoinSplit& js,
const SproutProofs& jsdescs,
const JSDescription& jsdesc,
const uint256& joinSplitPubKey
)
{
auto verifier = libzcash::ProofVerifier::Strict();
bool phgrPassed = jsdescs[0].Verify(js, verifier, joinSplitPubKey);
bool grothPassed = jsdescs[1].Verify(js, verifier, joinSplitPubKey);
return phgrPassed && grothPassed;
return jsdesc.Verify(js, verifier, joinSplitPubKey);
}
@ -73,7 +64,7 @@ void test_full_api(ZCJoinSplit* js)
uint64_t vpub_new = 0;
uint256 joinSplitPubKey = random_uint256();
uint256 rt = tree.root();
SproutProofs jsdescs;
JSDescription jsdesc;
{
std::array<JSInput, 2> inputs = {
@ -89,7 +80,7 @@ void test_full_api(ZCJoinSplit* js)
std::array<SproutNote, 2> output_notes;
// Perform the proofs
jsdescs = makeSproutProofs(
jsdesc = makeSproutProof(
*js,
inputs,
outputs,
@ -101,13 +92,11 @@ void test_full_api(ZCJoinSplit* js)
}
// Verify both PHGR and Groth Proof:
ASSERT_TRUE(verifySproutProofs(*js, jsdescs, joinSplitPubKey));
ASSERT_TRUE(verifySproutProof(*js, jsdesc, joinSplitPubKey));
// Run tests using both phgr and groth as basis for field values
for (auto jsdesc : jsdescs)
{
SproutMerkleTree tree;
SproutProofs jsdescs2;
JSDescription jsdesc2;
// Recipient should decrypt
// Now the recipient should spend the money again
auto h_sig = js->h_sig(jsdesc.randomSeed, jsdesc.nullifiers, joinSplitPubKey);
@ -153,7 +142,7 @@ void test_full_api(ZCJoinSplit* js)
// Perform the proofs
jsdescs2 = makeSproutProofs(
jsdesc2 = makeSproutProof(
*js,
inputs,
outputs,
@ -166,8 +155,8 @@ void test_full_api(ZCJoinSplit* js)
}
// Verify both PHGR and Groth Proof:
ASSERT_TRUE(verifySproutProofs(*js, jsdescs2, joinSplitPubKey2));
// Verify Groth Proof:
ASSERT_TRUE(verifySproutProof(*js, jsdesc2, joinSplitPubKey2));
}
}
@ -191,28 +180,8 @@ void invokeAPI(
std::array<SproutNote, 2> output_notes;
// PHGR
SproutProof proof = js->prove(
false,
inputs,
outputs,
output_notes,
ciphertexts,
ephemeralKey,
joinSplitPubKey,
randomSeed,
macs,
nullifiers,
commitments,
vpub_old,
vpub_new,
rt,
false
);
// Groth
proof = js->prove(
true,
SproutProof proof = js->prove(
inputs,
outputs,
output_notes,

View File

@ -26,17 +26,11 @@
#include "zcash/IncrementalMerkleTree.hpp"
#include "zcash/util.h"
#include <libsnark/common/default_types/r1cs_ppzksnark_pp.hpp>
#include <libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp>
#include <boost/foreach.hpp>
#include "json_test_vectors.h"
using namespace std;
using namespace libsnark;
template<>
void expect_deser_same(const SproutTestingWitness& expected)
@ -58,8 +52,7 @@ void test_tree(
UniValue root_tests,
UniValue ser_tests,
UniValue witness_ser_tests,
UniValue path_tests,
bool libsnark_test
UniValue path_tests
)
{
size_t witness_ser_i = 0;
@ -115,55 +108,6 @@ void test_tree(
} else {
auto path = wit.path();
expect_test_vector(path_tests[path_i++], path);
if (libsnark_test) {
typedef Fr<default_r1cs_ppzksnark_pp> FieldT;
protoboard<FieldT> pb;
pb_variable_array<FieldT> positions;
digest_variable<FieldT> commitment(pb, 256, "commitment");
digest_variable<FieldT> root(pb, 256, "root");
positions.allocate(pb, INCREMENTAL_MERKLE_TREE_DEPTH_TESTING, "pos");
merkle_authentication_path_variable<FieldT, sha256_two_to_one_hash_gadget<FieldT>> authvars(pb, INCREMENTAL_MERKLE_TREE_DEPTH_TESTING, "auth");
merkle_tree_check_read_gadget<FieldT, sha256_two_to_one_hash_gadget<FieldT>> auth(
pb, INCREMENTAL_MERKLE_TREE_DEPTH_TESTING, positions, commitment, root, authvars, ONE, "path"
);
commitment.generate_r1cs_constraints();
root.generate_r1cs_constraints();
authvars.generate_r1cs_constraints();
auth.generate_r1cs_constraints();
std::vector<bool> commitment_bv;
{
uint256 witnessed_commitment = wit.element();
std::vector<unsigned char> commitment_v(witnessed_commitment.begin(), witnessed_commitment.end());
commitment_bv = convertBytesVectorToVector(commitment_v);
}
size_t path_index = convertVectorToInt(path.index);
commitment.bits.fill_with_bits(pb, bit_vector(commitment_bv));
positions.fill_with_bits_of_uint64(pb, path_index);
authvars.generate_r1cs_witness(path_index, path.authentication_path);
auth.generate_r1cs_witness();
std::vector<bool> root_bv;
{
uint256 witroot = wit.root();
std::vector<unsigned char> root_v(witroot.begin(), witroot.end());
root_bv = convertBytesVectorToVector(root_v);
}
root.bits.fill_with_bits(pb, bit_vector(root_bv));
ASSERT_TRUE(pb.is_satisfied());
root_bv[0] = !root_bv[0];
root.bits.fill_with_bits(pb, bit_vector(root_bv));
ASSERT_TRUE(!pb.is_satisfied());
}
}
// Check witness serialization
@ -200,8 +144,7 @@ TEST(merkletree, vectors) {
root_tests,
ser_tests,
witness_ser_tests,
path_tests,
true
path_tests
);
}
@ -217,8 +160,7 @@ TEST(merkletree, SaplingVectors) {
root_tests,
ser_tests,
witness_ser_tests,
path_tests,
false
path_tests
);
}

View File

@ -1,702 +0,0 @@
#include <gtest/gtest.h>
#include "zcash/Proof.hpp"
#include <iostream>
#include <libsnark/common/default_types/r1cs_ppzksnark_pp.hpp>
#include <libsnark/relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp>
#include <libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp>
using namespace libzcash;
typedef libsnark::default_r1cs_ppzksnark_pp curve_pp;
typedef libsnark::default_r1cs_ppzksnark_pp::G1_type curve_G1;
typedef libsnark::default_r1cs_ppzksnark_pp::G2_type curve_G2;
typedef libsnark::default_r1cs_ppzksnark_pp::GT_type curve_GT;
typedef libsnark::default_r1cs_ppzksnark_pp::Fp_type curve_Fr;
typedef libsnark::default_r1cs_ppzksnark_pp::Fq_type curve_Fq;
typedef libsnark::default_r1cs_ppzksnark_pp::Fqe_type curve_Fq2;
#include "streams.h"
#include "version.h"
#include "utilstrencodings.h"
TEST(proofs, g1_pairing_at_infinity)
{
for (size_t i = 0; i < 100; i++) {
auto r1 = curve_G1::random_element();
auto r2 = curve_G2::random_element();
ASSERT_TRUE(
curve_pp::reduced_pairing(curve_G1::zero(), r2) ==
curve_GT::one()
);
ASSERT_TRUE(
curve_pp::final_exponentiation(
curve_pp::double_miller_loop(
curve_pp::precompute_G1(curve_G1::zero()),
curve_pp::precompute_G2(r2),
curve_pp::precompute_G1(curve_G1::zero()),
curve_pp::precompute_G2(r2)
)
) ==
curve_GT::one()
);
ASSERT_TRUE(
curve_pp::final_exponentiation(
curve_pp::double_miller_loop(
curve_pp::precompute_G1(r1),
curve_pp::precompute_G2(r2),
curve_pp::precompute_G1(curve_G1::zero()),
curve_pp::precompute_G2(r2)
)
) ==
curve_pp::reduced_pairing(r1, r2)
);
ASSERT_TRUE(
curve_pp::final_exponentiation(
curve_pp::double_miller_loop(
curve_pp::precompute_G1(curve_G1::zero()),
curve_pp::precompute_G2(r2),
curve_pp::precompute_G1(r1),
curve_pp::precompute_G2(r2)
)
) ==
curve_pp::reduced_pairing(r1, r2)
);
}
}
TEST(proofs, g2_subgroup_check)
{
// all G2 elements are order r
ASSERT_TRUE(libsnark::alt_bn128_modulus_r * curve_G2::random_element() == curve_G2::zero());
// but that doesn't mean all elements that satisfy the curve equation are in G2...
curve_G2 p = curve_G2::one();
while (1) {
// This will construct an order r(2q-r) point with high probability
p.X = curve_Fq2::random_element();
try {
p.Y = ((p.X.squared() * p.X) + libsnark::alt_bn128_twist_coeff_b).sqrt();
break;
} catch(...) {}
}
ASSERT_TRUE(p.is_well_formed()); // it's on the curve
ASSERT_TRUE(libsnark::alt_bn128_modulus_r * p != curve_G2::zero()); // but not the order r subgroup.
{
// libsnark unfortunately doesn't check, and the pairing will complete
auto e = curve_Fr("149");
auto a = curve_pp::reduced_pairing(curve_G1::one(), p);
auto b = curve_pp::reduced_pairing(e * curve_G1::one(), p);
// though it will not preserve bilinearity
ASSERT_TRUE((a^e) != b);
}
{
// so, our decompression API should not allow you to decompress G2 elements of that form!
CompressedG2 badp(p);
try {
auto newp = badp.to_libsnark_g2<curve_G2>();
FAIL() << "Expected std::runtime_error";
} catch (std::runtime_error const & err) {
EXPECT_EQ(err.what(), std::string("point is not in G2"));
} catch(...) {
FAIL() << "Expected std::runtime_error";
}
}
// educational purposes: showing that E'(Fp2) is of order r(2q-r),
// by multiplying our random point in E' by (2q-r) = (q + q - r) to
// get an element in G2
{
auto p1 = libsnark::alt_bn128_modulus_q * p;
p1 = p1 + p1;
p1 = p1 - (libsnark::alt_bn128_modulus_r * p);
ASSERT_TRUE(p1.is_well_formed());
ASSERT_TRUE(libsnark::alt_bn128_modulus_r * p1 == curve_G2::zero());
CompressedG2 goodp(p1);
auto newp = goodp.to_libsnark_g2<curve_G2>();
ASSERT_TRUE(newp == p1);
}
}
TEST(proofs, sqrt_zero)
{
ASSERT_TRUE(curve_Fq::zero() == curve_Fq::zero().sqrt());
ASSERT_TRUE(curve_Fq2::zero() == curve_Fq2::zero().sqrt());
}
TEST(proofs, sqrt_fq)
{
// Poor man's PRNG
curve_Fq acc = curve_Fq("348957923485290374852379485") ^ 1000;
size_t quadratic_residues = 0;
size_t quadratic_nonresidues = 0;
for (size_t i = 1; i < 1000; i++) {
try {
acc += curve_Fq("45634563456") ^ i;
curve_Fq x = acc.sqrt();
ASSERT_TRUE((x*x) == acc);
quadratic_residues += 1;
} catch (std::runtime_error &e) {
quadratic_nonresidues += 1;
}
}
// Half of all nonzero elements in Fp are quadratic residues
ASSERT_TRUE(quadratic_residues == 511);
ASSERT_TRUE(quadratic_nonresidues == 488);
for (size_t i = 0; i < 1000; i++) {
curve_Fq x = curve_Fq::random_element();
curve_Fq x2 = x * x;
ASSERT_TRUE((x2.sqrt() == x) || (x2.sqrt() == -x));
}
// Test vectors
ASSERT_TRUE(
curve_Fq("5204065062716160319596273903996315000119019512886596366359652578430118331601")
==
curve_Fq("348579348568").sqrt()
);
ASSERT_THROW(curve_Fq("348579348569").sqrt(), std::runtime_error);
}
TEST(proofs, sqrt_fq2)
{
curve_Fq2 acc = curve_Fq2(
curve_Fq("3456293840592348059238409578239048769348760238476029347885092384059238459834") ^ 1000,
curve_Fq("2394578084760439457823945729347502374590283479582739485723945729384759823745") ^ 1000
);
size_t quadratic_residues = 0;
size_t quadratic_nonresidues = 0;
for (size_t i = 1; i < 1000; i++) {
try {
acc = acc + curve_Fq2(
curve_Fq("5204065062716160319596273903996315000119019512886596366359652578430118331601") ^ i,
curve_Fq("348957923485290374852379485348957923485290374852379485348957923485290374852") ^ i
);
curve_Fq2 x = acc.sqrt();
ASSERT_TRUE((x*x) == acc);
quadratic_residues += 1;
} catch (std::runtime_error &e) {
quadratic_nonresidues += 1;
}
}
// Half of all nonzero elements in Fp^k are quadratic residues as long
// as p != 2
ASSERT_TRUE(quadratic_residues == 505);
ASSERT_TRUE(quadratic_nonresidues == 494);
for (size_t i = 0; i < 1000; i++) {
curve_Fq2 x = curve_Fq2::random_element();
curve_Fq2 x2 = x * x;
ASSERT_TRUE((x2.sqrt() == x) || (x2.sqrt() == -x));
}
// Test vectors
ASSERT_THROW(curve_Fq2(
curve_Fq("2"),
curve_Fq("1")
).sqrt(), std::runtime_error);
ASSERT_THROW(curve_Fq2(
curve_Fq("3345897230485723946872934576923485762803457692345760237495682347502347589473"),
curve_Fq("1234912378405347958234756902345768290345762348957605678245967234857634857676")
).sqrt(), std::runtime_error);
curve_Fq2 x = curve_Fq2(
curve_Fq("12844195307879678418043983815760255909500142247603239203345049921980497041944"),
curve_Fq("7476417578426924565731404322659619974551724117137577781074613937423560117731")
);
curve_Fq2 nx = -x;
curve_Fq2 x2 = curve_Fq2(
curve_Fq("3345897230485723946872934576923485762803457692345760237495682347502347589474"),
curve_Fq("1234912378405347958234756902345768290345762348957605678245967234857634857676")
);
ASSERT_TRUE(x == x2.sqrt());
ASSERT_TRUE(nx == -x2.sqrt());
ASSERT_TRUE(x*x == x2);
ASSERT_TRUE(nx*nx == x2);
}
TEST(proofs, size_is_expected)
{
PHGRProof p;
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << p;
ASSERT_EQ(ss.size(), 296);
}
TEST(proofs, fq_serializes_properly)
{
for (size_t i = 0; i < 1000; i++) {
curve_Fq e = curve_Fq::random_element();
Fq e2(e);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << e2;
Fq e3;
ss >> e3;
curve_Fq e4 = e3.to_libsnark_fq<curve_Fq>();
ASSERT_TRUE(e == e4);
}
}
TEST(proofs, fq2_serializes_properly)
{
for (size_t i = 0; i < 1000; i++) {
curve_Fq2 e = curve_Fq2::random_element();
Fq2 e2(e);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << e2;
Fq2 e3;
ss >> e3;
curve_Fq2 e4 = e3.to_libsnark_fq2<curve_Fq2>();
ASSERT_TRUE(e == e4);
}
}
template<typename T>
T deserialize_tv(std::string s)
{
T e;
CDataStream ss(ParseHex(s), SER_NETWORK, PROTOCOL_VERSION);
ss >> e;
return e;
}
curve_Fq deserialize_fq(std::string s)
{
return deserialize_tv<Fq>(s).to_libsnark_fq<curve_Fq>();
}
curve_Fq2 deserialize_fq2(std::string s)
{
return deserialize_tv<Fq2>(s).to_libsnark_fq2<curve_Fq2>();
}
TEST(proofs, fq_valid)
{
curve_Fq e = deserialize_fq("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46");
ASSERT_TRUE(e == curve_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"));
ASSERT_TRUE(e != curve_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208581"));
curve_Fq e2 = deserialize_fq("30644e72e131a029b75045b68181585d97816a916871ca8d3c208c16d87cfd46");
ASSERT_TRUE(e2 == curve_Fq("21888242871839275222221885816603420866962577604863418715751138068690288573766"));
}
TEST(proofs, fq_invalid)
{
// Should not be able to deserialize the modulus
ASSERT_THROW(
deserialize_fq("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47"),
std::logic_error
);
// Should not be able to deserialize the modulus plus one
ASSERT_THROW(
deserialize_fq("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd48"),
std::logic_error
);
// Should not be able to deserialize a ridiculously out of bound int
ASSERT_THROW(
deserialize_fq("ff644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46"),
std::logic_error
);
}
TEST(proofs, fq2_valid)
{
// (q - 1) * q + q
curve_Fq2 e = deserialize_fq2("0925c4b8763cbf9c599a6f7c0348d21cb00b85511637560626edfa5c34c6b38d04689e957a1242c84a50189c6d96cadca602072d09eac1013b5458a2275d69b0");
ASSERT_TRUE(e.c0 == curve_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"));
ASSERT_TRUE(e.c1 == curve_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"));
curve_Fq2 e2 = deserialize_fq2("000000000000000000000000000000000000000000000000010245be1c91e3186bbbe1c430a93fcfc5aada4ab10c3492f70eea97a91c7b29554db55acffa34d2");
ASSERT_TRUE(e2.c0 == curve_Fq("238769481237490823"));
ASSERT_TRUE(e2.c1 == curve_Fq("384579238459723485"));
curve_Fq2 e3 = deserialize_fq2("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000");
ASSERT_TRUE(e3.c0 == curve_Fq("0"));
ASSERT_TRUE(e3.c1 == curve_Fq("0"));
curve_Fq2 e4 = deserialize_fq2("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001");
ASSERT_TRUE(e4.c0 == curve_Fq("1"));
ASSERT_TRUE(e4.c1 == curve_Fq("0"));
}
TEST(proofs, fq2_invalid)
{
// (q - 1) * q + q is invalid
ASSERT_THROW(
deserialize_fq2("0925c4b8763cbf9c599a6f7c0348d21cb00b85511637560626edfa5c34c6b38d04689e957a1242c84a50189c6d96cadca602072d09eac1013b5458a2275d69b1"),
std::logic_error
);
// q * q + (q - 1) is invalid
ASSERT_THROW(
deserialize_fq2("0925c4b8763cbf9c599a6f7c0348d21cb00b85511637560626edfa5c34c6b38d34cced085b43e2f202a05e52ef18233a3d8371be725c8b8e7774e4b8ffda66f7"),
std::logic_error
);
// Ridiculously out of bounds
ASSERT_THROW(
deserialize_fq2("0fffc4b8763cbf9c599a6f7c0348d21cb00b85511637560626edfa5c34c6b38d04689e957a1242c84a50189c6d96cadca602072d09eac1013b5458a2275d69b0"),
std::logic_error
);
ASSERT_THROW(
deserialize_fq2("ffffffff763cbf9c599a6f7c0348d21cb00b85511637560626edfa5c34c6b38d04689e957a1242c84a50189c6d96cadca602072d09eac1013b5458a2275d69b0"),
std::logic_error
);
}
TEST(proofs, g1_serializes_properly)
{
// Cannot serialize zero
{
ASSERT_THROW({CompressedG1 g = CompressedG1(curve_G1::zero());}, std::domain_error);
}
for (size_t i = 0; i < 1000; i++) {
curve_G1 e = curve_G1::random_element();
CompressedG1 e2(e);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << e2;
CompressedG1 e3;
ss >> e3;
ASSERT_TRUE(e2 == e3);
curve_G1 e4 = e3.to_libsnark_g1<curve_G1>();
ASSERT_TRUE(e == e4);
}
}
TEST(proofs, g2_serializes_properly)
{
// Cannot serialize zero
{
ASSERT_THROW({CompressedG2 g = CompressedG2(curve_G2::zero());}, std::domain_error);
}
for (size_t i = 0; i < 1000; i++) {
curve_G2 e = curve_G2::random_element();
CompressedG2 e2(e);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << e2;
CompressedG2 e3;
ss >> e3;
ASSERT_TRUE(e2 == e3);
curve_G2 e4 = e3.to_libsnark_g2<curve_G2>();
ASSERT_TRUE(e == e4);
}
}
TEST(proofs, zksnark_serializes_properly)
{
auto example = libsnark::generate_r1cs_example_with_field_input<curve_Fr>(250, 4);
example.constraint_system.swap_AB_if_beneficial();
auto kp = libsnark::r1cs_ppzksnark_generator<curve_pp>(example.constraint_system);
auto vkprecomp = libsnark::r1cs_ppzksnark_verifier_process_vk(kp.vk);
for (size_t i = 0; i < 20; i++) {
auto badproof = PHGRProof::random_invalid();
auto proof = badproof.to_libsnark_proof<libsnark::r1cs_ppzksnark_proof<curve_pp>>();
auto verifierEnabled = ProofVerifier::Strict();
auto verifierDisabled = ProofVerifier::Disabled();
// This verifier should catch the bad proof
ASSERT_FALSE(verifierEnabled.check(
kp.vk,
vkprecomp,
example.primary_input,
proof
));
// This verifier won't!
ASSERT_TRUE(verifierDisabled.check(
kp.vk,
vkprecomp,
example.primary_input,
proof
));
}
for (size_t i = 0; i < 20; i++) {
auto proof = libsnark::r1cs_ppzksnark_prover<curve_pp>(
kp.pk,
example.primary_input,
example.auxiliary_input,
example.constraint_system
);
{
auto verifierEnabled = ProofVerifier::Strict();
auto verifierDisabled = ProofVerifier::Disabled();
ASSERT_TRUE(verifierEnabled.check(
kp.vk,
vkprecomp,
example.primary_input,
proof
));
ASSERT_TRUE(verifierDisabled.check(
kp.vk,
vkprecomp,
example.primary_input,
proof
));
}
ASSERT_TRUE(libsnark::r1cs_ppzksnark_verifier_strong_IC<curve_pp>(
kp.vk,
example.primary_input,
proof
));
PHGRProof compressed_proof_0(proof);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << compressed_proof_0;
PHGRProof compressed_proof_1;
ss >> compressed_proof_1;
ASSERT_TRUE(compressed_proof_0 == compressed_proof_1);
auto newproof = compressed_proof_1.to_libsnark_proof<libsnark::r1cs_ppzksnark_proof<curve_pp>>();
ASSERT_TRUE(proof == newproof);
ASSERT_TRUE(libsnark::r1cs_ppzksnark_verifier_strong_IC<curve_pp>(
kp.vk,
example.primary_input,
newproof
));
}
}
TEST(proofs, g1_deserialization)
{
CompressedG1 g;
curve_G1 expected;
// Valid G1 element.
{
CDataStream ss(ParseHex("0230644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
expected.X = curve_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
expected.Y = curve_Fq("3969792565221544645472939191694882283483352126195956956354061729942568608776");
expected.Z = curve_Fq::one();
ASSERT_TRUE(g.to_libsnark_g1<curve_G1>() == expected);
}
// Its negation.
{
CDataStream ss(ParseHex("0330644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
expected.X = curve_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
expected.Y = curve_Fq("3969792565221544645472939191694882283483352126195956956354061729942568608776");
expected.Z = curve_Fq::one();
ASSERT_TRUE(g.to_libsnark_g1<curve_G1>() == -expected);
}
// Invalid leading bytes
{
CDataStream ss(ParseHex("ff30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46"), SER_NETWORK, PROTOCOL_VERSION);
ASSERT_THROW(ss >> g, std::ios_base::failure);
}
// Invalid point
{
CDataStream ss(ParseHex("0208c6d2adffacbc8438f09f321874ea66e2fcc29f8dcfec2caefa21ec8c96a77c"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
ASSERT_THROW(g.to_libsnark_g1<curve_G1>(), std::runtime_error);
}
// Point with out of bounds Fq
{
CDataStream ss(ParseHex("02ffc6d2adffacbc8438f09f321874ea66e2fcc29f8dcfec2caefa21ec8c96a77c"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
ASSERT_THROW(g.to_libsnark_g1<curve_G1>(), std::logic_error);
}
// Randomly produce well-formed G1 encodings and succeed or fail to
// turn them into G1 points depending on whether they are valid curve points.
for (size_t i = 0; i < 5000; i++) {
curve_Fq e = curve_Fq::random_element();
CDataStream ss(ParseHex("02"), SER_NETWORK, PROTOCOL_VERSION);
ss << Fq(e);
CompressedG1 g;
ss >> g;
try {
curve_G1 g_real = g.to_libsnark_g1<curve_G1>();
} catch(...) {
}
}
}
TEST(proofs, g2_deserialization)
{
CompressedG2 g;
curve_G2 expected = curve_G2::random_element();
// Valid G2 point
{
CDataStream ss(ParseHex("0a023aed31b5a9e486366ea9988b05dba469c6206e58361d9c065bbea7d928204a761efc6e4fa08ed227650134b52c7f7dd0463963e8a4bf21f4899fe5da7f984a"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
expected.X = curve_Fq2(
curve_Fq("5923585509243758863255447226263146374209884951848029582715967108651637186684"),
curve_Fq("5336385337059958111259504403491065820971993066694750945459110579338490853570")
);
expected.Y = curve_Fq2(
curve_Fq("10374495865873200088116930399159835104695426846400310764827677226300185211748"),
curve_Fq("5256529835065685814318509161957442385362539991735248614869838648137856366932")
);
expected.Z = curve_Fq2::one();
ASSERT_TRUE(g.to_libsnark_g2<curve_G2>() == expected);
}
// Its negation
{
CDataStream ss(ParseHex("0b023aed31b5a9e486366ea9988b05dba469c6206e58361d9c065bbea7d928204a761efc6e4fa08ed227650134b52c7f7dd0463963e8a4bf21f4899fe5da7f984a"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
expected.X = curve_Fq2(
curve_Fq("5923585509243758863255447226263146374209884951848029582715967108651637186684"),
curve_Fq("5336385337059958111259504403491065820971993066694750945459110579338490853570")
);
expected.Y = curve_Fq2(
curve_Fq("10374495865873200088116930399159835104695426846400310764827677226300185211748"),
curve_Fq("5256529835065685814318509161957442385362539991735248614869838648137856366932")
);
expected.Z = curve_Fq2::one();
ASSERT_TRUE(g.to_libsnark_g2<curve_G2>() == -expected);
}
// Invalid leading bytes
{
CDataStream ss(ParseHex("ff023aed31b5a9e486366ea9988b05dba469c6206e58361d9c065bbea7d928204a761efc6e4fa08ed227650134b52c7f7dd0463963e8a4bf21f4899fe5da7f984a"), SER_NETWORK, PROTOCOL_VERSION);
ASSERT_THROW(ss >> g, std::ios_base::failure);
}
// Invalid point
{
CDataStream ss(ParseHex("0b023aed31b5a9e486366ea9988b05dba469c6206e58361d9c065bbea7d928204a761efc6e4fa08ed227650134b52c7f7dd0463963e8a4bf21f4899fe5da7f984b"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
ASSERT_THROW(g.to_libsnark_g2<curve_G2>(), std::runtime_error);
}
// Point with out of bounds Fq2
{
CDataStream ss(ParseHex("0a0f3aed31b5a9e486366ea9988b05dba469c6206e58361d9c065bbea7d928204a761efc6e4fa08ed227650134b52c7f7dd0463963e8a4bf21f4899fe5da7f984a"), SER_NETWORK, PROTOCOL_VERSION);
ss >> g;
ASSERT_THROW(g.to_libsnark_g2<curve_G2>(), std::logic_error);
}
// Randomly produce well-formed G2 encodings and succeed or fail to
// turn them into G2 points depending on whether they are valid curve points.
for (size_t i = 0; i < 5000; i++) {
curve_Fq2 e = curve_Fq2::random_element();
CDataStream ss(ParseHex("0a"), SER_NETWORK, PROTOCOL_VERSION);
ss << Fq2(e);
CompressedG2 g;
ss >> g;
try {
curve_G2 g_real = g.to_libsnark_g2<curve_G2>();
} catch(...) {
}
}
}
#include "json_test_vectors.h"
#include "test/data/g1_compressed.json.h"
TEST(proofs, g1_test_vectors)
{
UniValue v = read_json(std::string(json_tests::g1_compressed, json_tests::g1_compressed + sizeof(json_tests::g1_compressed)));
curve_G1 e = curve_Fr("34958239045823") * curve_G1::one();
for (size_t i = 0; i < 10000; i++) {
e = (curve_Fr("34958239045823") ^ i) * e;
auto expected = CompressedG1(e);
expect_test_vector(v[i], expected);
ASSERT_TRUE(expected.to_libsnark_g1<curve_G1>() == e);
}
}
#include "test/data/g2_compressed.json.h"
TEST(proofs, g2_test_vectors)
{
UniValue v = read_json(std::string(json_tests::g2_compressed, json_tests::g2_compressed + sizeof(json_tests::g2_compressed)));
curve_G2 e = curve_Fr("34958239045823") * curve_G2::one();
for (size_t i = 0; i < 10000; i++) {
e = (curve_Fr("34958239045823") ^ i) * e;
auto expected = CompressedG2(e);
expect_test_vector(v[i], expected);
ASSERT_TRUE(expected.to_libsnark_g2<curve_G2>() == e);
}
}

View File

@ -45,7 +45,6 @@ TEST(Transaction, JSDescriptionRandomized) {
{
auto jsdesc = JSDescription::Randomized(
false,
*params, joinSplitPubKey, rt,
inputs, outputs,
inputMap, outputMap,
@ -62,7 +61,6 @@ TEST(Transaction, JSDescriptionRandomized) {
{
auto jsdesc = JSDescription::Randomized(
false,
*params, joinSplitPubKey, rt,
inputs, outputs,
inputMap, outputMap,
@ -76,7 +74,6 @@ TEST(Transaction, JSDescriptionRandomized) {
{
auto jsdesc = JSDescription::Randomized(
false,
*params, joinSplitPubKey, rt,
inputs, outputs,
inputMap, outputMap,

View File

@ -59,8 +59,6 @@
#include <boost/thread.hpp>
#include <openssl/crypto.h>
#include <libsnark/common/profiling.hpp>
#if ENABLE_ZMQ
#include "zmq/zmqnotificationinterface.h"
#endif
@ -527,7 +525,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT));
}
// Disabled until we can lock notes and also tune performance of libsnark which by default uses multiple threads
// Disabled until we can lock notes and also tune performance of the prover which by default uses multiple threads
//strUsage += HelpMessageOpt("-rpcasyncthreads=<n>", strprintf(_("Set the number of threads to service Async RPC calls (default: %d)"), 1));
if (mode == HMM_BITCOIND) {
@ -703,15 +701,11 @@ static void ZC_LoadParams(
struct timeval tv_start, tv_end;
float elapsed;
boost::filesystem::path pk_path = ZC_GetParamsDir() / "sprout-proving.key";
boost::filesystem::path vk_path = ZC_GetParamsDir() / "sprout-verifying.key";
boost::filesystem::path sapling_spend = ZC_GetParamsDir() / "sapling-spend.params";
boost::filesystem::path sapling_output = ZC_GetParamsDir() / "sapling-output.params";
boost::filesystem::path sprout_groth16 = ZC_GetParamsDir() / "sprout-groth16.params";
if (!(
boost::filesystem::exists(pk_path) &&
boost::filesystem::exists(vk_path) &&
boost::filesystem::exists(sapling_spend) &&
boost::filesystem::exists(sapling_output) &&
boost::filesystem::exists(sprout_groth16)
@ -726,14 +720,7 @@ static void ZC_LoadParams(
return;
}
LogPrintf("Loading verifying key from %s\n", vk_path.string().c_str());
gettimeofday(&tv_start, 0);
pzcashParams = ZCJoinSplit::Prepared(vk_path.string(), pk_path.string());
gettimeofday(&tv_end, 0);
elapsed = float(tv_end.tv_sec-tv_start.tv_sec) + (tv_end.tv_usec-tv_start.tv_usec)/float(1000000);
LogPrintf("Loaded verifying key in %fs seconds.\n", elapsed);
pzcashParams = ZCJoinSplit::Prepared();
static_assert(
sizeof(boost::filesystem::path::value_type) == sizeof(codeunit),
@ -860,8 +847,6 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
return InitError(_("Payment disclosure requires -experimentalfeatures."));
} else if (mapArgs.count("-zmergetoaddress")) {
return InitError(_("RPC method z_mergetoaddress requires -experimentalfeatures."));
} else if (mapArgs.count("-savesproutr1cs")) {
return InitError(_("Saving the Sprout R1CS requires -experimentalfeatures."));
} else if (mapArgs.count("-insightexplorer")) {
return InitError(_("Insight explorer requires -experimentalfeatures."));
}
@ -1246,22 +1231,9 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
threadGroup.create_thread(&ThreadShowMetricsScreen);
}
// These must be disabled for now, they are buggy and we probably don't
// want any of libsnark's profiling in production anyway.
libsnark::inhibit_profiling_info = true;
libsnark::inhibit_profiling_counters = true;
// Initialize Zcash circuit parameters
ZC_LoadParams(chainparams);
if (GetBoolArg("-savesproutr1cs", false)) {
boost::filesystem::path r1cs_path = ZC_GetParamsDir() / "r1cs";
LogPrintf("Saving Sprout R1CS to %s\n", r1cs_path.string());
pzcashParams->saveR1CS(r1cs_path.string());
}
/* Start the RPC server already. It will be started in "warmup" mode
* and not really process calls already (but it will signify connections
* that the server is there and will be ready later). Warmup mode will

View File

@ -113,6 +113,24 @@ const string strMessageMagic = "Zcash Signed Message:\n";
// Internal stuff
namespace {
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
strMiscWarning = strMessage;
LogPrintf("*** %s\n", strMessage);
uiInterface.ThreadSafeMessageBox(
userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
"", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
}
struct CBlockIndexWorkComparator
{
bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
@ -1850,6 +1868,29 @@ bool IsInitialBlockDownload(const CChainParams& chainParams)
return true;
if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
return true;
// Don't bother checking Sprout, it is always active.
for (int idx = Consensus::BASE_SPROUT + 1; idx < Consensus::MAX_NETWORK_UPGRADES; idx++) {
// If we expect a particular activation block hash, and either the upgrade is not
// active or it doesn't match the block at that height on the current chain, then
// we are not on the correct chain. As we have already checked that the current
// chain satisfies the minimum chain work, this is likely an adversarial situation
// where the node is being fed a fake alternate chain; shut down for safety.
auto upgrade = chainParams.GetConsensus().vUpgrades[idx];
if (upgrade.hashActivationBlock && (
!chainParams.GetConsensus().NetworkUpgradeActive(chainActive.Height(), Consensus::UpgradeIndex(idx))
|| chainActive[upgrade.nActivationHeight]->GetBlockHash() != upgrade.hashActivationBlock.get()
)) {
AbortNode(
strprintf(
"%s: Activation block hash mismatch for the %s network upgrade (expected %s, found %s). Likely adversarial condition; shutting down for safety.",
__func__,
NetworkUpgradeInfo[idx].strName,
upgrade.hashActivationBlock.get().GetHex(),
chainActive[upgrade.nActivationHeight]->GetBlockHash().GetHex()),
_("We are on a chain with sufficient work, but the network upgrade checkpoints do not match. Your node may be under attack! Shutting down for safety."));
return true;
}
}
if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
@ -2239,24 +2280,6 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uin
return true;
}
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
strMiscWarning = strMessage;
LogPrintf("*** %s\n", strMessage);
uiInterface.ThreadSafeMessageBox(
userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
"", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
}
} // anon namespace
/**

View File

@ -12,7 +12,6 @@
#include "librustzcash.h"
JSDescription::JSDescription(
bool makeGrothProof,
ZCJoinSplit& params,
const uint256& joinSplitPubKey,
const uint256& anchor,
@ -27,7 +26,6 @@ JSDescription::JSDescription(
std::array<libzcash::SproutNote, ZC_NUM_JS_OUTPUTS> notes;
proof = params.prove(
makeGrothProof,
inputs,
outputs,
notes,
@ -47,7 +45,6 @@ JSDescription::JSDescription(
}
JSDescription JSDescription::Randomized(
bool makeGrothProof,
ZCJoinSplit& params,
const uint256& joinSplitPubKey,
const uint256& anchor,
@ -72,7 +69,6 @@ JSDescription JSDescription::Randomized(
MappedShuffle(outputs.begin(), outputMap.begin(), ZC_NUM_JS_OUTPUTS, gen);
return JSDescription(
makeGrothProof,
params, joinSplitPubKey, anchor, inputs, outputs,
vpub_old, vpub_new, computeProof,
esk // payment disclosure
@ -96,18 +92,9 @@ public:
bool operator()(const libzcash::PHGRProof& proof) const
{
return params.verify(
proof,
verifier,
joinSplitPubKey,
jsdesc.randomSeed,
jsdesc.macs,
jsdesc.nullifiers,
jsdesc.commitments,
jsdesc.vpub_old,
jsdesc.vpub_new,
jsdesc.anchor
);
// We checkpoint after Sapling activation, so we can skip verification
// for all Sprout proofs.
return true;
}
bool operator()(const libzcash::GrothProof& proof) const

View File

@ -227,7 +227,6 @@ public:
JSDescription(): vpub_old(0), vpub_new(0) { }
JSDescription(
bool makeGrothProof,
ZCJoinSplit& params,
const uint256& joinSplitPubKey,
const uint256& rt,
@ -240,7 +239,6 @@ public:
);
static JSDescription Randomized(
bool makeGrothProof,
ZCJoinSplit& params,
const uint256& joinSplitPubKey,
const uint256& rt,

src/snark/.gitignore
View File

@ -1,51 +0,0 @@
*.o
*.a
*.so
*.d
libsnark/gtests
depinst/
depsrc/
README.html
doxygen/
libsnark/gtests
libsnark/gadgetlib2/examples/tutorial
libsnark/gadgetlib2/tests/gadgetlib2_test
libsnark/algebra/curves/tests/test_bilinearity
libsnark/algebra/curves/tests/test_groups
libsnark/algebra/fields/tests/test_fields
libsnark/common/routing_algorithms/profiling/profile_routing_algorithms
libsnark/common/routing_algorithms/tests/test_routing_algorithms
libsnark/gadgetlib1/gadgets/cpu_checkers/fooram/examples/test_fooram
libsnark/gadgetlib1/gadgets/hashes/knapsack/tests/test_knapsack_gadget
libsnark/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget
libsnark/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets
libsnark/gadgetlib1/gadgets/routing/profiling/profile_routing_gadgets
libsnark/gadgetlib1/gadgets/set_commitment/tests/test_set_commitment_gadget
libsnark/gadgetlib1/gadgets/verifiers/tests/test_r1cs_ppzksnark_verifier_gadget
libsnark/reductions/ram_to_r1cs/examples/demo_arithmetization
libsnark/relations/arithmetic_programs/qap/tests/test_qap
libsnark/relations/arithmetic_programs/ssp/tests/test_ssp
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/profiling/profile_r1cs_mp_ppzkpcd
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/tests/test_r1cs_mp_ppzkpcd
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/profiling/profile_r1cs_sp_ppzkpcd
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/tests/test_r1cs_sp_ppzkpcd
libsnark/zk_proof_systems/ppzkadsnark/r1cs_ppzkadsnark/examples/demo_r1cs_ppzkadsnark
libsnark/zk_proof_systems/ppzksnark/bacs_ppzksnark/profiling/profile_bacs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/bacs_ppzksnark/tests/test_bacs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/profiling/profile_r1cs_gg_ppzksnark
libsnark/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/tests/test_r1cs_gg_ppzksnark
libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_generator
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_verifier
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/profiling/profile_ram_ppzksnark
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/tests/test_ram_ppzksnark
libsnark/zk_proof_systems/ppzksnark/tbcs_ppzksnark/profiling/profile_tbcs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/tbcs_ppzksnark/tests/test_tbcs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/uscs_ppzksnark/profiling/profile_uscs_ppzksnark
libsnark/zk_proof_systems/ppzksnark/uscs_ppzksnark/tests/test_uscs_ppzksnark
libsnark/zk_proof_systems/zksnark/ram_zksnark/profiling/profile_ram_zksnark
libsnark/zk_proof_systems/zksnark/ram_zksnark/tests/test_ram_zksnark

View File

@ -1,19 +0,0 @@
SCIPR Lab:
Eli Ben-Sasson
Alessandro Chiesa
Daniel Genkin
Shaul Kfir
Eran Tromer
Madars Virza
External contributors:
Michael Backes
Manuel Barbosa
Dario Fiore
Jens Groth
Joshua A. Kroll
Shigeo MITSUNARI
Raphael Reischuk
Tadanori TERUYA
Sean Bowe
Daira Hopwood

View File

@ -1,24 +0,0 @@
The libsnark library is developed by SCIPR Lab (http://scipr-lab.org)
and contributors.
Copyright (c) 2012-2019 SCIPR Lab and contributors (see AUTHORS file).
All files, with the exceptions below, are released under the MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,285 +0,0 @@
#********************************************************************************
# Makefile for the libsnark library.
#********************************************************************************
#* @author This file is part of libsnark, developed by SCIPR Lab
#* and contributors (see AUTHORS).
#* @copyright MIT license (see LICENSE file)
#*******************************************************************************/
# To override these, use "make OPTFLAGS=..." etc.
CURVE = BN128
OPTFLAGS = -O2 -march=native -mtune=native
FEATUREFLAGS = -DUSE_ASM -DMONTGOMERY_OUTPUT
# Initialize this using "CXXFLAGS=... make". The makefile appends to that.
CXXFLAGS += -std=c++11 -Wall -Wextra -Wno-unused-parameter -Wno-comment -Wfatal-errors $(OPTFLAGS) $(FEATUREFLAGS) -DCURVE_$(CURVE)
DEPSRC = depsrc
DEPINST = depinst
CXXFLAGS += -I$(DEPINST)/include -Ilibsnark
LDFLAGS += -L$(DEPINST)/lib -Wl,-rpath,$(DEPINST)/lib
LDLIBS += -lgmpxx -lgmp -lboost_program_options -lsodium
# List of .a files to include within libsnark.a and libsnark.so:
AR_LIBS =
# List of library files to install:
INSTALL_LIBS = $(LIB_FILE)
# Sentinel file to check existence of this directory (since directories don't work as a Make dependency):
DEPINST_EXISTS = $(DEPINST)/.exists
ifneq ($(NO_GTEST),1)
# Compile GTest from source code if we can (e.g., Ubuntu). Otherwise use a precompiled one (e.g., Fedora).
# See https://github.com/google/googletest/blob/master/googletest/docs/FAQ.md#why-is-it-not-recommended-to-install-a-pre-compiled-copy-of-google-test-for-example-into-usrlocal
ifneq ($(NO_COMPILE_LIBGTEST),1)
GTESTDIR=/usr/src/gtest
COMPILE_LIBGTEST = $(shell test -d $(GTESTDIR) && echo -n 1)
endif
GTEST_LDLIBS += -lgtest -lpthread
endif
ifneq ($(NO_SUPERCOP),1)
SUPERCOP_LDLIBS += -lsupercop
INSTALL_LIBS += depinst/lib/libsupercop.a
# Would have been nicer to roll supercop into libsnark.a ("AR_LIBS += $(DEPINST)/lib/libsupercop.a"), but it doesn't support position-independent code (libsnark issue #20).
endif
LIB_SRCS = \
libsnark/algebra/curves/alt_bn128/alt_bn128_g1.cpp \
libsnark/algebra/curves/alt_bn128/alt_bn128_g2.cpp \
libsnark/algebra/curves/alt_bn128/alt_bn128_init.cpp \
libsnark/algebra/curves/alt_bn128/alt_bn128_pairing.cpp \
libsnark/algebra/curves/alt_bn128/alt_bn128_pp.cpp \
libsnark/common/profiling.cpp \
libsnark/common/utils.cpp \
libsnark/gadgetlib1/constraint_profiling.cpp \
ifeq ($(CURVE),BN128)
LIB_SRCS += \
libsnark/algebra/curves/bn128/bn128_g1.cpp \
libsnark/algebra/curves/bn128/bn128_g2.cpp \
libsnark/algebra/curves/bn128/bn128_gt.cpp \
libsnark/algebra/curves/bn128/bn128_init.cpp \
libsnark/algebra/curves/bn128/bn128_pairing.cpp \
libsnark/algebra/curves/bn128/bn128_pp.cpp
CXXFLAGS += -DBN_SUPPORT_SNARK
AR_LIBS += $(DEPINST)/lib/libzm.a
endif
# FIXME: most of these are broken due to removed code.
DISABLED_EXECUTABLES = \
libsnark/common/routing_algorithms/profiling/profile_routing_algorithms \
libsnark/common/routing_algorithms/tests/test_routing_algorithms \
libsnark/gadgetlib1/gadgets/cpu_checkers/fooram/examples/test_fooram \
libsnark/gadgetlib1/gadgets/hashes/knapsack/tests/test_knapsack_gadget \
libsnark/gadgetlib1/gadgets/routing/profiling/profile_routing_gadgets \
libsnark/gadgetlib1/gadgets/set_commitment/tests/test_set_commitment_gadget \
libsnark/gadgetlib1/gadgets/verifiers/tests/test_r1cs_ppzksnark_verifier_gadget \
libsnark/reductions/ram_to_r1cs/examples/demo_arithmetization \
libsnark/relations/arithmetic_programs/ssp/tests/test_ssp \
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/profiling/profile_r1cs_mp_ppzkpcd \
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/tests/test_r1cs_mp_ppzkpcd \
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/profiling/profile_r1cs_sp_ppzkpcd \
libsnark/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/tests/test_r1cs_sp_ppzkpcd \
libsnark/zk_proof_systems/ppzksnark/bacs_ppzksnark/profiling/profile_bacs_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/bacs_ppzksnark/tests/test_bacs_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/profiling/profile_r1cs_gg_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/tests/test_r1cs_gg_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_generator \
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover \
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_verifier \
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/profiling/profile_ram_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/ram_ppzksnark/tests/test_ram_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/tbcs_ppzksnark/profiling/profile_tbcs_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/tbcs_ppzksnark/tests/test_tbcs_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/uscs_ppzksnark/profiling/profile_uscs_ppzksnark \
libsnark/zk_proof_systems/ppzksnark/uscs_ppzksnark/tests/test_uscs_ppzksnark \
libsnark/zk_proof_systems/zksnark/ram_zksnark/profiling/profile_ram_zksnark \
libsnark/zk_proof_systems/zksnark/ram_zksnark/tests/test_ram_zksnark
EXECUTABLES =
EXECUTABLES_WITH_GTEST =
EXECUTABLES_WITH_SUPERCOP = \
libsnark/zk_proof_systems/ppzkadsnark/r1cs_ppzkadsnark/examples/demo_r1cs_ppzkadsnark
GTEST_TESTS = libsnark/gtests
GTEST_SRCS = \
libsnark/algebra/curves/tests/test_bilinearity.cpp \
libsnark/algebra/curves/tests/test_groups.cpp \
libsnark/algebra/fields/tests/test_bigint.cpp \
libsnark/algebra/fields/tests/test_fields.cpp \
libsnark/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget.cpp \
libsnark/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets.cpp \
libsnark/relations/arithmetic_programs/qap/tests/test_qap.cpp \
libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark.cpp \
libsnark/gtests.cpp
DOCS = README.html
LIBSNARK_A = libsnark.a
# For documentation of the following options, see README.md .
ifeq ($(NO_PROCPS),1)
CXXFLAGS += -DNO_PROCPS
else
LDLIBS += -lprocps
endif
ifeq ($(LOWMEM),1)
CXXFLAGS += -DLOWMEM
endif
ifeq ($(PROFILE_OP_COUNTS),1)
STATIC = 1
CXXFLAGS += -DPROFILE_OP_COUNTS
endif
ifeq ($(STATIC),1)
ifneq ($(PLATFORM),darwin)
CXXFLAGS += -static
endif
CXXFLAGS += -DSTATIC
else
CXXFLAGS += -fPIC
endif
ifeq ($(MULTICORE),1)
CXXFLAGS += -DMULTICORE -fopenmp
endif
ifeq ($(CPPDEBUG),1)
CXXFLAGS += -D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC
DEBUG = 1
endif
ifeq ($(DEBUG),1)
CXXFLAGS += -DDEBUG -ggdb3
endif
ifeq ($(PERFORMANCE),1)
OPTFLAGS = -O3 -march=native -mtune=native
CXXFLAGS += -DNDEBUG
# Enable link-time optimization:
CXXFLAGS += -flto -fuse-linker-plugin
LDFLAGS += -flto
endif
LIB_OBJS =$(patsubst %.cpp,%.o,$(LIB_SRCS))
EXEC_OBJS =$(patsubst %,%.o,$(EXECUTABLES) $(EXECUTABLES_WITH_GTEST) $(EXECUTABLES_WITH_SUPERCOP))
GTEST_OBJS =$(patsubst %.cpp,%.o,$(GTEST_SRCS))
all: \
$(if $(NO_GTEST),,$(EXECUTABLES_WITH_GTEST) $(GTEST_TESTS)) \
$(if $(NO_SUPERCOP),,$(EXECUTABLES_WITH_SUPERCOP)) \
$(EXECUTABLES) \
$(if $(NO_DOCS),,doc)
doc: $(DOCS)
$(DEPINST_EXISTS):
# Create placeholder directories for installed dependencies. Some make settings (including the default) require actually running ./prepare-depends.sh to populate this directory.
mkdir -p $(DEPINST)/lib $(DEPINST)/include
touch $@
# In order to detect changes to #include dependencies. -MMD below generates a .d file for each .o file. Include the .d file.
-include $(patsubst %.o,%.d, $(LIB_OBJS) $(GTEST_OBJS) $(EXEC_OBJS) )
$(LIB_OBJS) $(if $(NO_GTEST),,$(GTEST_OBJS)) $(EXEC_OBJS): %.o: %.cpp
$(CXX) -o $@ $< -c -MMD $(CXXFLAGS)
LIBGTEST_A = $(DEPINST)/lib/libgtest.a
$(LIBGTEST_A): $(GTESTDIR)/libsnark/gtest-all.cc $(DEPINST_EXISTS)
$(CXX) -o $(DEPINST)/lib/gtest-all.o -I $(GTESTDIR) -c -isystem $(GTESTDIR)/include $< $(CXXFLAGS)
$(AR) -rv $(LIBGTEST_A) $(DEPINST)/lib/gtest-all.o
# libsnark.a will contain all of our relevant object files, and we also mash in the .a files of relevant dependencies built by ./prepare-depends.sh
$(LIBSNARK_A): $(LIB_OBJS) $(AR_LIBS)
$(AR) q $(LIBSNARK_A) $(LIB_OBJS)
if [ -n "$(AR_LIBS)" ]; then mkdir -p tmp-ar; cd tmp-ar; for AR_LIB in $(AR_LIBS); do $(AR) x $$AR_LIB; done; $(AR) qc $(LIBSNARK_A) tmp-ar/*; cd ..; rm -r tmp-ar; fi;
$(AR) s $(LIBSNARK_A)
libsnark.so: $(LIBSNARK_A) $(DEPINST_EXISTS)
$(CXX) -o $@ --shared -Wl,--whole-archive $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) -Wl,--no-whole-archive $(LDLIBS)
libsnark/gadgetlib2/tests/gadgetlib2_test: \
libsnark/gadgetlib2/tests/adapters_UTEST.cpp \
libsnark/gadgetlib2/tests/constraint_UTEST.cpp \
libsnark/gadgetlib2/tests/gadget_UTEST.cpp \
libsnark/gadgetlib2/tests/integration_UTEST.cpp \
libsnark/gadgetlib2/tests/protoboard_UTEST.cpp \
libsnark/gadgetlib2/tests/variable_UTEST.cpp
$(EXECUTABLES): %: %.o $(LIBSNARK_A) $(DEPINST_EXISTS)
$(CXX) -o $@ $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(LDLIBS)
$(EXECUTABLES_WITH_GTEST): %: %.o $(LIBSNARK_A) $(if $(COMPILE_LIBGTEST),$(LIBGTEST_A)) $(DEPINST_EXISTS)
$(CXX) -o $@ $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(GTEST_LDLIBS) $(LDLIBS)
$(EXECUTABLES_WITH_SUPERCOP): %: %.o $(LIBSNARK_A) $(DEPINST_EXISTS)
$(CXX) -o $@ $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(SUPERCOP_LDLIBS) $(LDLIBS)
$(GTEST_TESTS): %: $(GTEST_OBJS) $(LIBSNARK_A) $(if $(COMPILE_LIBGTEST),$(LIBGTEST_A)) $(DEPINST_EXISTS)
$(CXX) -o $@ $(GTEST_OBJS) $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(GTEST_LDLIBS) $(LDLIBS)
ifeq ($(STATIC),1)
LIB_FILE = $(LIBSNARK_A)
else
LIB_FILE = libsnark.so
endif
lib: $(LIB_FILE)
$(DOCS): %.html: %.md
markdown_py -f $@ $^ -x toc -x extra --noisy
# TODO: Would be nice to enable "-x smartypants" but Ubuntu 12.04 doesn't support that.
# TODO: switch to redcarpet, to produce same output as GitHub's processing of README.md. But what about TOC?
ifeq ($(PREFIX),)
install:
$(error Please provide PREFIX. E.g. make install PREFIX=/usr)
else
HEADERS_SRC=$(shell find libsnark -name '*.hpp' -o -name '*.tcc')
HEADERS_DEST=$(patsubst libsnark/%,$(PREFIX)/include/libsnark/%,$(HEADERS_SRC))
$(HEADERS_DEST): $(PREFIX)/include/libsnark/%: libsnark/%
mkdir -p $(shell dirname $@)
cp $< $@
install: $(INSTALL_LIBS) $(HEADERS_DEST) $(DEPINST_EXISTS)
mkdir -p $(PREFIX)/lib
cp -v $(INSTALL_LIBS) $(PREFIX)/lib/
ifneq ($(NO_COPY_DEPINST),1)
cp -rv $(DEPINST)/include $(PREFIX)
endif
endif
check: $(GTEST_TESTS)
$(GTEST_TESTS)
doxy:
doxygen doxygen.conf
# Clean generated files, except locally-compiled dependencies
clean:
$(RM) \
$(LIB_OBJS) $(GTEST_OBJS) $(EXEC_OBJS) \
$(EXECUTABLES) $(EXECUTABLES_WITH_GTEST) $(EXECUTABLES_WITH_SUPERCOP) $(GTEST_TESTS) \
$(DOCS) \
${patsubst %.o,%.d,${LIB_OBJS} ${GTEST_OBJS} ${EXEC_OBJS}} \
libsnark.so $(LIBSNARK_A) \
$(RM) -fr doxygen/ \
$(RM) $(LIBGTEST_A) $(DEPINST)/lib/gtest-all.o
# Clean all, including locally-compiled dependencies
clean-all: clean
$(RM) -fr $(DEPSRC) $(DEPINST)
.PHONY: all clean clean-all doc doxy lib install

View File

@ -1,628 +0,0 @@
libsnark: a C++ library for zkSNARK proofs
================================================================================
--------------------------------------------------------------------------------
Authors
--------------------------------------------------------------------------------
The libsnark library is developed by the [SCIPR Lab] project and contributors
and is released under the MIT License (see the [LICENSE] file).
Copyright (c) 2012-2014 SCIPR Lab and contributors (see [AUTHORS] file).
--------------------------------------------------------------------------------
[TOC]
<!---
NOTE: the file you are reading is in Markdown format, which is fairly readable
directly, but can be converted into an HTML file with much nicer formatting.
To do so, run "make doc" (this requires the python-markdown package) and view
the resulting file README.html. Alternatively, view the latest HTML version at
https://github.com/scipr-lab/libsnark .
-->
--------------------------------------------------------------------------------
Overview
--------------------------------------------------------------------------------
This library implements __zkSNARK__ schemes, which are a cryptographic method
for proving/verifying, in zero knowledge, the integrity of computations.
A computation can be expressed as an NP statement, in forms such as the following:
- "The C program _foo_, when executed, returns exit code 0 if given the input _bar_ and some additional input _qux_."
- "The Boolean circuit _foo_ is satisfiable by some input _qux_."
- "The arithmetic circuit _foo_ accepts the partial assignment _bar_, when extended into some full assignment _qux_."
- "The set of constraints _foo_ is satisfiable by the partial assignment _bar_, when extended into some full assignment _qux_."
A prover who knows the witness for the NP statement (i.e., a satisfying input/assignment) can produce a short proof attesting to the truth of the NP statement. This proof can be verified by anyone, and offers the following properties.
- __Zero knowledge:__
the verifier learns nothing from the proof beside the truth of the statement (i.e., the value _qux_, in the above examples, remains secret).
- __Succinctness:__
the proof is short and easy to verify.
- __Non-interactivity:__
the proof is a string (i.e. it does not require back-and-forth interaction between the prover and the verifier).
- __Soundness:__
the proof is computationally sound (i.e., it is infeasible to fake a proof of a false NP statement). Such a proof system is also called an _argument_.
- __Proof of knowledge:__
the proof attests not just that the NP statement is true, but also that the
prover knows why (e.g., knows a valid _qux_).
These properties are summarized by the _zkSNARK_ acronym, which stands for _Zero-Knowledge Succinct Non-interactive ARgument of Knowledge_ (though zkSNARKs are also known as
_succinct non-interactive computationally-sound zero-knowledge proofs of knowledge_).
For formal definitions and theoretical discussions about these, see
\[BCCT12], \[BCIOP13], and the references therein.
The libsnark library currently provides a C++ implementation of:
1. General-purpose proof systems:
1. A preprocessing zkSNARK for the NP-complete language "R1CS"
(_Rank-1 Constraint Systems_), which is a language that is similar to arithmetic
circuit satisfiability.
2. A preprocessing SNARK for a language of arithmetic circuits, "BACS"
(_Bilinear Arithmetic Circuit Satisfiability_). This simplifies the writing
of NP statements when the additional flexibility of R1CS is not needed.
Internally, it reduces to R1CS.
3. A preprocessing SNARK for the language "USCS"
(_Unitary-Square Constraint Systems_). This abstracts and implements the core
contribution of \[DFGK14].
4. A preprocessing SNARK for a language of Boolean circuits, "TBCS"
(_Two-input Boolean Circuit Satisfiability_). Internally, it reduces to USCS.
This is much more efficient than going through R1CS.
5. ADSNARK, a preprocessing SNARK for proving statements on authenticated
data, as described in \[BBFR15].
6. Proof-Carrying Data (PCD). This uses recursive composition of SNARKs, as
explained in \[BCCT13] and optimized in \[BCTV14b].
2. Gadget libraries (gadgetlib1 and gadgetlib2) for constructing R1CS
instances out of modular "gadget" classes.
3. Examples of applications that use the above proof systems to prove
statements about:
1. Several toy examples.
2. Execution of TinyRAM machine code, as explained in \[BCTV14a] and
\[BCGTV13]. (Such machine code can be obtained, e.g., by compiling from C.)
This is easily adapted to any other Random Access Machine that satisfies a
simple load-store interface.
3. A scalable proof system for TinyRAM using Proof-Carrying Data, as explained in \[BCTV14b].
4. Zero-knowledge cluster MapReduce, as explained in \[CTV15].
The zkSNARK construction implemented by libsnark follows, extends, and
optimizes the approach described in \[BCTV14a], itself an extension of
\[BCGTV13], following the approach of \[BCIOP13] and \[GGPR13]. An alternative
implementation of the basic approach is the _Pinocchio_ system of \[PGHR13].
See these references for discussions of efficiency aspects that arise in
practical use of such constructions, as well as security and trust
considerations.
This scheme is a _preprocessing zkSNARK_ (_ppzkSNARK_): before proofs can be
created and verified, one needs to first decide on a size/circuit/system
representing the NP statements to be proved, and run a _generator_ algorithm to
create corresponding public parameters (a long proving key and a short
verification key).
Using the library involves the following high-level steps:
1. Express the statements to be proved as an R1CS (or any of the other
languages above, such as arithmetic circuits, Boolean circuits, or TinyRAM).
This is done by writing C++ code that constructs an R1CS, and linking this code
together with libsnark.
2. Use libsnark's generator algorithm to create the public parameters for this
statement (once and for all).
3. Use libsnark's prover algorithm to create proofs of true statements about
the satisfiability of the R1CS.
4. Use libsnark's verifier algorithm to check proofs for alleged statements.
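For concreteness, here is a minimal sketch of steps 1-4 above, proving knowledge of a secret `x` whose square equals the public input `y`. It uses gadgetlib1's `protoboard` together with the R1CS ppzkSNARK; the exact header paths and the `default_r1cs_ppzksnark_pp` typedef are assumptions that may differ between libsnark revisions, so treat this as illustrative rather than canonical.

    #include "common/default_types/r1cs_ppzksnark_pp.hpp"   // assumed location of default_r1cs_ppzksnark_pp
    #include "gadgetlib1/pb_variable.hpp"
    #include "gadgetlib1/protoboard.hpp"
    #include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp"

    using namespace libsnark;

    int main()
    {
        typedef default_r1cs_ppzksnark_pp ppT;
        typedef Fr<ppT> FieldT;
        ppT::init_public_params();

        /* 1. Express the statement "I know x with x*x = y" as an R1CS. */
        protoboard<FieldT> pb;
        pb_variable<FieldT> y, x;
        y.allocate(pb, "y");              // public (primary) input
        x.allocate(pb, "x");              // private witness
        pb.set_input_sizes(1);
        pb.add_r1cs_constraint(r1cs_constraint<FieldT>(x, x, y), "x*x=y");
        pb.val(x) = FieldT(3);
        pb.val(y) = FieldT(9);

        /* 2. Generator: create the proving and verification keys (once per statement). */
        const r1cs_ppzksnark_keypair<ppT> keypair =
            r1cs_ppzksnark_generator<ppT>(pb.get_constraint_system());

        /* 3. Prover: produce a proof for this instance and witness. */
        const r1cs_ppzksnark_proof<ppT> proof = r1cs_ppzksnark_prover<ppT>(
            keypair.pk, pb.primary_input(), pb.auxiliary_input());

        /* 4. Verifier: check the proof against the public input. */
        const bool ok = r1cs_ppzksnark_verifier_strong_IC<ppT>(
            keypair.vk, pb.primary_input(), proof);
        return ok ? 0 : 1;
    }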
--------------------------------------------------------------------------------
The NP-complete language R1CS
--------------------------------------------------------------------------------
The ppzkSNARK supports proving/verifying membership in a specific NP-complete
language: R1CS (*rank-1 constraint systems*). An instance of the language is
specified by a set of equations over a prime field F, and each equation looks like:
< A, (1,X) > * < B , (1,X) > = < C, (1,X) >
where A,B,C are vectors over F, and X is a vector of variables.
In particular, arithmetic (as well as boolean) circuits are easily reducible to
this language by converting each gate into a rank-1 constraint. See \[BCGTV13]
Appendix E (and "System of Rank 1 Quadratic Equations") for more details about this.
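For example (a toy instance): take the variable vector X = (x, y) and the single equation x * x = y. Choosing

    A = (0, 1, 0),  B = (0, 1, 0),  C = (0, 0, 1)

gives < A, (1,x,y) > = x, < B, (1,x,y) > = x and < C, (1,x,y) > = y, so the rank-1 constraint above reads exactly x * x = y. The assignment (x, y) = (3, 9) satisfies it, while (3, 10) does not.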
--------------------------------------------------------------------------------
Elliptic curve choices
--------------------------------------------------------------------------------
The ppzkSNARK can be instantiated with different parameter choices, depending on
which elliptic curve is used. The libsnark library currently provides three
options:
* "edwards":
an instantiation based on an Edwards curve, providing 80 bits of security.
* "bn128":
an instantiation based on a Barreto-Naehrig curve, providing 128
bits of security. The underlying curve implementation is
\[ate-pairing], which has incorporated our patch that changes the
BN curve to one suitable for SNARK applications.
* This implementation uses dynamically-generated machine code for the curve
arithmetic. Some modern systems disallow execution of code on the heap, and
will thus block this implementation.
For example, on Fedora 20 at its default settings, you will get the error
`zmInit ERR:can't protect` when running this code. To solve this,
run `sudo setsebool -P allow_execheap 1` to allow execution,
or use `make CURVE=ALT_BN128` instead.
* "alt_bn128":
an alternative to "bn128", somewhat slower but avoids dynamic code generation.
Note that bn128 requires an x86-64 CPU while the other curve choices
should be architecture-independent; see [portability](#portability).
--------------------------------------------------------------------------------
Gadget libraries
--------------------------------------------------------------------------------
The libsnark library currently provides two libraries for conveniently constructing
R1CS instances out of reusable "gadgets". Both libraries provide a way to construct
gadgets on other gadgets as well as additional explicit equations. In this way,
complex R1CS instances can be built bottom up.
### gadgetlib1
This is a low-level library which exposes all features of the preprocessing
zkSNARK for R1CS. Its design is based on templates (as is the ppzkSNARK code)
to efficiently support working on multiple elliptic curves simultaneously. This
library is used for most of the constraint-building in libsnark, both internally
(reductions and Proof-Carrying Data) and in example applications.
### gadgetlib2
This is an alternative library for constructing systems of polynomial equations
and, in particular, also R1CS instances. It is better documented and easier to
use than gadgetlib1, and its interface does not use templates. However, fewer
useful gadgets are provided.
--------------------------------------------------------------------------------
Security
--------------------------------------------------------------------------------
The theoretical security of the underlying mathematical constructions, and the
requisite assumptions, are analyzed in detail in the aforementioned research
papers.
**
This code is a research-quality proof of concept, and has not
yet undergone extensive review or testing. It is thus not suitable,
as is, for use in critical or production systems.
**
Known issues include the following:
* The ppzkSNARK's generator and prover exhibit data-dependent running times
and memory usage. These form timing and cache-contention side channels,
which may be an issue in some applications.
* Randomness is retrieved from /dev/urandom, but this should be
changed to a carefully considered (depending on system and threat
model) external, high-quality randomness source when creating
long-term proving/verification keys.
--------------------------------------------------------------------------------
Build instructions
--------------------------------------------------------------------------------
The libsnark library relies on the following:
- C++ build environment
- GMP for certain big-integer arithmetic
- libprocps for reporting memory usage
- GTest for some of the unit tests
So far we have tested these only on Linux, though we have been able to make the library work,
with some features disabled (such as memory profiling or GTest tests), on Windows via Cygwin
and on Mac OS X. (If you succeed in achieving more complete ports of the library, please
let us know!) See also the notes on [portability](#portability) below.
For example, on a fresh install of Ubuntu 14.04, install the following packages:
$ sudo apt-get install build-essential git libgmp3-dev libprocps3-dev libgtest-dev python-markdown libboost-all-dev libssl-dev
Or, on Fedora 20:
$ sudo yum install gcc-c++ make git gmp-devel procps-ng-devel gtest-devel python-markdown
Run the following, to fetch dependencies from their GitHub repos and compile them.
(Not required if you set `CURVE` to something other than the default `BN128` and also set `NO_SUPERCOP=1`.)
$ ./prepare-depends.sh
Then, to compile the library, tests, profiling harness and documentation, run:
$ make
To create just the HTML documentation, run
$ make doc
and then view the resulting `README.html` (which contains the very text you are reading now).
To create Doxygen documentation summarizing all files, classes and functions,
with some (currently sparse) comments, install the `doxygen` and `graphviz` packages, then run
$ make doxy
(this may take a few minutes). Then view the resulting [`doxygen/index.html`](doxygen/index.html).
### Using libsnark as a library
To develop an application that uses libsnark, you could add it within the libsnark directory tree and adjust the Makefile, but it is far better to build libsnark as a (shared or static) library. You can then write your code in a separate directory tree, and link it against libsnark.
To build just the shared object library `libsnark.so`, run:
$ make lib
To build just the static library `libsnark.a`, run:
$ make lib STATIC=1
Note that static compilation requires static versions of all libraries it depends on.
It may help to minimize these dependencies by appending
`CURVE=ALT_BN128 NO_PROCPS=1 NO_GTEST=1 NO_SUPERCOP=1`. On Fedora 21, the requisite
library RPM dependencies are then:
`boost-static glibc-static gmp-static libstdc++-static openssl-static zlib-static
boost-devel glibc-devel gmp-devel libstdc++-devel openssl-devel`.
To build *and install* the libsnark library:
$ make install PREFIX=/install/path
This will install `libsnark.so` into `/install/path/lib`; so your application should be linked using `-L/install/path/lib -lsnark`. It also installs the requisite headers into `/install/path/include`; so your application should be compiled using `-I/install/path/include`.
In addition, unless you use `NO_SUPERCOP=1`, `libsupercop.a` will be installed and should be linked in using `-lsupercop`.
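For example, a hypothetical single-file application `myapp.cpp` could then be built along these lines (the exact set of `-l` flags depends on your build options; additional libraries such as `-lsupercop`, `-lprocps` or Boost libraries may be needed):

    $ g++ -std=c++11 -I/install/path/include myapp.cpp -o myapp -L/install/path/lib -lsnark -lgmpxx -lgmp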
### Building on Windows using Cygwin
Install Cygwin using the graphical installer, including the `g++`, `libgmp`
and `git` packages. Then disable the dependencies not easily supported under Cygwin,
using:
$ make NO_PROCPS=1 NO_GTEST=1 NO_DOCS=1
### Building on Mac OS X
On Mac OS X, install GMP from MacPorts (`port install gmp`). Then disable the
dependencies not easily supported under Mac OS X, using:
$ make NO_PROCPS=1 NO_GTEST=1 NO_DOCS=1
MacPorts does not write its libraries into standard system folders, so you
might need to explicitly provide the paths to the header files and libraries by
appending `CXXFLAGS=-I/opt/local/include LDFLAGS=-L/opt/local/lib` to the line
above. Similarly, to pass the paths to ate-pairing you would run
`INC_DIR=-I/opt/local/include LIB_DIR=-L/opt/local/lib ./prepare-depends.sh`
instead of `./prepare-depends.sh` above.
--------------------------------------------------------------------------------
Tutorials
--------------------------------------------------------------------------------
libsnark includes a tutorial, and some usage examples, for the high-level API.
* `src/gadgetlib1/examples1` contains a simple example for constructing a
constraint system using gadgetlib1.
* `src/gadgetlib2/examples` contains a tutorial for using gadgetlib2 to express
NP statements as constraint systems. It introduces basic terminology, gives a design
overview, and recommends a programming style. It also shows how to invoke
ppzkSNARKs on such constraint systems. The main file, `tutorial.cpp`, builds
into a standalone executable.
* `src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark.cpp`
constructs a simple constraint system and runs the ppzksnark. See below for how to
run it.
--------------------------------------------------------------------------------
Executing profiling example
--------------------------------------------------------------------------------
The command
$ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 Fr
exercises the ppzkSNARK (first generator, then prover, then verifier) on an
R1CS instance with 1000 equations and an input consisting of 10 field elements.
(If you get the error `zmInit ERR:can't protect`, see the discussion
[above](#elliptic-curve-choices).)
The command
$ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 bytes
does the same but now the input consists of 10 bytes.
--------------------------------------------------------------------------------
Build options
--------------------------------------------------------------------------------
The following flags change the behavior of the compiled code.
* `make FEATUREFLAGS='-Dname1 -Dname2 ...'`
Override the active conditional #define names (you can see the default at the top of the Makefile).
The next bullets list the most important conditionally-#defined features.
For example, `make FEATUREFLAGS='-DBINARY_OUTPUT'` enables binary output and disables the default
assembly optimizations and Montgomery-representation output.
* define `BINARY_OUTPUT`
In serialization, output raw binary data (instead of decimal, when not set).
* `make CURVE=choice` / define `CURVE_choice` (where `choice` is one of:
ALT_BN128, BN128, EDWARDS, MNT4, MNT6)
Set the default curve to one of the above (see [elliptic curve choices](#elliptic-curve-choices)).
* `make DEBUG=1` / define `DEBUG`
Print additional information for debugging purposes.
* `make LOWMEM=1` / define `LOWMEM`
Limit the size of multi-exponentiation tables, for low-memory platforms.
* `make NO_DOCS=1`
Do not generate HTML documentation, e.g. on platforms where Markdown is not easily available.
* `make NO_PROCPS=1`
Do not link against libprocps. This disables memory profiling.
* `make NO_GTEST=1`
Do not link against GTest. The tutorial and test suite of gadgetlib2 won't be compiled.
* `make NO_SUPERCOP=1`
Do not link against SUPERCOP for optimized crypto. The ADSNARK executables will not be built.
* `make MULTICORE=1`
Enable parallelized execution of the ppzkSNARK generator and prover, using OpenMP.
This will utilize all cores on the CPU for heavyweight parallelizable operations such as
FFT and multiexponentiation. The default is single-core.
To override the maximum number of cores used, set the environment variable `OMP_NUM_THREADS`
at runtime (not compile time), e.g., `OMP_NUM_THREADS=8 test_r1cs_sp_ppzkpc`. It defaults
to the autodetected number of cores, but on some devices, dynamic core management confuses
OpenMP's autodetection, so setting `OMP_NUM_THREADS` is necessary for full utilization.
* define `NO_PT_COMPRESSION`
Do not use point compression.
This gives much faster serialization times, at the expense of ~2x larger
sizes for serialized keys and proofs.
* define `MONTGOMERY_OUTPUT` (on by default)
Serialize Fp elements as their Montgomery representations. If this
option is disabled then Fp elements are serialized as their
equivalence classes, which is slower but produces human-readable
output.
* `make PROFILE_OP_COUNTS=1` / define `PROFILE_OP_COUNTS`
Collect counts for field and curve operations inside static variables
of the corresponding algebraic objects. This option works for all
curves except bn128.
* define `USE_ASM` (on by default)
Use unrolled assembly routines for F[p] arithmetic and faster heap in
multi-exponentiation. (When not set, use GMP's `mpn_*` routines instead.)
* define `USE_MIXED_ADDITION`
Convert each element of the proving key and verification key to
affine coordinates. This allows using mixed addition formulas in
multiexponentiation and results in slightly faster prover and
verifier runtime, at the expense of increased key-generation time.
* `make PERFORMANCE=1`
Enables compiler optimizations such as link-time optimization, and disables debugging aids.
(On some distributions this causes a `plugin needed to handle lto object` link error and `undefined reference`s, which can be remedied by `AR=gcc-ar make ...`.)
Not all combinations are tested together or supported by every part of the codebase.
--------------------------------------------------------------------------------
Portability
--------------------------------------------------------------------------------
libsnark is written in fairly standard C++11.
However, having been developed on Linux on x86-64 CPUs, libsnark has some limitations
with respect to portability. Specifically:
1. libsnark's algebraic data structures assume little-endian byte order.
2. Profiling routines use `clock_gettime` and `readproc` calls, which are Linux-specific.
3. Random-number generation is done by reading from `/dev/urandom`, which is
specific to Unix-like systems.
4. libsnark binary serialization routines (see `BINARY_OUTPUT` above) assume
a fixed machine word size (i.e. sizeof(mp_limb_t) for GMP's limb data type).
Objects serialized in binary on a 64-bit system cannot be de-serialized on
a 32-bit system, and vice versa.
(The decimal serialization routines have no such limitation.)
5. libsnark requires a C++ compiler with good C++11 support. It has been
tested with g++ 4.7, g++ 4.8, and clang 3.4.
6. On x86-64, we by default use highly optimized assembly implementations for some
operations (see `USE_ASM` above). On other architectures we fall back to a
portable C++ implementation, which is slower.
Tested configurations include:
* Debian jessie with g++ 4.7 on x86-64
* Debian jessie with clang 3.4 on x86-64
* Fedora 20/21 with g++ 4.8.2/4.9.2 on x86-64 and i686
* Ubuntu 14.04 LTS with g++ 4.8 on x86-64
* Ubuntu 14.04 LTS with g++ 4.8 on x86-32, for EDWARDS and ALT_BN128 curve choices
* Debian wheezy with g++ 4.7 on ARM little endian (Debian armel port) inside QEMU, for EDWARDS and ALT_BN128 curve choices
* Windows 7 with g++ 4.8.3 under Cygwin 1.7.30 on x86-64 with NO_PROCPS=1, NO_GTEST=1 and NO_DOCS=1, for EDWARDS and ALT_BN128 curve choices
* Mac OS X 10.9.4 (Mavericks) with Apple LLVM version 5.1 (based on LLVM 3.4svn) on x86-64 with NO_PROCPS=1, NO_GTEST=1 and NO_DOCS=1
--------------------------------------------------------------------------------
Directory structure
--------------------------------------------------------------------------------
The directory structure of the libsnark library is as follows:
* src/ --- main C++ source code, containing the following modules:
* algebra/ --- fields and elliptic curve groups
* common/ --- miscellaneous utilities
* gadgetlib1/ --- gadgetlib1, a library to construct R1CS instances
* gadgets/ --- basic gadgets for gadgetlib1
* gadgetlib2/ --- gadgetlib2, a library to construct R1CS instances
* qap/ --- quadratic arithmetic program
* domains/ --- support for fast interpolation/evaluation, by providing
FFTs and Lagrange-coefficient computations for various domains
* relations/ --- interfaces for expressing statements (relations between instances and witnesses) as various NP-complete languages
* constraint_satisfaction_problems/ --- R1CS and USCS languages
* circuit_satisfaction_problems/ --- Boolean and arithmetic circuit satisfiability languages
* ram_computations/ --- RAM computation languages
* zk_proof_systems --- interfaces and implementations of the proof systems
* reductions --- reductions between languages (used internally, but contains many examples of building constraints)
Some of these module directories have the following subdirectories:
* ...
* examples/ --- example code and tutorials for this module
* tests/ --- unit tests for this module
In particular, the top-level API examples are at `src/r1cs_ppzksnark/examples/` and `src/gadgetlib2/examples/`.
* depsrc/ --- created by `prepare-depends.sh` for retrieved source code and local builds of external code
(currently: \[ate-pairing], and its dependency xbyak).
* depinst/ --- created by `prepare-depends.sh` and `Makefile`
for local installation of locally-compiled dependencies.
* doxygen/ --- created by `make doxy` and contains a Doxygen summary of all files, classes etc. in libsnark.
--------------------------------------------------------------------------------
Further considerations
--------------------------------------------------------------------------------
### Multiexponentiation window size
The ppzkSNARK's generator has to solve a fixed-base multi-exponentiation
problem. We use a window-based method in which the optimal window size depends
on the size of the multiexponentiation instance *and* the platform.
On our benchmarking platform (a 3.40 GHz Intel Core i7-4770 CPU), we have
computed optimal windows for each curve, provided as
"fixed_base_exp_window_table" initialization sequences; see
`X_init.cpp` for X=edwards,bn128,alt_bn128.
Performance on other platforms may not be optimal (but is probably not far off).
Future releases of the libsnark library will include a tool that generates
optimal window sizes.
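As a rough cost model (a back-of-the-envelope sketch, not the exact criterion used to generate those tables): for b-bit scalars and window size w, a fixed-base table stores about

    ceil(b/w) * 2^w precomputed group elements,

and each subsequent exponentiation then takes about ceil(b/w) group additions. Larger windows therefore trade precomputation time and memory for cheaper individual exponentiations, which is why the optimal w grows with the number of exponentiations sharing the same base.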
--------------------------------------------------------------------------------
References
--------------------------------------------------------------------------------
\[BBFR15] [
_ADSNARK: nearly practical and privacy-preserving proofs on authenticated data_
](https://eprint.iacr.org/2014/617),
Michael Backes, Manuel Barbosa, Dario Fiore, Raphael M. Reischuk,
IEEE Symposium on Security and Privacy (Oakland) 2015
\[BCCT12] [
_From extractable collision resistance to succinct non-Interactive arguments of knowledge, and back again_
](http://eprint.iacr.org/2011/443),
Nir Bitansky, Ran Canetti, Alessandro Chiesa, Eran Tromer,
Innovations in Computer Science (ITCS) 2012
\[BCCT13] [
_Recursive composition and bootstrapping for SNARKs and proof-carrying data_
](http://eprint.iacr.org/2012/095)
Nir Bitansky, Ran Canetti, Alessandro Chiesa, Eran Tromer,
Symposium on Theory of Computing (STOC) 2013
\[BCGTV13] [
_SNARKs for C: Verifying Program Executions Succinctly and in Zero Knowledge_
](http://eprint.iacr.org/2013/507),
Eli Ben-Sasson, Alessandro Chiesa, Daniel Genkin, Eran Tromer, Madars Virza,
CRYPTO 2013
\[BCIOP13] [
_Succinct Non-Interactive Arguments via Linear Interactive Proofs_
](http://eprint.iacr.org/2012/718),
Nir Bitansky, Alessandro Chiesa, Yuval Ishai, Rafail Ostrovsky, Omer Paneth,
Theory of Cryptography Conference 2013
\[BCTV14a] [
_Succinct Non-Interactive Zero Knowledge for a von Neumann Architecture_
](http://eprint.iacr.org/2013/879),
Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza,
USENIX Security 2014
\[BCTV14b] [
_Scalable succinct non-interactive arguments via cycles of elliptic curves_
](https://eprint.iacr.org/2014/595),
Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza,
CRYPTO 2014
\[CTV15] [
_Cluster computing in zero knowledge_
](https://eprint.iacr.org/2015/377),
Alessandro Chiesa, Eran Tromer, Madars Virza,
Eurocrypt 2015
\[DFGK14] [
_Square span programs with applications to succinct NIZK arguments_
](https://eprint.iacr.org/2014/718),
George Danezis, Cedric Fournet, Jens Groth, Markulf Kohlweiss,
ASIACCS 2014
\[GGPR13] [
_Quadratic span programs and succinct NIZKs without PCPs_
](http://eprint.iacr.org/2012/215),
Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova,
EUROCRYPT 2013
\[ate-pairing] [
_High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_
](https://github.com/herumi/ate-pairing),
MITSUNARI Shigeo, TERUYA Tadanori
\[PGHR13] [
_Pinocchio: Nearly Practical Verifiable Computation_
](http://eprint.iacr.org/2013/279),
Bryan Parno, Craig Gentry, Jon Howell, Mariana Raykova,
IEEE Symposium on Security and Privacy (Oakland) 2013
[SCIPR Lab]: http://www.scipr-lab.org/ (Succinct Computational Integrity and Privacy Research Lab)
[LICENSE]: LICENSE (LICENSE file in top directory of libsnark distribution)
[AUTHORS]: AUTHORS (AUTHORS file in top directory of libsnark distribution)

File diff suppressed because it is too large

View File

@ -1,524 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
namespace libsnark {
#ifdef PROFILE_OP_COUNTS
int64_t alt_bn128_G1::add_cnt = 0;
int64_t alt_bn128_G1::dbl_cnt = 0;
#endif
std::vector<size_t> alt_bn128_G1::wnaf_window_table;
std::vector<size_t> alt_bn128_G1::fixed_base_exp_window_table;
alt_bn128_G1 alt_bn128_G1::G1_zero;
alt_bn128_G1 alt_bn128_G1::G1_one;
alt_bn128_G1::alt_bn128_G1()
{
this->X = G1_zero.X;
this->Y = G1_zero.Y;
this->Z = G1_zero.Z;
}
void alt_bn128_G1::print() const
{
if (this->is_zero())
{
printf("O\n");
}
else
{
alt_bn128_G1 copy(*this);
copy.to_affine_coordinates();
gmp_printf("(%Nd , %Nd)\n",
copy.X.as_bigint().data, alt_bn128_Fq::num_limbs,
copy.Y.as_bigint().data, alt_bn128_Fq::num_limbs);
}
}
void alt_bn128_G1::print_coordinates() const
{
if (this->is_zero())
{
printf("O\n");
}
else
{
gmp_printf("(%Nd : %Nd : %Nd)\n",
this->X.as_bigint().data, alt_bn128_Fq::num_limbs,
this->Y.as_bigint().data, alt_bn128_Fq::num_limbs,
this->Z.as_bigint().data, alt_bn128_Fq::num_limbs);
}
}
void alt_bn128_G1::to_affine_coordinates()
{
if (this->is_zero())
{
this->X = alt_bn128_Fq::zero();
this->Y = alt_bn128_Fq::one();
this->Z = alt_bn128_Fq::zero();
}
else
{
alt_bn128_Fq Z_inv = Z.inverse();
alt_bn128_Fq Z2_inv = Z_inv.squared();
alt_bn128_Fq Z3_inv = Z2_inv * Z_inv;
this->X = this->X * Z2_inv;
this->Y = this->Y * Z3_inv;
this->Z = alt_bn128_Fq::one();
}
}
void alt_bn128_G1::to_special()
{
this->to_affine_coordinates();
}
bool alt_bn128_G1::is_special() const
{
return (this->is_zero() || this->Z == alt_bn128_Fq::one());
}
bool alt_bn128_G1::is_zero() const
{
return (this->Z.is_zero());
}
bool alt_bn128_G1::operator==(const alt_bn128_G1 &other) const
{
if (this->is_zero())
{
return other.is_zero();
}
if (other.is_zero())
{
return false;
}
/* now neither is O */
// using Jacobian coordinates so:
// (X1:Y1:Z1) = (X2:Y2:Z2)
// iff
// X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
// iff
// X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
alt_bn128_Fq Z1_squared = (this->Z).squared();
alt_bn128_Fq Z2_squared = (other.Z).squared();
if ((this->X * Z2_squared) != (other.X * Z1_squared))
{
return false;
}
alt_bn128_Fq Z1_cubed = (this->Z) * Z1_squared;
alt_bn128_Fq Z2_cubed = (other.Z) * Z2_squared;
if ((this->Y * Z2_cubed) != (other.Y * Z1_cubed))
{
return false;
}
return true;
}
bool alt_bn128_G1::operator!=(const alt_bn128_G1& other) const
{
return !(operator==(other));
}
alt_bn128_G1 alt_bn128_G1::operator+(const alt_bn128_G1 &other) const
{
// handle special cases having to do with O
if (this->is_zero())
{
return other;
}
if (other.is_zero())
{
return *this;
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// check for doubling case
// using Jacobian coordinates so:
// (X1:Y1:Z1) = (X2:Y2:Z2)
// iff
// X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
// iff
// X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
alt_bn128_Fq Z1Z1 = (this->Z).squared();
alt_bn128_Fq Z2Z2 = (other.Z).squared();
alt_bn128_Fq U1 = this->X * Z2Z2;
alt_bn128_Fq U2 = other.X * Z1Z1;
alt_bn128_Fq Z1_cubed = (this->Z) * Z1Z1;
alt_bn128_Fq Z2_cubed = (other.Z) * Z2Z2;
alt_bn128_Fq S1 = (this->Y) * Z2_cubed; // S1 = Y1 * Z2 * Z2Z2
alt_bn128_Fq S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
if (U1 == U2 && S1 == S2)
{
// dbl case; nothing of above can be reused
return this->dbl();
}
// rest of add case
alt_bn128_Fq H = U2 - U1; // H = U2-U1
alt_bn128_Fq S2_minus_S1 = S2-S1;
alt_bn128_Fq I = (H+H).squared(); // I = (2 * H)^2
alt_bn128_Fq J = H * I; // J = H * I
alt_bn128_Fq r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
alt_bn128_Fq V = U1 * I; // V = U1 * I
alt_bn128_Fq X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
alt_bn128_Fq S1_J = S1 * J;
alt_bn128_Fq Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
alt_bn128_Fq Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
return alt_bn128_G1(X3, Y3, Z3);
}
alt_bn128_G1 alt_bn128_G1::operator-() const
{
return alt_bn128_G1(this->X, -(this->Y), this->Z);
}
alt_bn128_G1 alt_bn128_G1::operator-(const alt_bn128_G1 &other) const
{
return (*this) + (-other);
}
alt_bn128_G1 alt_bn128_G1::add(const alt_bn128_G1 &other) const
{
// handle special cases having to do with O
if (this->is_zero())
{
return other;
}
if (other.is_zero())
{
return *this;
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// handle double case
if (this->operator==(other))
{
return this->dbl();
}
#ifdef PROFILE_OP_COUNTS
this->add_cnt++;
#endif
// NOTE: does not handle O and pts of order 2,4
// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
alt_bn128_Fq Z1Z1 = (this->Z).squared(); // Z1Z1 = Z1^2
alt_bn128_Fq Z2Z2 = (other.Z).squared(); // Z2Z2 = Z2^2
alt_bn128_Fq U1 = (this->X) * Z2Z2; // U1 = X1 * Z2Z2
alt_bn128_Fq U2 = (other.X) * Z1Z1; // U2 = X2 * Z1Z1
alt_bn128_Fq S1 = (this->Y) * (other.Z) * Z2Z2; // S1 = Y1 * Z2 * Z2Z2
alt_bn128_Fq S2 = (other.Y) * (this->Z) * Z1Z1; // S2 = Y2 * Z1 * Z1Z1
alt_bn128_Fq H = U2 - U1; // H = U2-U1
alt_bn128_Fq S2_minus_S1 = S2-S1;
alt_bn128_Fq I = (H+H).squared(); // I = (2 * H)^2
alt_bn128_Fq J = H * I; // J = H * I
alt_bn128_Fq r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
alt_bn128_Fq V = U1 * I; // V = U1 * I
alt_bn128_Fq X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
alt_bn128_Fq S1_J = S1 * J;
alt_bn128_Fq Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
alt_bn128_Fq Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
return alt_bn128_G1(X3, Y3, Z3);
}
alt_bn128_G1 alt_bn128_G1::mixed_add(const alt_bn128_G1 &other) const
{
#ifdef DEBUG
assert(other.is_special());
#endif
// handle special cases having to do with O
if (this->is_zero())
{
return other;
}
if (other.is_zero())
{
return *this;
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// check for doubling case
// using Jacobian coordinates so:
// (X1:Y1:Z1) = (X2:Y2:Z2)
// iff
// X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
// iff
// X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
// we know that Z2 = 1
const alt_bn128_Fq Z1Z1 = (this->Z).squared();
const alt_bn128_Fq &U1 = this->X;
const alt_bn128_Fq U2 = other.X * Z1Z1;
const alt_bn128_Fq Z1_cubed = (this->Z) * Z1Z1;
const alt_bn128_Fq &S1 = (this->Y); // S1 = Y1 * Z2 * Z2Z2
const alt_bn128_Fq S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
if (U1 == U2 && S1 == S2)
{
// dbl case; nothing of above can be reused
return this->dbl();
}
#ifdef PROFILE_OP_COUNTS
this->add_cnt++;
#endif
// NOTE: does not handle O and pts of order 2,4
// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
alt_bn128_Fq H = U2-(this->X); // H = U2-X1
alt_bn128_Fq HH = H.squared(); // HH = H^2
alt_bn128_Fq I = HH+HH; // I = 4*HH
I = I + I;
alt_bn128_Fq J = H*I; // J = H*I
alt_bn128_Fq r = S2-(this->Y); // r = 2*(S2-Y1)
r = r + r;
alt_bn128_Fq V = (this->X) * I ; // V = X1*I
alt_bn128_Fq X3 = r.squared()-J-V-V; // X3 = r^2-J-2*V
alt_bn128_Fq Y3 = (this->Y)*J; // Y3 = r*(V-X3)-2*Y1*J
Y3 = r*(V-X3) - Y3 - Y3;
alt_bn128_Fq Z3 = ((this->Z)+H).squared() - Z1Z1 - HH; // Z3 = (Z1+H)^2-Z1Z1-HH
return alt_bn128_G1(X3, Y3, Z3);
}
alt_bn128_G1 alt_bn128_G1::dbl() const
{
#ifdef PROFILE_OP_COUNTS
this->dbl_cnt++;
#endif
// handle point at infinity
if (this->is_zero())
{
return (*this);
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// NOTE: does not handle O and pts of order 2,4
// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
alt_bn128_Fq A = (this->X).squared(); // A = X1^2
alt_bn128_Fq B = (this->Y).squared(); // B = Y1^2
alt_bn128_Fq C = B.squared(); // C = B^2
alt_bn128_Fq D = (this->X + B).squared() - A - C;
D = D+D; // D = 2 * ((X1 + B)^2 - A - C)
alt_bn128_Fq E = A + A + A; // E = 3 * A
alt_bn128_Fq F = E.squared(); // F = E^2
alt_bn128_Fq X3 = F - (D+D); // X3 = F - 2 D
alt_bn128_Fq eightC = C+C;
eightC = eightC + eightC;
eightC = eightC + eightC;
alt_bn128_Fq Y3 = E * (D - X3) - eightC; // Y3 = E * (D - X3) - 8 * C
alt_bn128_Fq Y1Z1 = (this->Y)*(this->Z);
alt_bn128_Fq Z3 = Y1Z1 + Y1Z1; // Z3 = 2 * Y1 * Z1
return alt_bn128_G1(X3, Y3, Z3);
}
bool alt_bn128_G1::is_well_formed() const
{
if (this->is_zero())
{
return true;
}
else
{
/*
y^2 = x^3 + b
We are using Jacobian coordinates, so equation we need to check is actually
(y/z^3)^2 = (x/z^2)^3 + b
y^2 / z^6 = x^3 / z^6 + b
y^2 = x^3 + b z^6
*/
alt_bn128_Fq X2 = this->X.squared();
alt_bn128_Fq Y2 = this->Y.squared();
alt_bn128_Fq Z2 = this->Z.squared();
alt_bn128_Fq X3 = this->X * X2;
alt_bn128_Fq Z3 = this->Z * Z2;
alt_bn128_Fq Z6 = Z3.squared();
return (Y2 == X3 + alt_bn128_coeff_b * Z6);
}
}
alt_bn128_G1 alt_bn128_G1::zero()
{
return G1_zero;
}
alt_bn128_G1 alt_bn128_G1::one()
{
return G1_one;
}
alt_bn128_G1 alt_bn128_G1::random_element()
{
return (scalar_field::random_element().as_bigint()) * G1_one;
}
std::ostream& operator<<(std::ostream &out, const alt_bn128_G1 &g)
{
alt_bn128_G1 copy(g);
copy.to_affine_coordinates();
out << (copy.is_zero() ? 1 : 0) << OUTPUT_SEPARATOR;
#ifdef NO_PT_COMPRESSION
out << copy.X << OUTPUT_SEPARATOR << copy.Y;
#else
/* storing LSB of Y */
out << copy.X << OUTPUT_SEPARATOR << (copy.Y.as_bigint().data[0] & 1);
#endif
return out;
}
std::istream& operator>>(std::istream &in, alt_bn128_G1 &g)
{
char is_zero;
alt_bn128_Fq tX, tY;
#ifdef NO_PT_COMPRESSION
in >> is_zero >> tX >> tY;
is_zero -= '0';
#else
in.read((char*)&is_zero, 1); // this reads is_zero;
is_zero -= '0';
consume_OUTPUT_SEPARATOR(in);
unsigned char Y_lsb;
in >> tX;
consume_OUTPUT_SEPARATOR(in);
in.read((char*)&Y_lsb, 1);
Y_lsb -= '0';
// y = +/- sqrt(x^3 + b)
if (!is_zero)
{
alt_bn128_Fq tX2 = tX.squared();
alt_bn128_Fq tY2 = tX2*tX + alt_bn128_coeff_b;
tY = tY2.sqrt();
if ((tY.as_bigint().data[0] & 1) != Y_lsb)
{
tY = -tY;
}
}
#endif
// using Jacobian coordinates
if (!is_zero)
{
g.X = tX;
g.Y = tY;
g.Z = alt_bn128_Fq::one();
}
else
{
g = alt_bn128_G1::zero();
}
return in;
}
std::ostream& operator<<(std::ostream& out, const std::vector<alt_bn128_G1> &v)
{
out << v.size() << "\n";
for (const alt_bn128_G1& t : v)
{
out << t << OUTPUT_NEWLINE;
}
return out;
}
std::istream& operator>>(std::istream& in, std::vector<alt_bn128_G1> &v)
{
v.clear();
size_t s;
in >> s;
consume_newline(in);
v.reserve(s);
for (size_t i = 0; i < s; ++i)
{
alt_bn128_G1 g;
in >> g;
consume_OUTPUT_NEWLINE(in);
v.emplace_back(g);
}
return in;
}
template<>
void batch_to_special_all_non_zeros<alt_bn128_G1>(std::vector<alt_bn128_G1> &vec)
{
std::vector<alt_bn128_Fq> Z_vec;
Z_vec.reserve(vec.size());
for (auto &el: vec)
{
Z_vec.emplace_back(el.Z);
}
batch_invert<alt_bn128_Fq>(Z_vec);
const alt_bn128_Fq one = alt_bn128_Fq::one();
for (size_t i = 0; i < vec.size(); ++i)
{
alt_bn128_Fq Z2 = Z_vec[i].squared();
alt_bn128_Fq Z3 = Z_vec[i] * Z2;
vec[i].X = vec[i].X * Z2;
vec[i].Y = vec[i].Y * Z3;
vec[i].Z = one;
}
}
} // libsnark

View File

@ -1,95 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef ALT_BN128_G1_HPP_
#define ALT_BN128_G1_HPP_
#include <vector>
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
#include "algebra/curves/curve_utils.hpp"
namespace libsnark {
class alt_bn128_G1;
std::ostream& operator<<(std::ostream &, const alt_bn128_G1&);
std::istream& operator>>(std::istream &, alt_bn128_G1&);
class alt_bn128_G1 {
public:
#ifdef PROFILE_OP_COUNTS
static int64_t add_cnt;
static int64_t dbl_cnt;
#endif
static std::vector<size_t> wnaf_window_table;
static std::vector<size_t> fixed_base_exp_window_table;
static alt_bn128_G1 G1_zero;
static alt_bn128_G1 G1_one;
typedef alt_bn128_Fq base_field;
typedef alt_bn128_Fr scalar_field;
alt_bn128_Fq X, Y, Z;
// using Jacobian coordinates
alt_bn128_G1();
alt_bn128_G1(const alt_bn128_Fq& X, const alt_bn128_Fq& Y, const alt_bn128_Fq& Z) : X(X), Y(Y), Z(Z) {};
void print() const;
void print_coordinates() const;
void to_affine_coordinates();
void to_special();
bool is_special() const;
bool is_zero() const;
bool operator==(const alt_bn128_G1 &other) const;
bool operator!=(const alt_bn128_G1 &other) const;
alt_bn128_G1 operator+(const alt_bn128_G1 &other) const;
alt_bn128_G1 operator-() const;
alt_bn128_G1 operator-(const alt_bn128_G1 &other) const;
alt_bn128_G1 add(const alt_bn128_G1 &other) const;
alt_bn128_G1 mixed_add(const alt_bn128_G1 &other) const;
alt_bn128_G1 dbl() const;
bool is_well_formed() const;
static alt_bn128_G1 zero();
static alt_bn128_G1 one();
static alt_bn128_G1 random_element();
static size_t size_in_bits() { return base_field::size_in_bits() + 1; }
static bigint<base_field::num_limbs> base_field_char() { return base_field::field_char(); }
static bigint<scalar_field::num_limbs> order() { return scalar_field::field_char(); }
friend std::ostream& operator<<(std::ostream &out, const alt_bn128_G1 &g);
friend std::istream& operator>>(std::istream &in, alt_bn128_G1 &g);
};
template<mp_size_t m>
alt_bn128_G1 operator*(const bigint<m> &lhs, const alt_bn128_G1 &rhs)
{
return scalar_mul<alt_bn128_G1, m>(rhs, lhs);
}
template<mp_size_t m, const bigint<m>& modulus_p>
alt_bn128_G1 operator*(const Fp_model<m,modulus_p> &lhs, const alt_bn128_G1 &rhs)
{
return scalar_mul<alt_bn128_G1, m>(rhs, lhs.as_bigint());
}
std::ostream& operator<<(std::ostream& out, const std::vector<alt_bn128_G1> &v);
std::istream& operator>>(std::istream& in, std::vector<alt_bn128_G1> &v);
template<typename T>
void batch_to_special_all_non_zeros(std::vector<T> &vec);
template<>
void batch_to_special_all_non_zeros<alt_bn128_G1>(std::vector<alt_bn128_G1> &vec);
} // libsnark
#endif // ALT_BN128_G1_HPP_
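A small usage sketch of this interface (hypothetical example code; it assumes the curve constants are initialized via `alt_bn128_pp::init_public_params()` from `algebra/curves/alt_bn128/alt_bn128_pp.hpp`):

    #include <cassert>
    #include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"

    using namespace libsnark;

    int main()
    {
        alt_bn128_pp::init_public_params();  // fills in the constants set up by init_alt_bn128_params()

        alt_bn128_G1 P = alt_bn128_G1::random_element();
        assert(P.is_well_formed());
        assert(P.dbl() == P + P);            // doubling agrees with addition

        alt_bn128_Fr k = alt_bn128_Fr::random_element();
        alt_bn128_G1 Q = k * P;              // scalar multiplication via the operator* overloads above
        assert(Q.is_well_formed());
        return 0;
    }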

View File

@ -1,505 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
namespace libsnark {
#ifdef PROFILE_OP_COUNTS
int64_t alt_bn128_G2::add_cnt = 0;
int64_t alt_bn128_G2::dbl_cnt = 0;
#endif
std::vector<size_t> alt_bn128_G2::wnaf_window_table;
std::vector<size_t> alt_bn128_G2::fixed_base_exp_window_table;
alt_bn128_G2 alt_bn128_G2::G2_zero;
alt_bn128_G2 alt_bn128_G2::G2_one;
alt_bn128_G2::alt_bn128_G2()
{
this->X = G2_zero.X;
this->Y = G2_zero.Y;
this->Z = G2_zero.Z;
}
alt_bn128_Fq2 alt_bn128_G2::mul_by_b(const alt_bn128_Fq2 &elt)
{
return alt_bn128_Fq2(alt_bn128_twist_mul_by_b_c0 * elt.c0, alt_bn128_twist_mul_by_b_c1 * elt.c1);
}
void alt_bn128_G2::print() const
{
if (this->is_zero())
{
printf("O\n");
}
else
{
alt_bn128_G2 copy(*this);
copy.to_affine_coordinates();
gmp_printf("(%Nd*z + %Nd , %Nd*z + %Nd)\n",
copy.X.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
copy.X.c0.as_bigint().data, alt_bn128_Fq::num_limbs,
copy.Y.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
copy.Y.c0.as_bigint().data, alt_bn128_Fq::num_limbs);
}
}
void alt_bn128_G2::print_coordinates() const
{
if (this->is_zero())
{
printf("O\n");
}
else
{
gmp_printf("(%Nd*z + %Nd : %Nd*z + %Nd : %Nd*z + %Nd)\n",
this->X.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
this->X.c0.as_bigint().data, alt_bn128_Fq::num_limbs,
this->Y.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
this->Y.c0.as_bigint().data, alt_bn128_Fq::num_limbs,
this->Z.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
this->Z.c0.as_bigint().data, alt_bn128_Fq::num_limbs);
}
}
void alt_bn128_G2::to_affine_coordinates()
{
if (this->is_zero())
{
this->X = alt_bn128_Fq2::zero();
this->Y = alt_bn128_Fq2::one();
this->Z = alt_bn128_Fq2::zero();
}
else
{
alt_bn128_Fq2 Z_inv = Z.inverse();
alt_bn128_Fq2 Z2_inv = Z_inv.squared();
alt_bn128_Fq2 Z3_inv = Z2_inv * Z_inv;
this->X = this->X * Z2_inv;
this->Y = this->Y * Z3_inv;
this->Z = alt_bn128_Fq2::one();
}
}
void alt_bn128_G2::to_special()
{
this->to_affine_coordinates();
}
bool alt_bn128_G2::is_special() const
{
return (this->is_zero() || this->Z == alt_bn128_Fq2::one());
}
bool alt_bn128_G2::is_zero() const
{
return (this->Z.is_zero());
}
bool alt_bn128_G2::operator==(const alt_bn128_G2 &other) const
{
if (this->is_zero())
{
return other.is_zero();
}
if (other.is_zero())
{
return false;
}
/* now neither is O */
// using Jacobian coordinates so:
// (X1:Y1:Z1) = (X2:Y2:Z2)
// iff
// X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
// iff
// X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
alt_bn128_Fq2 Z1_squared = (this->Z).squared();
alt_bn128_Fq2 Z2_squared = (other.Z).squared();
if ((this->X * Z2_squared) != (other.X * Z1_squared))
{
return false;
}
alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1_squared;
alt_bn128_Fq2 Z2_cubed = (other.Z) * Z2_squared;
if ((this->Y * Z2_cubed) != (other.Y * Z1_cubed))
{
return false;
}
return true;
}
bool alt_bn128_G2::operator!=(const alt_bn128_G2& other) const
{
return !(operator==(other));
}
alt_bn128_G2 alt_bn128_G2::operator+(const alt_bn128_G2 &other) const
{
// handle special cases having to do with O
if (this->is_zero())
{
return other;
}
if (other.is_zero())
{
return *this;
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// check for doubling case
// using Jacobian coordinates so:
// (X1:Y1:Z1) = (X2:Y2:Z2)
// iff
// X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
// iff
// X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
alt_bn128_Fq2 Z1Z1 = (this->Z).squared();
alt_bn128_Fq2 Z2Z2 = (other.Z).squared();
alt_bn128_Fq2 U1 = this->X * Z2Z2;
alt_bn128_Fq2 U2 = other.X * Z1Z1;
alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1Z1;
alt_bn128_Fq2 Z2_cubed = (other.Z) * Z2Z2;
alt_bn128_Fq2 S1 = (this->Y) * Z2_cubed; // S1 = Y1 * Z2 * Z2Z2
alt_bn128_Fq2 S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
if (U1 == U2 && S1 == S2)
{
// dbl case; nothing of above can be reused
return this->dbl();
}
// rest of add case
alt_bn128_Fq2 H = U2 - U1; // H = U2-U1
alt_bn128_Fq2 S2_minus_S1 = S2-S1;
alt_bn128_Fq2 I = (H+H).squared(); // I = (2 * H)^2
alt_bn128_Fq2 J = H * I; // J = H * I
alt_bn128_Fq2 r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
alt_bn128_Fq2 V = U1 * I; // V = U1 * I
alt_bn128_Fq2 X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
alt_bn128_Fq2 S1_J = S1 * J;
alt_bn128_Fq2 Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
alt_bn128_Fq2 Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
return alt_bn128_G2(X3, Y3, Z3);
}
alt_bn128_G2 alt_bn128_G2::operator-() const
{
return alt_bn128_G2(this->X, -(this->Y), this->Z);
}
alt_bn128_G2 alt_bn128_G2::operator-(const alt_bn128_G2 &other) const
{
return (*this) + (-other);
}
alt_bn128_G2 alt_bn128_G2::add(const alt_bn128_G2 &other) const
{
// handle special cases having to do with O
if (this->is_zero())
{
return other;
}
if (other.is_zero())
{
return *this;
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// handle double case
if (this->operator==(other))
{
return this->dbl();
}
#ifdef PROFILE_OP_COUNTS
this->add_cnt++;
#endif
// NOTE: does not handle O and pts of order 2,4
// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
alt_bn128_Fq2 Z1Z1 = (this->Z).squared(); // Z1Z1 = Z1^2
alt_bn128_Fq2 Z2Z2 = (other.Z).squared(); // Z2Z2 = Z2^2
alt_bn128_Fq2 U1 = (this->X) * Z2Z2; // U1 = X1 * Z2Z2
alt_bn128_Fq2 U2 = (other.X) * Z1Z1; // U2 = X2 * Z1Z1
alt_bn128_Fq2 S1 = (this->Y) * (other.Z) * Z2Z2; // S1 = Y1 * Z2 * Z2Z2
alt_bn128_Fq2 S2 = (other.Y) * (this->Z) * Z1Z1; // S2 = Y2 * Z1 * Z1Z1
alt_bn128_Fq2 H = U2 - U1; // H = U2-U1
alt_bn128_Fq2 S2_minus_S1 = S2-S1;
alt_bn128_Fq2 I = (H+H).squared(); // I = (2 * H)^2
alt_bn128_Fq2 J = H * I; // J = H * I
alt_bn128_Fq2 r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
alt_bn128_Fq2 V = U1 * I; // V = U1 * I
alt_bn128_Fq2 X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
alt_bn128_Fq2 S1_J = S1 * J;
alt_bn128_Fq2 Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
alt_bn128_Fq2 Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
return alt_bn128_G2(X3, Y3, Z3);
}
alt_bn128_G2 alt_bn128_G2::mixed_add(const alt_bn128_G2 &other) const
{
#ifdef DEBUG
assert(other.is_special());
#endif
// handle special cases having to do with O
if (this->is_zero())
{
return other;
}
if (other.is_zero())
{
return *this;
}
// no need to handle points of order 2,4
// (they cannot exist in a prime-order subgroup)
// check for doubling case
// using Jacobian coordinates so:
// (X1:Y1:Z1) = (X2:Y2:Z2)
// iff
// X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
// iff
// X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
// we know that Z2 = 1
const alt_bn128_Fq2 Z1Z1 = (this->Z).squared();
const alt_bn128_Fq2 &U1 = this->X;
const alt_bn128_Fq2 U2 = other.X * Z1Z1;
const alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1Z1;
const alt_bn128_Fq2 &S1 = (this->Y); // S1 = Y1 * Z2 * Z2Z2
const alt_bn128_Fq2 S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
if (U1 == U2 && S1 == S2)
{
// dbl case; nothing of above can be reused
return this->dbl();
}
#ifdef PROFILE_OP_COUNTS
this->add_cnt++;
#endif
// NOTE: does not handle O and pts of order 2,4
// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
alt_bn128_Fq2 H = U2-(this->X); // H = U2-X1
alt_bn128_Fq2 HH = H.squared(); // HH = H^2
alt_bn128_Fq2 I = HH+HH; // I = 4*HH
I = I + I;
alt_bn128_Fq2 J = H*I; // J = H*I
alt_bn128_Fq2 r = S2-(this->Y); // r = 2*(S2-Y1)
r = r + r;
alt_bn128_Fq2 V = (this->X) * I ; // V = X1*I
alt_bn128_Fq2 X3 = r.squared()-J-V-V; // X3 = r^2-J-2*V
alt_bn128_Fq2 Y3 = (this->Y)*J; // Y3 = r*(V-X3)-2*Y1*J
Y3 = r*(V-X3) - Y3 - Y3;
alt_bn128_Fq2 Z3 = ((this->Z)+H).squared() - Z1Z1 - HH; // Z3 = (Z1+H)^2-Z1Z1-HH
return alt_bn128_G2(X3, Y3, Z3);
}
alt_bn128_G2 alt_bn128_G2::dbl() const
{
#ifdef PROFILE_OP_COUNTS
this->dbl_cnt++;
#endif
// handle point at infinity
if (this->is_zero())
{
return (*this);
}
// NOTE: does not handle O and pts of order 2,4
// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
alt_bn128_Fq2 A = (this->X).squared(); // A = X1^2
alt_bn128_Fq2 B = (this->Y).squared(); // B = Y1^2
alt_bn128_Fq2 C = B.squared(); // C = B^2
alt_bn128_Fq2 D = (this->X + B).squared() - A - C;
D = D+D; // D = 2 * ((X1 + B)^2 - A - C)
alt_bn128_Fq2 E = A + A + A; // E = 3 * A
alt_bn128_Fq2 F = E.squared(); // F = E^2
alt_bn128_Fq2 X3 = F - (D+D); // X3 = F - 2 D
alt_bn128_Fq2 eightC = C+C;
eightC = eightC + eightC;
eightC = eightC + eightC;
alt_bn128_Fq2 Y3 = E * (D - X3) - eightC; // Y3 = E * (D - X3) - 8 * C
alt_bn128_Fq2 Y1Z1 = (this->Y)*(this->Z);
alt_bn128_Fq2 Z3 = Y1Z1 + Y1Z1; // Z3 = 2 * Y1 * Z1
return alt_bn128_G2(X3, Y3, Z3);
}
alt_bn128_G2 alt_bn128_G2::mul_by_q() const
{
return alt_bn128_G2(alt_bn128_twist_mul_by_q_X * (this->X).Frobenius_map(1),
alt_bn128_twist_mul_by_q_Y * (this->Y).Frobenius_map(1),
(this->Z).Frobenius_map(1));
}
bool alt_bn128_G2::is_well_formed() const
{
if (this->is_zero())
{
return true;
}
else
{
/*
y^2 = x^3 + b
We are using Jacobian coordinates, so equation we need to check is actually
(y/z^3)^2 = (x/z^2)^3 + b
y^2 / z^6 = x^3 / z^6 + b
y^2 = x^3 + b z^6
*/
alt_bn128_Fq2 X2 = this->X.squared();
alt_bn128_Fq2 Y2 = this->Y.squared();
alt_bn128_Fq2 Z2 = this->Z.squared();
alt_bn128_Fq2 X3 = this->X * X2;
alt_bn128_Fq2 Z3 = this->Z * Z2;
alt_bn128_Fq2 Z6 = Z3.squared();
return (Y2 == X3 + alt_bn128_twist_coeff_b * Z6);
}
}
alt_bn128_G2 alt_bn128_G2::zero()
{
return G2_zero;
}
alt_bn128_G2 alt_bn128_G2::one()
{
return G2_one;
}
alt_bn128_G2 alt_bn128_G2::random_element()
{
return (alt_bn128_Fr::random_element().as_bigint()) * G2_one;
}
std::ostream& operator<<(std::ostream &out, const alt_bn128_G2 &g)
{
alt_bn128_G2 copy(g);
copy.to_affine_coordinates();
out << (copy.is_zero() ? 1 : 0) << OUTPUT_SEPARATOR;
#ifdef NO_PT_COMPRESSION
out << copy.X << OUTPUT_SEPARATOR << copy.Y;
#else
/* storing LSB of Y */
out << copy.X << OUTPUT_SEPARATOR << (copy.Y.c0.as_bigint().data[0] & 1);
#endif
return out;
}
std::istream& operator>>(std::istream &in, alt_bn128_G2 &g)
{
char is_zero;
alt_bn128_Fq2 tX, tY;
#ifdef NO_PT_COMPRESSION
in >> is_zero >> tX >> tY;
is_zero -= '0';
#else
in.read((char*)&is_zero, 1); // this reads is_zero;
is_zero -= '0';
consume_OUTPUT_SEPARATOR(in);
unsigned char Y_lsb;
in >> tX;
consume_OUTPUT_SEPARATOR(in);
in.read((char*)&Y_lsb, 1);
Y_lsb -= '0';
// y = +/- sqrt(x^3 + b)
if (!is_zero)
{
alt_bn128_Fq2 tX2 = tX.squared();
alt_bn128_Fq2 tY2 = tX2 * tX + alt_bn128_twist_coeff_b;
tY = tY2.sqrt();
if ((tY.c0.as_bigint().data[0] & 1) != Y_lsb)
{
tY = -tY;
}
}
#endif
// using Jacobian coordinates
if (!is_zero)
{
g.X = tX;
g.Y = tY;
g.Z = alt_bn128_Fq2::one();
}
else
{
g = alt_bn128_G2::zero();
}
return in;
}
template<>
void batch_to_special_all_non_zeros<alt_bn128_G2>(std::vector<alt_bn128_G2> &vec)
{
std::vector<alt_bn128_Fq2> Z_vec;
Z_vec.reserve(vec.size());
for (auto &el: vec)
{
Z_vec.emplace_back(el.Z);
}
batch_invert<alt_bn128_Fq2>(Z_vec);
const alt_bn128_Fq2 one = alt_bn128_Fq2::one();
for (size_t i = 0; i < vec.size(); ++i)
{
alt_bn128_Fq2 Z2 = Z_vec[i].squared();
alt_bn128_Fq2 Z3 = Z_vec[i] * Z2;
vec[i].X = vec[i].X * Z2;
vec[i].Y = vec[i].Y * Z3;
vec[i].Z = one;
}
}
} // libsnark

View File

@ -1,96 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef ALT_BN128_G2_HPP_
#define ALT_BN128_G2_HPP_
#include <vector>
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
#include "algebra/curves/curve_utils.hpp"
namespace libsnark {
class alt_bn128_G2;
std::ostream& operator<<(std::ostream &, const alt_bn128_G2&);
std::istream& operator>>(std::istream &, alt_bn128_G2&);
class alt_bn128_G2 {
public:
#ifdef PROFILE_OP_COUNTS
static int64_t add_cnt;
static int64_t dbl_cnt;
#endif
static std::vector<size_t> wnaf_window_table;
static std::vector<size_t> fixed_base_exp_window_table;
static alt_bn128_G2 G2_zero;
static alt_bn128_G2 G2_one;
typedef alt_bn128_Fq base_field;
typedef alt_bn128_Fq2 twist_field;
typedef alt_bn128_Fr scalar_field;
alt_bn128_Fq2 X, Y, Z;
// using Jacobian coordinates
alt_bn128_G2();
alt_bn128_G2(const alt_bn128_Fq2& X, const alt_bn128_Fq2& Y, const alt_bn128_Fq2& Z) : X(X), Y(Y), Z(Z) {};
static alt_bn128_Fq2 mul_by_b(const alt_bn128_Fq2 &elt);
void print() const;
void print_coordinates() const;
void to_affine_coordinates();
void to_special();
bool is_special() const;
bool is_zero() const;
bool operator==(const alt_bn128_G2 &other) const;
bool operator!=(const alt_bn128_G2 &other) const;
alt_bn128_G2 operator+(const alt_bn128_G2 &other) const;
alt_bn128_G2 operator-() const;
alt_bn128_G2 operator-(const alt_bn128_G2 &other) const;
alt_bn128_G2 add(const alt_bn128_G2 &other) const;
alt_bn128_G2 mixed_add(const alt_bn128_G2 &other) const;
alt_bn128_G2 dbl() const;
alt_bn128_G2 mul_by_q() const;
bool is_well_formed() const;
static alt_bn128_G2 zero();
static alt_bn128_G2 one();
static alt_bn128_G2 random_element();
static size_t size_in_bits() { return twist_field::size_in_bits() + 1; }
static bigint<base_field::num_limbs> base_field_char() { return base_field::field_char(); }
static bigint<scalar_field::num_limbs> order() { return scalar_field::field_char(); }
friend std::ostream& operator<<(std::ostream &out, const alt_bn128_G2 &g);
friend std::istream& operator>>(std::istream &in, alt_bn128_G2 &g);
};
template<mp_size_t m>
alt_bn128_G2 operator*(const bigint<m> &lhs, const alt_bn128_G2 &rhs)
{
return scalar_mul<alt_bn128_G2, m>(rhs, lhs);
}
template<mp_size_t m, const bigint<m>& modulus_p>
alt_bn128_G2 operator*(const Fp_model<m,modulus_p> &lhs, const alt_bn128_G2 &rhs)
{
return scalar_mul<alt_bn128_G2, m>(rhs, lhs.as_bigint());
}
template<typename T>
void batch_to_special_all_non_zeros(std::vector<T> &vec);
template<>
void batch_to_special_all_non_zeros<alt_bn128_G2>(std::vector<alt_bn128_G2> &vec);
} // libsnark
#endif // ALT_BN128_G2_HPP_

View File

@ -1,273 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
namespace libsnark {
bigint<alt_bn128_r_limbs> alt_bn128_modulus_r;
bigint<alt_bn128_q_limbs> alt_bn128_modulus_q;
alt_bn128_Fq alt_bn128_coeff_b;
alt_bn128_Fq2 alt_bn128_twist;
alt_bn128_Fq2 alt_bn128_twist_coeff_b;
alt_bn128_Fq alt_bn128_twist_mul_by_b_c0;
alt_bn128_Fq alt_bn128_twist_mul_by_b_c1;
alt_bn128_Fq2 alt_bn128_twist_mul_by_q_X;
alt_bn128_Fq2 alt_bn128_twist_mul_by_q_Y;
bigint<alt_bn128_q_limbs> alt_bn128_ate_loop_count;
bool alt_bn128_ate_is_loop_count_neg;
bigint<12*alt_bn128_q_limbs> alt_bn128_final_exponent;
bigint<alt_bn128_q_limbs> alt_bn128_final_exponent_z;
bool alt_bn128_final_exponent_is_z_neg;
void init_alt_bn128_params()
{
typedef bigint<alt_bn128_r_limbs> bigint_r;
typedef bigint<alt_bn128_q_limbs> bigint_q;
assert(sizeof(mp_limb_t) == 8 || sizeof(mp_limb_t) == 4); // Montgomery assumes this
/* parameters for scalar field Fr */
alt_bn128_modulus_r = bigint_r("21888242871839275222246405745257275088548364400416034343698204186575808495617");
assert(alt_bn128_Fr::modulus_is_valid());
if (sizeof(mp_limb_t) == 8)
{
alt_bn128_Fr::Rsquared = bigint_r("944936681149208446651664254269745548490766851729442924617792859073125903783");
alt_bn128_Fr::Rcubed = bigint_r("5866548545943845227489894872040244720403868105578784105281690076696998248512");
alt_bn128_Fr::inv = 0xc2e1f593efffffff;
}
if (sizeof(mp_limb_t) == 4)
{
alt_bn128_Fr::Rsquared = bigint_r("944936681149208446651664254269745548490766851729442924617792859073125903783");
alt_bn128_Fr::Rcubed = bigint_r("5866548545943845227489894872040244720403868105578784105281690076696998248512");
alt_bn128_Fr::inv = 0xefffffff;
}
alt_bn128_Fr::num_bits = 254;
alt_bn128_Fr::euler = bigint_r("10944121435919637611123202872628637544274182200208017171849102093287904247808");
alt_bn128_Fr::s = 28;
alt_bn128_Fr::t = bigint_r("81540058820840996586704275553141814055101440848469862132140264610111");
alt_bn128_Fr::t_minus_1_over_2 = bigint_r("40770029410420498293352137776570907027550720424234931066070132305055");
alt_bn128_Fr::multiplicative_generator = alt_bn128_Fr("5");
alt_bn128_Fr::root_of_unity = alt_bn128_Fr("19103219067921713944291392827692070036145651957329286315305642004821462161904");
alt_bn128_Fr::nqr = alt_bn128_Fr("5");
alt_bn128_Fr::nqr_to_t = alt_bn128_Fr("19103219067921713944291392827692070036145651957329286315305642004821462161904");
/* parameters for base field Fq */
alt_bn128_modulus_q = bigint_q("21888242871839275222246405745257275088696311157297823662689037894645226208583");
assert(alt_bn128_Fq::modulus_is_valid());
if (sizeof(mp_limb_t) == 8)
{
alt_bn128_Fq::Rsquared = bigint_q("3096616502983703923843567936837374451735540968419076528771170197431451843209");
alt_bn128_Fq::Rcubed = bigint_q("14921786541159648185948152738563080959093619838510245177710943249661917737183");
alt_bn128_Fq::inv = 0x87d20782e4866389;
}
if (sizeof(mp_limb_t) == 4)
{
alt_bn128_Fq::Rsquared = bigint_q("3096616502983703923843567936837374451735540968419076528771170197431451843209");
alt_bn128_Fq::Rcubed = bigint_q("14921786541159648185948152738563080959093619838510245177710943249661917737183");
alt_bn128_Fq::inv = 0xe4866389;
}
alt_bn128_Fq::num_bits = 254;
alt_bn128_Fq::euler = bigint_q("10944121435919637611123202872628637544348155578648911831344518947322613104291");
alt_bn128_Fq::s = 1;
alt_bn128_Fq::t = bigint_q("10944121435919637611123202872628637544348155578648911831344518947322613104291");
alt_bn128_Fq::t_minus_1_over_2 = bigint_q("5472060717959818805561601436314318772174077789324455915672259473661306552145");
alt_bn128_Fq::multiplicative_generator = alt_bn128_Fq("3");
alt_bn128_Fq::root_of_unity = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
alt_bn128_Fq::nqr = alt_bn128_Fq("3");
alt_bn128_Fq::nqr_to_t = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
/* parameters for twist field Fq2 */
alt_bn128_Fq2::euler = bigint<2*alt_bn128_q_limbs>("239547588008311421220994022608339370399626158265550411218223901127035046843189118723920525909718935985594116157406550130918127817069793474323196511433944");
alt_bn128_Fq2::s = 4;
alt_bn128_Fq2::t = bigint<2*alt_bn128_q_limbs>("29943448501038927652624252826042421299953269783193801402277987640879380855398639840490065738714866998199264519675818766364765977133724184290399563929243");
alt_bn128_Fq2::t_minus_1_over_2 = bigint<2*alt_bn128_q_limbs>("14971724250519463826312126413021210649976634891596900701138993820439690427699319920245032869357433499099632259837909383182382988566862092145199781964621");
alt_bn128_Fq2::non_residue = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
alt_bn128_Fq2::nqr = alt_bn128_Fq2(alt_bn128_Fq("2"),alt_bn128_Fq("1"));
alt_bn128_Fq2::nqr_to_t = alt_bn128_Fq2(alt_bn128_Fq("5033503716262624267312492558379982687175200734934877598599011485707452665730"),alt_bn128_Fq("314498342015008975724433667930697407966947188435857772134235984660852259084"));
alt_bn128_Fq2::Frobenius_coeffs_c1[0] = alt_bn128_Fq("1");
alt_bn128_Fq2::Frobenius_coeffs_c1[1] = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
/* parameters for Fq6 */
alt_bn128_Fq6::non_residue = alt_bn128_Fq2(alt_bn128_Fq("9"),alt_bn128_Fq("1"));
alt_bn128_Fq6::Frobenius_coeffs_c1[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0"));
alt_bn128_Fq6::Frobenius_coeffs_c1[1] = alt_bn128_Fq2(alt_bn128_Fq("21575463638280843010398324269430826099269044274347216827212613867836435027261"),alt_bn128_Fq("10307601595873709700152284273816112264069230130616436755625194854815875713954"));
alt_bn128_Fq6::Frobenius_coeffs_c1[2] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0"));
alt_bn128_Fq6::Frobenius_coeffs_c1[3] = alt_bn128_Fq2(alt_bn128_Fq("3772000881919853776433695186713858239009073593817195771773381919316419345261"),alt_bn128_Fq("2236595495967245188281701248203181795121068902605861227855261137820944008926"));
alt_bn128_Fq6::Frobenius_coeffs_c1[4] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0"));
alt_bn128_Fq6::Frobenius_coeffs_c1[5] = alt_bn128_Fq2(alt_bn128_Fq("18429021223477853657660792034369865839114504446431234726392080002137598044644"),alt_bn128_Fq("9344045779998320333812420223237981029506012124075525679208581902008406485703"));
alt_bn128_Fq6::Frobenius_coeffs_c2[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0"));
alt_bn128_Fq6::Frobenius_coeffs_c2[1] = alt_bn128_Fq2(alt_bn128_Fq("2581911344467009335267311115468803099551665605076196740867805258568234346338"),alt_bn128_Fq("19937756971775647987995932169929341994314640652964949448313374472400716661030"));
alt_bn128_Fq6::Frobenius_coeffs_c2[2] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0"));
alt_bn128_Fq6::Frobenius_coeffs_c2[3] = alt_bn128_Fq2(alt_bn128_Fq("5324479202449903542726783395506214481928257762400643279780343368557297135718"),alt_bn128_Fq("16208900380737693084919495127334387981393726419856888799917914180988844123039"));
alt_bn128_Fq6::Frobenius_coeffs_c2[4] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0"));
alt_bn128_Fq6::Frobenius_coeffs_c2[5] = alt_bn128_Fq2(alt_bn128_Fq("13981852324922362344252311234282257507216387789820983642040889267519694726527"),alt_bn128_Fq("7629828391165209371577384193250820201684255241773809077146787135900891633097"));
/* parameters for Fq12 */
alt_bn128_Fq12::non_residue = alt_bn128_Fq2(alt_bn128_Fq("9"),alt_bn128_Fq("1"));
alt_bn128_Fq12::Frobenius_coeffs_c1[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0"));
alt_bn128_Fq12::Frobenius_coeffs_c1[1] = alt_bn128_Fq2(alt_bn128_Fq("8376118865763821496583973867626364092589906065868298776909617916018768340080"),alt_bn128_Fq("16469823323077808223889137241176536799009286646108169935659301613961712198316"));
alt_bn128_Fq12::Frobenius_coeffs_c1[2] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556617"),alt_bn128_Fq("0"));
alt_bn128_Fq12::Frobenius_coeffs_c1[3] = alt_bn128_Fq2(alt_bn128_Fq("11697423496358154304825782922584725312912383441159505038794027105778954184319"),alt_bn128_Fq("303847389135065887422783454877609941456349188919719272345083954437860409601"));
alt_bn128_Fq12::Frobenius_coeffs_c1[4] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0"));
alt_bn128_Fq12::Frobenius_coeffs_c1[5] = alt_bn128_Fq2(alt_bn128_Fq("3321304630594332808241809054958361220322477375291206261884409189760185844239"),alt_bn128_Fq("5722266937896532885780051958958348231143373700109372999374820235121374419868"));
alt_bn128_Fq12::Frobenius_coeffs_c1[6] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"),alt_bn128_Fq("0"));
alt_bn128_Fq12::Frobenius_coeffs_c1[7] = alt_bn128_Fq2(alt_bn128_Fq("13512124006075453725662431877630910996106405091429524885779419978626457868503"),alt_bn128_Fq("5418419548761466998357268504080738289687024511189653727029736280683514010267"));
alt_bn128_Fq12::Frobenius_coeffs_c1[8] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0"));
alt_bn128_Fq12::Frobenius_coeffs_c1[9] = alt_bn128_Fq2(alt_bn128_Fq("10190819375481120917420622822672549775783927716138318623895010788866272024264"),alt_bn128_Fq("21584395482704209334823622290379665147239961968378104390343953940207365798982"));
alt_bn128_Fq12::Frobenius_coeffs_c1[10] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651967"),alt_bn128_Fq("0"));
alt_bn128_Fq12::Frobenius_coeffs_c1[11] = alt_bn128_Fq2(alt_bn128_Fq("18566938241244942414004596690298913868373833782006617400804628704885040364344"),alt_bn128_Fq("16165975933942742336466353786298926857552937457188450663314217659523851788715"));
/* choice of short Weierstrass curve and its twist */
alt_bn128_coeff_b = alt_bn128_Fq("3");
alt_bn128_twist = alt_bn128_Fq2(alt_bn128_Fq("9"), alt_bn128_Fq("1"));
alt_bn128_twist_coeff_b = alt_bn128_coeff_b * alt_bn128_twist.inverse();
alt_bn128_twist_mul_by_b_c0 = alt_bn128_coeff_b * alt_bn128_Fq2::non_residue;
alt_bn128_twist_mul_by_b_c1 = alt_bn128_coeff_b * alt_bn128_Fq2::non_residue;
alt_bn128_twist_mul_by_q_X = alt_bn128_Fq2(alt_bn128_Fq("21575463638280843010398324269430826099269044274347216827212613867836435027261"),
alt_bn128_Fq("10307601595873709700152284273816112264069230130616436755625194854815875713954"));
alt_bn128_twist_mul_by_q_Y = alt_bn128_Fq2(alt_bn128_Fq("2821565182194536844548159561693502659359617185244120367078079554186484126554"),
alt_bn128_Fq("3505843767911556378687030309984248845540243509899259641013678093033130930403"));
/* choice of group G1 */
alt_bn128_G1::G1_zero = alt_bn128_G1(alt_bn128_Fq::zero(),
alt_bn128_Fq::one(),
alt_bn128_Fq::zero());
alt_bn128_G1::G1_one = alt_bn128_G1(alt_bn128_Fq("1"),
alt_bn128_Fq("2"),
alt_bn128_Fq::one());
alt_bn128_G1::wnaf_window_table.push_back(11);
alt_bn128_G1::wnaf_window_table.push_back(24);
alt_bn128_G1::wnaf_window_table.push_back(60);
alt_bn128_G1::wnaf_window_table.push_back(127);
alt_bn128_G1::fixed_base_exp_window_table.resize(0);
// window 1 is unbeaten in [-inf, 4.99]
alt_bn128_G1::fixed_base_exp_window_table.push_back(1);
// window 2 is unbeaten in [4.99, 10.99]
alt_bn128_G1::fixed_base_exp_window_table.push_back(5);
// window 3 is unbeaten in [10.99, 32.29]
alt_bn128_G1::fixed_base_exp_window_table.push_back(11);
// window 4 is unbeaten in [32.29, 55.23]
alt_bn128_G1::fixed_base_exp_window_table.push_back(32);
// window 5 is unbeaten in [55.23, 162.03]
alt_bn128_G1::fixed_base_exp_window_table.push_back(55);
// window 6 is unbeaten in [162.03, 360.15]
alt_bn128_G1::fixed_base_exp_window_table.push_back(162);
// window 7 is unbeaten in [360.15, 815.44]
alt_bn128_G1::fixed_base_exp_window_table.push_back(360);
// window 8 is unbeaten in [815.44, 2373.07]
alt_bn128_G1::fixed_base_exp_window_table.push_back(815);
// window 9 is unbeaten in [2373.07, 6977.75]
alt_bn128_G1::fixed_base_exp_window_table.push_back(2373);
// window 10 is unbeaten in [6977.75, 7122.23]
alt_bn128_G1::fixed_base_exp_window_table.push_back(6978);
// window 11 is unbeaten in [7122.23, 57818.46]
alt_bn128_G1::fixed_base_exp_window_table.push_back(7122);
// window 12 is never the best
alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
// window 13 is unbeaten in [57818.46, 169679.14]
alt_bn128_G1::fixed_base_exp_window_table.push_back(57818);
// window 14 is never the best
alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
// window 15 is unbeaten in [169679.14, 439758.91]
alt_bn128_G1::fixed_base_exp_window_table.push_back(169679);
// window 16 is unbeaten in [439758.91, 936073.41]
alt_bn128_G1::fixed_base_exp_window_table.push_back(439759);
// window 17 is unbeaten in [936073.41, 4666554.74]
alt_bn128_G1::fixed_base_exp_window_table.push_back(936073);
// window 18 is never the best
alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
// window 19 is unbeaten in [4666554.74, 7580404.42]
alt_bn128_G1::fixed_base_exp_window_table.push_back(4666555);
// window 20 is unbeaten in [7580404.42, 34552892.20]
alt_bn128_G1::fixed_base_exp_window_table.push_back(7580404);
// window 21 is never the best
alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
// window 22 is unbeaten in [34552892.20, inf]
alt_bn128_G1::fixed_base_exp_window_table.push_back(34552892);
/* choice of group G2 */
alt_bn128_G2::G2_zero = alt_bn128_G2(alt_bn128_Fq2::zero(),
alt_bn128_Fq2::one(),
alt_bn128_Fq2::zero());
alt_bn128_G2::G2_one = alt_bn128_G2(alt_bn128_Fq2(alt_bn128_Fq("10857046999023057135944570762232829481370756359578518086990519993285655852781"),
alt_bn128_Fq("11559732032986387107991004021392285783925812861821192530917403151452391805634")),
alt_bn128_Fq2(alt_bn128_Fq("8495653923123431417604973247489272438418190587263600148770280649306958101930"),
alt_bn128_Fq("4082367875863433681332203403145435568316851327593401208105741076214120093531")),
alt_bn128_Fq2::one());
alt_bn128_G2::wnaf_window_table.push_back(5);
alt_bn128_G2::wnaf_window_table.push_back(15);
alt_bn128_G2::wnaf_window_table.push_back(39);
alt_bn128_G2::wnaf_window_table.push_back(109);
alt_bn128_G2::fixed_base_exp_window_table.resize(0);
// window 1 is unbeaten in [-inf, 5.10]
alt_bn128_G2::fixed_base_exp_window_table.push_back(1);
// window 2 is unbeaten in [5.10, 10.43]
alt_bn128_G2::fixed_base_exp_window_table.push_back(5);
// window 3 is unbeaten in [10.43, 25.28]
alt_bn128_G2::fixed_base_exp_window_table.push_back(10);
// window 4 is unbeaten in [25.28, 59.00]
alt_bn128_G2::fixed_base_exp_window_table.push_back(25);
// window 5 is unbeaten in [59.00, 154.03]
alt_bn128_G2::fixed_base_exp_window_table.push_back(59);
// window 6 is unbeaten in [154.03, 334.25]
alt_bn128_G2::fixed_base_exp_window_table.push_back(154);
// window 7 is unbeaten in [334.25, 742.58]
alt_bn128_G2::fixed_base_exp_window_table.push_back(334);
// window 8 is unbeaten in [742.58, 2034.40]
alt_bn128_G2::fixed_base_exp_window_table.push_back(743);
// window 9 is unbeaten in [2034.40, 4987.56]
alt_bn128_G2::fixed_base_exp_window_table.push_back(2034);
// window 10 is unbeaten in [4987.56, 8888.27]
alt_bn128_G2::fixed_base_exp_window_table.push_back(4988);
// window 11 is unbeaten in [8888.27, 26271.13]
alt_bn128_G2::fixed_base_exp_window_table.push_back(8888);
// window 12 is unbeaten in [26271.13, 39768.20]
alt_bn128_G2::fixed_base_exp_window_table.push_back(26271);
// window 13 is unbeaten in [39768.20, 106275.75]
alt_bn128_G2::fixed_base_exp_window_table.push_back(39768);
// window 14 is unbeaten in [106275.75, 141703.40]
alt_bn128_G2::fixed_base_exp_window_table.push_back(106276);
// window 15 is unbeaten in [141703.40, 462422.97]
alt_bn128_G2::fixed_base_exp_window_table.push_back(141703);
// window 16 is unbeaten in [462422.97, 926871.84]
alt_bn128_G2::fixed_base_exp_window_table.push_back(462423);
// window 17 is unbeaten in [926871.84, 4873049.17]
alt_bn128_G2::fixed_base_exp_window_table.push_back(926872);
// window 18 is never the best
alt_bn128_G2::fixed_base_exp_window_table.push_back(0);
// window 19 is unbeaten in [4873049.17, 5706707.88]
alt_bn128_G2::fixed_base_exp_window_table.push_back(4873049);
// window 20 is unbeaten in [5706707.88, 31673814.95]
alt_bn128_G2::fixed_base_exp_window_table.push_back(5706708);
// window 21 is never the best
alt_bn128_G2::fixed_base_exp_window_table.push_back(0);
// window 22 is unbeaten in [31673814.95, inf]
alt_bn128_G2::fixed_base_exp_window_table.push_back(31673815);
/* pairing parameters */
alt_bn128_ate_loop_count = bigint_q("29793968203157093288");
alt_bn128_ate_is_loop_count_neg = false;
alt_bn128_final_exponent = bigint<12*alt_bn128_q_limbs>("552484233613224096312617126783173147097382103762957654188882734314196910839907541213974502761540629817009608548654680343627701153829446747810907373256841551006201639677726139946029199968412598804882391702273019083653272047566316584365559776493027495458238373902875937659943504873220554161550525926302303331747463515644711876653177129578303191095900909191624817826566688241804408081892785725967931714097716709526092261278071952560171111444072049229123565057483750161460024353346284167282452756217662335528813519139808291170539072125381230815729071544861602750936964829313608137325426383735122175229541155376346436093930287402089517426973178917569713384748081827255472576937471496195752727188261435633271238710131736096299798168852925540549342330775279877006784354801422249722573783561685179618816480037695005515426162362431072245638324744480");
alt_bn128_final_exponent_z = bigint_q("4965661367192848881");
alt_bn128_final_exponent_is_z_neg = false;
}
} // libsnark
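The fixed_base_exp_window_table entries above are break-even points measured for this curve: entry i holds the smallest expected number of exponentiations at which window size i+1 starts to win, and 0 marks a window size that is never optimal. A sketch of how such a table can be consulted; the helper name is illustrative and not part of the code above:
// Illustrative helper (hypothetical): pick the largest window whose break-even
// threshold does not exceed the expected number of fixed-base exponentiations.
#include <cstddef>
#include <vector>
size_t pick_window_size_sketch(const std::vector<size_t> &table, const size_t num_exps)
{
    size_t window = 1;
    for (size_t i = 0; i < table.size(); ++i)
    {
        if (table[i] != 0 && table[i] <= num_exps)
        {
            window = i + 1; // table entries correspond to window sizes 1, 2, 3, ...
        }
    }
    return window;
}
// e.g. against the G1 table above, num_exps = 1000 falls in [815.44, 2373.07], so window 8 is chosen.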

View File

@@ -1,57 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef ALT_BN128_INIT_HPP_
#define ALT_BN128_INIT_HPP_
#include "algebra/curves/public_params.hpp"
#include "algebra/fields/fp.hpp"
#include "algebra/fields/fp2.hpp"
#include "algebra/fields/fp6_3over2.hpp"
#include "algebra/fields/fp12_2over3over2.hpp"
namespace libsnark {
const mp_size_t alt_bn128_r_bitcount = 254;
const mp_size_t alt_bn128_q_bitcount = 254;
const mp_size_t alt_bn128_r_limbs = (alt_bn128_r_bitcount+GMP_NUMB_BITS-1)/GMP_NUMB_BITS;
const mp_size_t alt_bn128_q_limbs = (alt_bn128_q_bitcount+GMP_NUMB_BITS-1)/GMP_NUMB_BITS;
extern bigint<alt_bn128_r_limbs> alt_bn128_modulus_r;
extern bigint<alt_bn128_q_limbs> alt_bn128_modulus_q;
typedef Fp_model<alt_bn128_r_limbs, alt_bn128_modulus_r> alt_bn128_Fr;
typedef Fp_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq;
typedef Fp2_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq2;
typedef Fp6_3over2_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq6;
typedef Fp12_2over3over2_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq12;
typedef alt_bn128_Fq12 alt_bn128_GT;
// parameters for Barreto--Naehrig curve E/Fq : y^2 = x^3 + b
extern alt_bn128_Fq alt_bn128_coeff_b;
// parameters for twisted Barreto--Naehrig curve E'/Fq2 : y^2 = x^3 + b/xi
extern alt_bn128_Fq2 alt_bn128_twist;
extern alt_bn128_Fq2 alt_bn128_twist_coeff_b;
extern alt_bn128_Fq alt_bn128_twist_mul_by_b_c0;
extern alt_bn128_Fq alt_bn128_twist_mul_by_b_c1;
extern alt_bn128_Fq2 alt_bn128_twist_mul_by_q_X;
extern alt_bn128_Fq2 alt_bn128_twist_mul_by_q_Y;
// parameters for pairing
extern bigint<alt_bn128_q_limbs> alt_bn128_ate_loop_count;
extern bool alt_bn128_ate_is_loop_count_neg;
extern bigint<12*alt_bn128_q_limbs> alt_bn128_final_exponent;
extern bigint<alt_bn128_q_limbs> alt_bn128_final_exponent_z;
extern bool alt_bn128_final_exponent_is_z_neg;
void init_alt_bn128_params();
class alt_bn128_G1;
class alt_bn128_G2;
} // libsnark
#endif // ALT_BN128_INIT_HPP_
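The limb counts above are just ceil(bitcount / GMP_NUMB_BITS), so the 254-bit moduli occupy 4 limbs on a 64-bit build and 8 limbs on a 32-bit build (matching the two branches on sizeof(mp_limb_t) in alt_bn128_init.cpp above). A tiny standalone check of that arithmetic:
// Standalone sketch of the limb-count computation used above.
#include <gmp.h>
#include <cstdio>
int main()
{
    const unsigned long bitcount = 254;
    const unsigned long limbs = (bitcount + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
    // Prints "254-bit modulus -> 4 limbs" with 64-bit limbs, "-> 8 limbs" with 32-bit limbs.
    std::printf("%lu-bit modulus -> %lu limbs\n", bitcount, limbs);
    return 0;
}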

View File

@@ -1,547 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "algebra/curves/alt_bn128/alt_bn128_pairing.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
#include <cassert>
#include "common/profiling.hpp"
#include "common/assert_except.hpp"
namespace libsnark {
bool alt_bn128_ate_G1_precomp::operator==(const alt_bn128_ate_G1_precomp &other) const
{
return (this->PX == other.PX &&
this->PY == other.PY);
}
std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G1_precomp &prec_P)
{
out << prec_P.PX << OUTPUT_SEPARATOR << prec_P.PY;
return out;
}
std::istream& operator>>(std::istream &in, alt_bn128_ate_G1_precomp &prec_P)
{
in >> prec_P.PX;
consume_OUTPUT_SEPARATOR(in);
in >> prec_P.PY;
return in;
}
bool alt_bn128_ate_ell_coeffs::operator==(const alt_bn128_ate_ell_coeffs &other) const
{
return (this->ell_0 == other.ell_0 &&
this->ell_VW == other.ell_VW &&
this->ell_VV == other.ell_VV);
}
std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_ell_coeffs &c)
{
out << c.ell_0 << OUTPUT_SEPARATOR << c.ell_VW << OUTPUT_SEPARATOR << c.ell_VV;
return out;
}
std::istream& operator>>(std::istream &in, alt_bn128_ate_ell_coeffs &c)
{
in >> c.ell_0;
consume_OUTPUT_SEPARATOR(in);
in >> c.ell_VW;
consume_OUTPUT_SEPARATOR(in);
in >> c.ell_VV;
return in;
}
bool alt_bn128_ate_G2_precomp::operator==(const alt_bn128_ate_G2_precomp &other) const
{
return (this->QX == other.QX &&
this->QY == other.QY &&
this->coeffs == other.coeffs);
}
std::ostream& operator<<(std::ostream& out, const alt_bn128_ate_G2_precomp &prec_Q)
{
out << prec_Q.QX << OUTPUT_SEPARATOR << prec_Q.QY << "\n";
out << prec_Q.coeffs.size() << "\n";
for (const alt_bn128_ate_ell_coeffs &c : prec_Q.coeffs)
{
out << c << OUTPUT_NEWLINE;
}
return out;
}
std::istream& operator>>(std::istream& in, alt_bn128_ate_G2_precomp &prec_Q)
{
in >> prec_Q.QX;
consume_OUTPUT_SEPARATOR(in);
in >> prec_Q.QY;
consume_newline(in);
prec_Q.coeffs.clear();
size_t s;
in >> s;
consume_newline(in);
prec_Q.coeffs.reserve(s);
for (size_t i = 0; i < s; ++i)
{
alt_bn128_ate_ell_coeffs c;
in >> c;
consume_OUTPUT_NEWLINE(in);
prec_Q.coeffs.emplace_back(c);
}
return in;
}
/* final exponentiations */
alt_bn128_Fq12 alt_bn128_final_exponentiation_first_chunk(const alt_bn128_Fq12 &elt)
{
enter_block("Call to alt_bn128_final_exponentiation_first_chunk");
/*
Computes result = elt^((q^6-1)*(q^2+1)).
Follows, e.g., Beuchat et al page 9, by computing result as follows:
elt^((q^6-1)*(q^2+1)) = (conj(elt) * elt^(-1))^(q^2+1)
More precisely:
A = conj(elt)
B = elt.inverse()
C = A * B
D = C.Frobenius_map(2)
result = D * C
*/
const alt_bn128_Fq12 A = alt_bn128_Fq12(elt.c0,-elt.c1);
const alt_bn128_Fq12 B = elt.inverse();
const alt_bn128_Fq12 C = A * B;
const alt_bn128_Fq12 D = C.Frobenius_map(2);
const alt_bn128_Fq12 result = D * C;
leave_block("Call to alt_bn128_final_exponentiation_first_chunk");
return result;
}
alt_bn128_Fq12 alt_bn128_exp_by_neg_z(const alt_bn128_Fq12 &elt)
{
enter_block("Call to alt_bn128_exp_by_neg_z");
alt_bn128_Fq12 result = elt.cyclotomic_exp(alt_bn128_final_exponent_z);
if (!alt_bn128_final_exponent_is_z_neg)
{
result = result.unitary_inverse();
}
leave_block("Call to alt_bn128_exp_by_neg_z");
return result;
}
alt_bn128_Fq12 alt_bn128_final_exponentiation_last_chunk(const alt_bn128_Fq12 &elt)
{
enter_block("Call to alt_bn128_final_exponentiation_last_chunk");
/*
Follows Laura Fuentes-Castaneda et al. "Faster hashing to G2"
by computing:
result = elt^(q^3 * (12*z^3 + 6z^2 + 4z - 1) +
q^2 * (12*z^3 + 6z^2 + 6z) +
q * (12*z^3 + 6z^2 + 4z) +
1 * (12*z^3 + 12z^2 + 6z + 1))
which equals
result = elt^( 2z * ( 6z^2 + 3z + 1 ) * (q^4 - q^2 + 1)/r ).
Using the following addition chain:
A = exp_by_neg_z(elt) // = elt^(-z)
B = A^2 // = elt^(-2*z)
C = B^2 // = elt^(-4*z)
D = C * B // = elt^(-6*z)
E = exp_by_neg_z(D) // = elt^(6*z^2)
F = E^2 // = elt^(12*z^2)
G = exp_by_neg_z(F) // = elt^(-12*z^3)
H = conj(D) // = elt^(6*z)
I = conj(G) // = elt^(12*z^3)
J = I * E // = elt^(12*z^3 + 6*z^2)
K = J * H // = elt^(12*z^3 + 6*z^2 + 6*z)
L = K * B // = elt^(12*z^3 + 6*z^2 + 4*z)
M = K * E // = elt^(12*z^3 + 12*z^2 + 6*z)
N = M * elt // = elt^(12*z^3 + 12*z^2 + 6*z + 1)
O = L.Frobenius_map(1) // = elt^(q*(12*z^3 + 6*z^2 + 4*z))
P = O * N // = elt^(q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1))
Q = K.Frobenius_map(2) // = elt^(q^2 * (12*z^3 + 6*z^2 + 6*z))
R = Q * P // = elt^(q^2 * (12*z^3 + 6*z^2 + 6*z) + q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1))
S = conj(elt) // = elt^(-1)
T = S * L // = elt^(12*z^3 + 6*z^2 + 4*z - 1)
U = T.Frobenius_map(3) // = elt^(q^3 * (12*z^3 + 6*z^2 + 4*z - 1))
V = U * R // = elt^(q^3 * (12*z^3 + 6*z^2 + 4*z - 1) + q^2 * (12*z^3 + 6*z^2 + 6*z) + q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1))
result = V
*/
const alt_bn128_Fq12 A = alt_bn128_exp_by_neg_z(elt);
const alt_bn128_Fq12 B = A.cyclotomic_squared();
const alt_bn128_Fq12 C = B.cyclotomic_squared();
const alt_bn128_Fq12 D = C * B;
const alt_bn128_Fq12 E = alt_bn128_exp_by_neg_z(D);
const alt_bn128_Fq12 F = E.cyclotomic_squared();
const alt_bn128_Fq12 G = alt_bn128_exp_by_neg_z(F);
const alt_bn128_Fq12 H = D.unitary_inverse();
const alt_bn128_Fq12 I = G.unitary_inverse();
const alt_bn128_Fq12 J = I * E;
const alt_bn128_Fq12 K = J * H;
const alt_bn128_Fq12 L = K * B;
const alt_bn128_Fq12 M = K * E;
const alt_bn128_Fq12 N = M * elt;
const alt_bn128_Fq12 O = L.Frobenius_map(1);
const alt_bn128_Fq12 P = O * N;
const alt_bn128_Fq12 Q = K.Frobenius_map(2);
const alt_bn128_Fq12 R = Q * P;
const alt_bn128_Fq12 S = elt.unitary_inverse();
const alt_bn128_Fq12 T = S * L;
const alt_bn128_Fq12 U = T.Frobenius_map(3);
const alt_bn128_Fq12 V = U * R;
const alt_bn128_Fq12 result = V;
leave_block("Call to alt_bn128_final_exponentiation_last_chunk");
return result;
}
alt_bn128_GT alt_bn128_final_exponentiation(const alt_bn128_Fq12 &elt)
{
enter_block("Call to alt_bn128_final_exponentiation");
/* OLD naive version:
alt_bn128_GT result = elt^alt_bn128_final_exponent;
*/
alt_bn128_Fq12 A = alt_bn128_final_exponentiation_first_chunk(elt);
alt_bn128_GT result = alt_bn128_final_exponentiation_last_chunk(A);
leave_block("Call to alt_bn128_final_exponentiation");
return result;
}
/* ate pairing */
void doubling_step_for_flipped_miller_loop(const alt_bn128_Fq two_inv,
alt_bn128_G2 &current,
alt_bn128_ate_ell_coeffs &c)
{
const alt_bn128_Fq2 X = current.X, Y = current.Y, Z = current.Z;
const alt_bn128_Fq2 A = two_inv * (X * Y); // A = X1 * Y1 / 2
const alt_bn128_Fq2 B = Y.squared(); // B = Y1^2
const alt_bn128_Fq2 C = Z.squared(); // C = Z1^2
const alt_bn128_Fq2 D = C+C+C; // D = 3 * C
const alt_bn128_Fq2 E = alt_bn128_twist_coeff_b * D; // E = twist_b * D
const alt_bn128_Fq2 F = E+E+E; // F = 3 * E
const alt_bn128_Fq2 G = two_inv * (B+F); // G = (B+F)/2
const alt_bn128_Fq2 H = (Y+Z).squared() - (B+C); // H = (Y1+Z1)^2-(B+C)
const alt_bn128_Fq2 I = E-B; // I = E-B
const alt_bn128_Fq2 J = X.squared(); // J = X1^2
const alt_bn128_Fq2 E_squared = E.squared(); // E_squared = E^2
current.X = A * (B-F); // X3 = A * (B-F)
current.Y = G.squared() - (E_squared+E_squared+E_squared); // Y3 = G^2 - 3*E^2
current.Z = B * H; // Z3 = B * H
c.ell_0 = alt_bn128_twist * I; // ell_0 = xi * I
c.ell_VW = -H; // ell_VW = - H (later: * yP)
c.ell_VV = J+J+J; // ell_VV = 3*J (later: * xP)
}
void mixed_addition_step_for_flipped_miller_loop(const alt_bn128_G2 base,
alt_bn128_G2 &current,
alt_bn128_ate_ell_coeffs &c)
{
const alt_bn128_Fq2 X1 = current.X, Y1 = current.Y, Z1 = current.Z;
const alt_bn128_Fq2 &x2 = base.X, &y2 = base.Y;
const alt_bn128_Fq2 D = X1 - x2 * Z1; // D = X1 - X2*Z1
const alt_bn128_Fq2 E = Y1 - y2 * Z1; // E = Y1 - Y2*Z1
const alt_bn128_Fq2 F = D.squared(); // F = D^2
const alt_bn128_Fq2 G = E.squared(); // G = E^2
const alt_bn128_Fq2 H = D*F; // H = D*F
const alt_bn128_Fq2 I = X1 * F; // I = X1 * F
const alt_bn128_Fq2 J = H + Z1*G - (I+I); // J = H + Z1*G - (I+I)
current.X = D * J; // X3 = D*J
current.Y = E * (I-J)-(H * Y1); // Y3 = E*(I-J)-(H*Y1)
current.Z = Z1 * H; // Z3 = Z1*H
c.ell_0 = alt_bn128_twist * (E * x2 - D * y2); // ell_0 = xi * (E * X2 - D * Y2)
c.ell_VV = - E; // ell_VV = - E (later: * xP)
c.ell_VW = D; // ell_VW = D (later: * yP )
}
alt_bn128_ate_G1_precomp alt_bn128_ate_precompute_G1(const alt_bn128_G1& P)
{
enter_block("Call to alt_bn128_ate_precompute_G1");
alt_bn128_G1 Pcopy = P;
Pcopy.to_affine_coordinates();
alt_bn128_ate_G1_precomp result;
result.PX = Pcopy.X;
result.PY = Pcopy.Y;
leave_block("Call to alt_bn128_ate_precompute_G1");
return result;
}
alt_bn128_ate_G2_precomp alt_bn128_ate_precompute_G2(const alt_bn128_G2& Q)
{
enter_block("Call to alt_bn128_ate_precompute_G2");
alt_bn128_G2 Qcopy(Q);
Qcopy.to_affine_coordinates();
alt_bn128_Fq two_inv = (alt_bn128_Fq("2").inverse()); // could add to global params if needed
alt_bn128_ate_G2_precomp result;
result.QX = Qcopy.X;
result.QY = Qcopy.Y;
alt_bn128_G2 R;
R.X = Qcopy.X;
R.Y = Qcopy.Y;
R.Z = alt_bn128_Fq2::one();
const bigint<alt_bn128_Fr::num_limbs> &loop_count = alt_bn128_ate_loop_count;
bool found_one = false;
alt_bn128_ate_ell_coeffs c;
for (int64_t i = loop_count.max_bits(); i >= 0; --i)
{
const bool bit = loop_count.test_bit(i);
if (!found_one)
{
/* this skips the MSB itself */
found_one |= bit;
continue;
}
doubling_step_for_flipped_miller_loop(two_inv, R, c);
result.coeffs.push_back(c);
if (bit)
{
mixed_addition_step_for_flipped_miller_loop(Qcopy, R, c);
result.coeffs.push_back(c);
}
}
alt_bn128_G2 Q1 = Qcopy.mul_by_q();
assert_except(Q1.Z == alt_bn128_Fq2::one());
alt_bn128_G2 Q2 = Q1.mul_by_q();
assert_except(Q2.Z == alt_bn128_Fq2::one());
if (alt_bn128_ate_is_loop_count_neg)
{
R.Y = - R.Y;
}
Q2.Y = - Q2.Y;
mixed_addition_step_for_flipped_miller_loop(Q1, R, c);
result.coeffs.push_back(c);
mixed_addition_step_for_flipped_miller_loop(Q2, R, c);
result.coeffs.push_back(c);
leave_block("Call to alt_bn128_ate_precompute_G2");
return result;
}
alt_bn128_Fq12 alt_bn128_ate_miller_loop(const alt_bn128_ate_G1_precomp &prec_P,
const alt_bn128_ate_G2_precomp &prec_Q)
{
enter_block("Call to alt_bn128_ate_miller_loop");
alt_bn128_Fq12 f = alt_bn128_Fq12::one();
bool found_one = false;
size_t idx = 0;
const bigint<alt_bn128_Fr::num_limbs> &loop_count = alt_bn128_ate_loop_count;
alt_bn128_ate_ell_coeffs c;
for (int64_t i = loop_count.max_bits(); i >= 0; --i)
{
const bool bit = loop_count.test_bit(i);
if (!found_one)
{
/* this skips the MSB itself */
found_one |= bit;
continue;
}
/* code below gets executed for all bits (EXCEPT the MSB itself) of
alt_bn128_ate_loop_count (skipping leading zeros) in MSB to LSB
order */
c = prec_Q.coeffs[idx++];
f = f.squared();
f = f.mul_by_024(c.ell_0, prec_P.PY * c.ell_VW, prec_P.PX * c.ell_VV);
if (bit)
{
c = prec_Q.coeffs[idx++];
f = f.mul_by_024(c.ell_0, prec_P.PY * c.ell_VW, prec_P.PX * c.ell_VV);
}
}
if (alt_bn128_ate_is_loop_count_neg)
{
f = f.inverse();
}
c = prec_Q.coeffs[idx++];
f = f.mul_by_024(c.ell_0,prec_P.PY * c.ell_VW,prec_P.PX * c.ell_VV);
c = prec_Q.coeffs[idx++];
f = f.mul_by_024(c.ell_0,prec_P.PY * c.ell_VW,prec_P.PX * c.ell_VV);
leave_block("Call to alt_bn128_ate_miller_loop");
return f;
}
alt_bn128_Fq12 alt_bn128_ate_double_miller_loop(const alt_bn128_ate_G1_precomp &prec_P1,
const alt_bn128_ate_G2_precomp &prec_Q1,
const alt_bn128_ate_G1_precomp &prec_P2,
const alt_bn128_ate_G2_precomp &prec_Q2)
{
enter_block("Call to alt_bn128_ate_double_miller_loop");
alt_bn128_Fq12 f = alt_bn128_Fq12::one();
bool found_one = false;
size_t idx = 0;
const bigint<alt_bn128_Fr::num_limbs> &loop_count = alt_bn128_ate_loop_count;
for (int64_t i = loop_count.max_bits(); i >= 0; --i)
{
const bool bit = loop_count.test_bit(i);
if (!found_one)
{
/* this skips the MSB itself */
found_one |= bit;
continue;
}
/* code below gets executed for all bits (EXCEPT the MSB itself) of
alt_bn128_ate_loop_count (skipping leading zeros) in MSB to LSB
order */
alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx];
alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx];
++idx;
f = f.squared();
f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
if (bit)
{
alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx];
alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx];
++idx;
f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
}
}
if (alt_bn128_ate_is_loop_count_neg)
{
f = f.inverse();
}
alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx];
alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx];
++idx;
f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
c1 = prec_Q1.coeffs[idx];
c2 = prec_Q2.coeffs[idx];
++idx;
f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
leave_block("Call to alt_bn128_ate_double_miller_loop");
return f;
}
alt_bn128_Fq12 alt_bn128_ate_pairing(const alt_bn128_G1& P, const alt_bn128_G2 &Q)
{
enter_block("Call to alt_bn128_ate_pairing");
alt_bn128_ate_G1_precomp prec_P = alt_bn128_ate_precompute_G1(P);
alt_bn128_ate_G2_precomp prec_Q = alt_bn128_ate_precompute_G2(Q);
alt_bn128_Fq12 result = alt_bn128_ate_miller_loop(prec_P, prec_Q);
leave_block("Call to alt_bn128_ate_pairing");
return result;
}
alt_bn128_GT alt_bn128_ate_reduced_pairing(const alt_bn128_G1 &P, const alt_bn128_G2 &Q)
{
enter_block("Call to alt_bn128_ate_reduced_pairing");
const alt_bn128_Fq12 f = alt_bn128_ate_pairing(P, Q);
const alt_bn128_GT result = alt_bn128_final_exponentiation(f);
leave_block("Call to alt_bn128_ate_reduced_pairing");
return result;
}
/* choice of pairing */
alt_bn128_G1_precomp alt_bn128_precompute_G1(const alt_bn128_G1& P)
{
return alt_bn128_ate_precompute_G1(P);
}
alt_bn128_G2_precomp alt_bn128_precompute_G2(const alt_bn128_G2& Q)
{
return alt_bn128_ate_precompute_G2(Q);
}
alt_bn128_Fq12 alt_bn128_miller_loop(const alt_bn128_G1_precomp &prec_P,
const alt_bn128_G2_precomp &prec_Q)
{
return alt_bn128_ate_miller_loop(prec_P, prec_Q);
}
alt_bn128_Fq12 alt_bn128_double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
const alt_bn128_G2_precomp &prec_Q1,
const alt_bn128_G1_precomp &prec_P2,
const alt_bn128_G2_precomp &prec_Q2)
{
return alt_bn128_ate_double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2);
}
alt_bn128_Fq12 alt_bn128_pairing(const alt_bn128_G1& P,
const alt_bn128_G2 &Q)
{
return alt_bn128_ate_pairing(P, Q);
}
alt_bn128_GT alt_bn128_reduced_pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q)
{
return alt_bn128_ate_reduced_pairing(P, Q);
}
} // libsnark
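Tying the pieces above together: the reduced pairing is precomputation, then the Miller loop, then the final exponentiation. A hedged equivalence sketch (the function name is illustrative; it assumes initialized parameters, as in the tests further below):
// Sketch: alt_bn128_reduced_pairing(P, Q) decomposes into the three stages above.
#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
#include <cassert>
using namespace libsnark;
void reduced_pairing_decomposition_sketch(const alt_bn128_G1 &P, const alt_bn128_G2 &Q)
{
    const alt_bn128_G1_precomp prec_P = alt_bn128_precompute_G1(P);
    const alt_bn128_G2_precomp prec_Q = alt_bn128_precompute_G2(Q);
    const alt_bn128_Fq12 f = alt_bn128_miller_loop(prec_P, prec_Q); // rational function evaluation
    const alt_bn128_GT e = alt_bn128_final_exponentiation(f);       // map into the order-r subgroup
    assert(e == alt_bn128_reduced_pairing(P, Q));
}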

View File

@@ -1,92 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef ALT_BN128_PAIRING_HPP_
#define ALT_BN128_PAIRING_HPP_
#include <vector>
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
namespace libsnark {
/* final exponentiation */
alt_bn128_GT alt_bn128_final_exponentiation(const alt_bn128_Fq12 &elt);
/* ate pairing */
struct alt_bn128_ate_G1_precomp {
alt_bn128_Fq PX;
alt_bn128_Fq PY;
bool operator==(const alt_bn128_ate_G1_precomp &other) const;
friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G1_precomp &prec_P);
friend std::istream& operator>>(std::istream &in, alt_bn128_ate_G1_precomp &prec_P);
};
struct alt_bn128_ate_ell_coeffs {
alt_bn128_Fq2 ell_0;
alt_bn128_Fq2 ell_VW;
alt_bn128_Fq2 ell_VV;
bool operator==(const alt_bn128_ate_ell_coeffs &other) const;
friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_ell_coeffs &dc);
friend std::istream& operator>>(std::istream &in, alt_bn128_ate_ell_coeffs &dc);
};
struct alt_bn128_ate_G2_precomp {
alt_bn128_Fq2 QX;
alt_bn128_Fq2 QY;
std::vector<alt_bn128_ate_ell_coeffs> coeffs;
bool operator==(const alt_bn128_ate_G2_precomp &other) const;
friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G2_precomp &prec_Q);
friend std::istream& operator>>(std::istream &in, alt_bn128_ate_G2_precomp &prec_Q);
};
alt_bn128_ate_G1_precomp alt_bn128_ate_precompute_G1(const alt_bn128_G1& P);
alt_bn128_ate_G2_precomp alt_bn128_ate_precompute_G2(const alt_bn128_G2& Q);
alt_bn128_Fq12 alt_bn128_ate_miller_loop(const alt_bn128_ate_G1_precomp &prec_P,
const alt_bn128_ate_G2_precomp &prec_Q);
alt_bn128_Fq12 alt_bn128_ate_double_miller_loop(const alt_bn128_ate_G1_precomp &prec_P1,
const alt_bn128_ate_G2_precomp &prec_Q1,
const alt_bn128_ate_G1_precomp &prec_P2,
const alt_bn128_ate_G2_precomp &prec_Q2);
alt_bn128_Fq12 alt_bn128_ate_pairing(const alt_bn128_G1& P,
const alt_bn128_G2 &Q);
alt_bn128_GT alt_bn128_ate_reduced_pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q);
/* choice of pairing */
typedef alt_bn128_ate_G1_precomp alt_bn128_G1_precomp;
typedef alt_bn128_ate_G2_precomp alt_bn128_G2_precomp;
alt_bn128_G1_precomp alt_bn128_precompute_G1(const alt_bn128_G1& P);
alt_bn128_G2_precomp alt_bn128_precompute_G2(const alt_bn128_G2& Q);
alt_bn128_Fq12 alt_bn128_miller_loop(const alt_bn128_G1_precomp &prec_P,
const alt_bn128_G2_precomp &prec_Q);
alt_bn128_Fq12 alt_bn128_double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
const alt_bn128_G2_precomp &prec_Q1,
const alt_bn128_G1_precomp &prec_P2,
const alt_bn128_G2_precomp &prec_Q2);
alt_bn128_Fq12 alt_bn128_pairing(const alt_bn128_G1& P,
const alt_bn128_G2 &Q);
alt_bn128_GT alt_bn128_reduced_pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q);
alt_bn128_GT alt_bn128_affine_reduced_pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q);
} // libsnark
#endif // ALT_BN128_PAIRING_HPP_

View File

@@ -1,58 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
namespace libsnark {
void alt_bn128_pp::init_public_params()
{
init_alt_bn128_params();
}
alt_bn128_GT alt_bn128_pp::final_exponentiation(const alt_bn128_Fq12 &elt)
{
return alt_bn128_final_exponentiation(elt);
}
alt_bn128_G1_precomp alt_bn128_pp::precompute_G1(const alt_bn128_G1 &P)
{
return alt_bn128_precompute_G1(P);
}
alt_bn128_G2_precomp alt_bn128_pp::precompute_G2(const alt_bn128_G2 &Q)
{
return alt_bn128_precompute_G2(Q);
}
alt_bn128_Fq12 alt_bn128_pp::miller_loop(const alt_bn128_G1_precomp &prec_P,
const alt_bn128_G2_precomp &prec_Q)
{
return alt_bn128_miller_loop(prec_P, prec_Q);
}
alt_bn128_Fq12 alt_bn128_pp::double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
const alt_bn128_G2_precomp &prec_Q1,
const alt_bn128_G1_precomp &prec_P2,
const alt_bn128_G2_precomp &prec_Q2)
{
return alt_bn128_double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2);
}
alt_bn128_Fq12 alt_bn128_pp::pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q)
{
return alt_bn128_pairing(P, Q);
}
alt_bn128_Fq12 alt_bn128_pp::reduced_pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q)
{
return alt_bn128_reduced_pairing(P, Q);
}
} // libsnark

View File

@@ -1,50 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef ALT_BN128_PP_HPP_
#define ALT_BN128_PP_HPP_
#include "algebra/curves/public_params.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_pairing.hpp"
namespace libsnark {
class alt_bn128_pp {
public:
typedef alt_bn128_Fr Fp_type;
typedef alt_bn128_G1 G1_type;
typedef alt_bn128_G2 G2_type;
typedef alt_bn128_G1_precomp G1_precomp_type;
typedef alt_bn128_G2_precomp G2_precomp_type;
typedef alt_bn128_Fq Fq_type;
typedef alt_bn128_Fq2 Fqe_type;
typedef alt_bn128_Fq12 Fqk_type;
typedef alt_bn128_GT GT_type;
static const bool has_affine_pairing = false;
static void init_public_params();
static alt_bn128_GT final_exponentiation(const alt_bn128_Fq12 &elt);
static alt_bn128_G1_precomp precompute_G1(const alt_bn128_G1 &P);
static alt_bn128_G2_precomp precompute_G2(const alt_bn128_G2 &Q);
static alt_bn128_Fq12 miller_loop(const alt_bn128_G1_precomp &prec_P,
const alt_bn128_G2_precomp &prec_Q);
static alt_bn128_Fq12 double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
const alt_bn128_G2_precomp &prec_Q1,
const alt_bn128_G1_precomp &prec_P2,
const alt_bn128_G2_precomp &prec_Q2);
static alt_bn128_Fq12 pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q);
static alt_bn128_Fq12 reduced_pairing(const alt_bn128_G1 &P,
const alt_bn128_G2 &Q);
};
} // libsnark
#endif // ALT_BN128_PP_HPP_

View File

@@ -1,22 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef CURVE_UTILS_HPP_
#define CURVE_UTILS_HPP_
#include <cstdint>
#include "algebra/fields/bigint.hpp"
namespace libsnark {
template<typename GroupT, mp_size_t m>
GroupT scalar_mul(const GroupT &base, const bigint<m> &scalar);
} // libsnark
#include "algebra/curves/curve_utils.tcc"
#endif // CURVE_UTILS_HPP_

View File

@@ -1,37 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef CURVE_UTILS_TCC_
#define CURVE_UTILS_TCC_
namespace libsnark {
template<typename GroupT, mp_size_t m>
GroupT scalar_mul(const GroupT &base, const bigint<m> &scalar)
{
GroupT result = GroupT::zero();
bool found_one = false;
for (int64_t i = scalar.max_bits() - 1; i >= 0; --i)
{
if (found_one)
{
result = result.dbl();
}
if (scalar.test_bit(i))
{
found_one = true;
result = result + base;
}
}
return result;
}
} // libsnark
#endif // CURVE_UTILS_TCC_
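scalar_mul above is left-to-right double-and-add: starting at the most significant set bit of the scalar, it doubles the accumulator for each bit and adds the base whenever the bit is 1, so 13 = 0b1101 becomes ((base*2 + base)*2)*2 + base. The same loop specialized to plain integers, purely for illustration:
// Illustrative integer analogue of the double-and-add loop in scalar_mul above.
#include <cassert>
#include <cstdint>
uint64_t double_and_add_sketch(const uint64_t base, const uint64_t scalar)
{
    uint64_t result = 0;
    bool found_one = false;
    for (int i = 63; i >= 0; --i)
    {
        if (found_one)
        {
            result = result * 2;    // corresponds to result.dbl()
        }
        if ((scalar >> i) & 1)
        {
            found_one = true;
            result = result + base; // corresponds to result + base
        }
    }
    assert(result == base * scalar); // (ignoring overflow) the loop computes base * scalar
    return result;
}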

View File

@@ -1,103 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef PUBLIC_PARAMS_HPP_
#define PUBLIC_PARAMS_HPP_
#include <vector>
namespace libsnark {
/*
for every curve the user should define corresponding
public_params with the following typedefs:
Fp_type
G1_type
G2_type
G1_precomp_type
G2_precomp_type
affine_ate_G1_precomp_type
affine_ate_G2_precomp_type
Fq_type
Fqe_type
Fqk_type
GT_type
one should also define the following static methods:
void init_public_params();
GT<EC_ppT> final_exponentiation(const Fqk<EC_ppT> &elt);
G1_precomp<EC_ppT> precompute_G1(const G1<EC_ppT> &P);
G2_precomp<EC_ppT> precompute_G2(const G2<EC_ppT> &Q);
Fqk<EC_ppT> miller_loop(const G1_precomp<EC_ppT> &prec_P,
const G2_precomp<EC_ppT> &prec_Q);
affine_ate_G1_precomp<EC_ppT> affine_ate_precompute_G1(const G1<EC_ppT> &P);
affine_ate_G2_precomp<EC_ppT> affine_ate_precompute_G2(const G2<EC_ppT> &Q);
Fqk<EC_ppT> affine_ate_miller_loop(const affine_ate_G1_precomp<EC_ppT> &prec_P,
const affine_ate_G2_precomp<EC_ppT> &prec_Q);
Fqk<EC_ppT> affine_ate_e_over_e_miller_loop(const affine_ate_G1_precomp<EC_ppT> &prec_P1,
const affine_ate_G2_precomp<EC_ppT> &prec_Q1,
const affine_ate_G1_precomp<EC_ppT> &prec_P2,
const affine_ate_G2_precomp<EC_ppT> &prec_Q2);
Fqk<EC_ppT> affine_ate_e_times_e_over_e_miller_loop(const affine_ate_G1_precomp<EC_ppT> &prec_P1,
const affine_ate_G2_precomp<EC_ppT> &prec_Q1,
const affine_ate_G1_precomp<EC_ppT> &prec_P2,
const affine_ate_G2_precomp<EC_ppT> &prec_Q2,
const affine_ate_G1_precomp<EC_ppT> &prec_P3,
const affine_ate_G2_precomp<EC_ppT> &prec_Q3);
Fqk<EC_ppT> double_miller_loop(const G1_precomp<EC_ppT> &prec_P1,
const G2_precomp<EC_ppT> &prec_Q1,
const G1_precomp<EC_ppT> &prec_P2,
const G2_precomp<EC_ppT> &prec_Q2);
Fqk<EC_ppT> pairing(const G1<EC_ppT> &P,
const G2<EC_ppT> &Q);
GT<EC_ppT> reduced_pairing(const G1<EC_ppT> &P,
const G2<EC_ppT> &Q);
GT<EC_ppT> affine_reduced_pairing(const G1<EC_ppT> &P,
const G2<EC_ppT> &Q);
*/
template<typename EC_ppT>
using Fr = typename EC_ppT::Fp_type;
template<typename EC_ppT>
using G1 = typename EC_ppT::G1_type;
template<typename EC_ppT>
using G2 = typename EC_ppT::G2_type;
template<typename EC_ppT>
using G1_precomp = typename EC_ppT::G1_precomp_type;
template<typename EC_ppT>
using G2_precomp = typename EC_ppT::G2_precomp_type;
template<typename EC_ppT>
using affine_ate_G1_precomp = typename EC_ppT::affine_ate_G1_precomp_type;
template<typename EC_ppT>
using affine_ate_G2_precomp = typename EC_ppT::affine_ate_G2_precomp_type;
template<typename EC_ppT>
using Fq = typename EC_ppT::Fq_type;
template<typename EC_ppT>
using Fqe = typename EC_ppT::Fqe_type;
template<typename EC_ppT>
using Fqk = typename EC_ppT::Fqk_type;
template<typename EC_ppT>
using GT = typename EC_ppT::GT_type;
template<typename EC_ppT>
using Fr_vector = std::vector<Fr<EC_ppT> >;
template<typename EC_ppT>
using G1_vector = std::vector<G1<EC_ppT> >;
template<typename EC_ppT>
using G2_vector = std::vector<G2<EC_ppT> >;
} // libsnark
#endif // PUBLIC_PARAMS_HPP_
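The comment block above effectively defines a concept: any ppT exposing those typedefs and static methods can be used generically through the aliases below it. A small sketch written purely against that contract (the function name is illustrative):
// Sketch: a bilinearity check phrased only in terms of the public_params interface.
#include "algebra/curves/public_params.hpp"
using namespace libsnark;
template<typename ppT>
bool bilinearity_holds_sketch()
{
    const Fr<ppT> s = Fr<ppT>::random_element();
    const G1<ppT> P = Fr<ppT>::random_element() * G1<ppT>::one();
    const G2<ppT> Q = Fr<ppT>::random_element() * G2<ppT>::one();
    // e(sP, Q) == e(P, sQ) for a bilinear reduced pairing
    return ppT::reduced_pairing(s * P, Q) == ppT::reduced_pairing(P, s * Q);
}
// usage (assuming the alt_bn128_pp header): alt_bn128_pp::init_public_params();
// bool ok = bilinearity_holds_sketch<alt_bn128_pp>();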

View File

@@ -1,121 +0,0 @@
/**
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "common/profiling.hpp"
#ifdef CURVE_BN128
#include "algebra/curves/bn128/bn128_pp.hpp"
#endif
#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
#include <gtest/gtest.h>
using namespace libsnark;
template<typename ppT>
void pairing_test()
{
GT<ppT> GT_one = GT<ppT>::one();
printf("Running bilinearity tests:\n");
G1<ppT> P = (Fr<ppT>::random_element()) * G1<ppT>::one();
//G1<ppT> P = Fr<ppT>("2") * G1<ppT>::one();
G2<ppT> Q = (Fr<ppT>::random_element()) * G2<ppT>::one();
//G2<ppT> Q = Fr<ppT>("3") * G2<ppT>::one();
printf("P:\n");
P.print();
P.print_coordinates();
printf("Q:\n");
Q.print();
Q.print_coordinates();
printf("\n\n");
Fr<ppT> s = Fr<ppT>::random_element();
//Fr<ppT> s = Fr<ppT>("2");
G1<ppT> sP = s * P;
G2<ppT> sQ = s * Q;
printf("Pairing bilinearity tests (three must match):\n");
GT<ppT> ans1 = ppT::reduced_pairing(sP, Q);
GT<ppT> ans2 = ppT::reduced_pairing(P, sQ);
GT<ppT> ans3 = ppT::reduced_pairing(P, Q)^s;
ans1.print();
ans2.print();
ans3.print();
EXPECT_EQ(ans1, ans2);
EXPECT_EQ(ans2, ans3);
EXPECT_NE(ans1, GT_one);
EXPECT_EQ((ans1^Fr<ppT>::field_char()), GT_one);
printf("\n\n");
}
template<typename ppT>
void double_miller_loop_test()
{
const G1<ppT> P1 = (Fr<ppT>::random_element()) * G1<ppT>::one();
const G1<ppT> P2 = (Fr<ppT>::random_element()) * G1<ppT>::one();
const G2<ppT> Q1 = (Fr<ppT>::random_element()) * G2<ppT>::one();
const G2<ppT> Q2 = (Fr<ppT>::random_element()) * G2<ppT>::one();
const G1_precomp<ppT> prec_P1 = ppT::precompute_G1(P1);
const G1_precomp<ppT> prec_P2 = ppT::precompute_G1(P2);
const G2_precomp<ppT> prec_Q1 = ppT::precompute_G2(Q1);
const G2_precomp<ppT> prec_Q2 = ppT::precompute_G2(Q2);
const Fqk<ppT> ans_1 = ppT::miller_loop(prec_P1, prec_Q1);
const Fqk<ppT> ans_2 = ppT::miller_loop(prec_P2, prec_Q2);
const Fqk<ppT> ans_12 = ppT::double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2);
EXPECT_EQ(ans_1 * ans_2, ans_12);
}
template<typename ppT>
void affine_pairing_test()
{
GT<ppT> GT_one = GT<ppT>::one();
printf("Running bilinearity tests:\n");
G1<ppT> P = (Fr<ppT>::random_element()) * G1<ppT>::one();
G2<ppT> Q = (Fr<ppT>::random_element()) * G2<ppT>::one();
printf("P:\n");
P.print();
printf("Q:\n");
Q.print();
printf("\n\n");
Fr<ppT> s = Fr<ppT>::random_element();
G1<ppT> sP = s * P;
G2<ppT> sQ = s * Q;
printf("Pairing bilinearity tests (three must match):\n");
GT<ppT> ans1 = ppT::affine_reduced_pairing(sP, Q);
GT<ppT> ans2 = ppT::affine_reduced_pairing(P, sQ);
GT<ppT> ans3 = ppT::affine_reduced_pairing(P, Q)^s;
ans1.print();
ans2.print();
ans3.print();
EXPECT_EQ(ans1, ans2);
EXPECT_EQ(ans2, ans3);
EXPECT_NE(ans1, GT_one);
EXPECT_EQ((ans1^Fr<ppT>::field_char()), GT_one);
printf("\n\n");
}
TEST(algebra, bilinearity)
{
start_profiling();
alt_bn128_pp::init_public_params();
pairing_test<alt_bn128_pp>();
double_miller_loop_test<alt_bn128_pp>();
#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled
bn128_pp::init_public_params();
pairing_test<bn128_pp>();
double_miller_loop_test<bn128_pp>();
#endif
}

View File

@@ -1,153 +0,0 @@
/**
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#include "common/profiling.hpp"
#ifdef CURVE_BN128
#include "algebra/curves/bn128/bn128_pp.hpp"
#endif
#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
#include <sstream>
#include <gtest/gtest.h>
using namespace libsnark;
template<typename GroupT>
void test_mixed_add()
{
GroupT base, el, result;
base = GroupT::zero();
el = GroupT::zero();
el.to_special();
result = base.mixed_add(el);
EXPECT_EQ(result, base + el);
base = GroupT::zero();
el = GroupT::random_element();
el.to_special();
result = base.mixed_add(el);
EXPECT_EQ(result, base + el);
base = GroupT::random_element();
el = GroupT::zero();
el.to_special();
result = base.mixed_add(el);
EXPECT_EQ(result, base + el);
base = GroupT::random_element();
el = GroupT::random_element();
el.to_special();
result = base.mixed_add(el);
EXPECT_EQ(result, base + el);
base = GroupT::random_element();
el = base;
el.to_special();
result = base.mixed_add(el);
EXPECT_EQ(result, base.dbl());
}
template<typename GroupT>
void test_group()
{
bigint<1> rand1 = bigint<1>("76749407");
bigint<1> rand2 = bigint<1>("44410867");
bigint<1> randsum = bigint<1>("121160274");
GroupT zero = GroupT::zero();
EXPECT_EQ(zero, zero);
GroupT one = GroupT::one();
EXPECT_EQ(one, one);
GroupT two = bigint<1>(2l) * GroupT::one();
EXPECT_EQ(two, two);
GroupT five = bigint<1>(5l) * GroupT::one();
GroupT three = bigint<1>(3l) * GroupT::one();
GroupT four = bigint<1>(4l) * GroupT::one();
EXPECT_EQ(two+five, three+four);
GroupT a = GroupT::random_element();
GroupT b = GroupT::random_element();
EXPECT_NE(one, zero);
EXPECT_NE(a, zero);
EXPECT_NE(a, one);
EXPECT_NE(b, zero);
EXPECT_NE(b, one);
EXPECT_EQ(a.dbl(), a + a);
EXPECT_EQ(b.dbl(), b + b);
EXPECT_EQ(one.add(two), three);
EXPECT_EQ(two.add(one), three);
EXPECT_EQ(a + b, b + a);
EXPECT_EQ(a - a, zero);
EXPECT_EQ(a - b, a + (-b));
EXPECT_EQ(a - b, (-b) + a);
// handle special cases
EXPECT_EQ(zero + (-a), -a);
EXPECT_EQ(zero - a, -a);
EXPECT_EQ(a - zero, a);
EXPECT_EQ(a + zero, a);
EXPECT_EQ(zero + a, a);
EXPECT_EQ((a + b).dbl(), (a + b) + (b + a));
EXPECT_EQ(bigint<1>("2") * (a + b), (a + b) + (b + a));
EXPECT_EQ((rand1 * a) + (rand2 * a), (randsum * a));
EXPECT_EQ(GroupT::order() * a, zero);
EXPECT_EQ(GroupT::order() * one, zero);
EXPECT_NE((GroupT::order() * a) - a, zero);
EXPECT_NE((GroupT::order() * one) - one, zero);
test_mixed_add<GroupT>();
}
template<typename GroupT>
void test_mul_by_q()
{
GroupT a = GroupT::random_element();
EXPECT_EQ((GroupT::base_field_char()*a), a.mul_by_q());
}
template<typename GroupT>
void test_output()
{
GroupT g = GroupT::zero();
for (size_t i = 0; i < 1000; ++i)
{
std::stringstream ss;
ss << g;
GroupT gg;
ss >> gg;
EXPECT_EQ(g, gg);
/* use a random point in next iteration */
g = GroupT::random_element();
}
}
TEST(algebra, groups)
{
alt_bn128_pp::init_public_params();
test_group<G1<alt_bn128_pp> >();
test_output<G1<alt_bn128_pp> >();
test_group<G2<alt_bn128_pp> >();
test_output<G2<alt_bn128_pp> >();
test_mul_by_q<G2<alt_bn128_pp> >();
#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled
bn128_pp::init_public_params();
test_group<G1<bn128_pp> >();
test_output<G1<bn128_pp> >();
test_group<G2<bn128_pp> >();
test_output<G2<bn128_pp> >();
#endif
}

View File

@@ -1,45 +0,0 @@
/** @file
*****************************************************************************
Declaration of interfaces for the "basic radix-2" evaluation domain.
Roughly, the domain has size m = 2^k and consists of the m-th roots of unity.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef BASIC_RADIX2_DOMAIN_HPP_
#define BASIC_RADIX2_DOMAIN_HPP_
#include "algebra/evaluation_domain/evaluation_domain.hpp"
namespace libsnark {
template<typename FieldT>
class basic_radix2_domain : public evaluation_domain<FieldT> {
public:
FieldT omega;
basic_radix2_domain(const size_t m);
void FFT(std::vector<FieldT> &a);
void iFFT(std::vector<FieldT> &a);
void cosetFFT(std::vector<FieldT> &a, const FieldT &g);
void icosetFFT(std::vector<FieldT> &a, const FieldT &g);
std::vector<FieldT> lagrange_coeffs(const FieldT &t);
FieldT get_element(const size_t idx);
FieldT compute_Z(const FieldT &t);
void add_poly_Z(const FieldT &coeff, std::vector<FieldT> &H);
void divide_by_Z_on_coset(std::vector<FieldT> &P);
};
} // libsnark
#include "algebra/evaluation_domain/domains/basic_radix2_domain.tcc"
#endif // BASIC_RADIX2_DOMAIN_HPP_
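The vanishing polynomial of this domain is Z(x) = x^m - 1, which is what compute_Z evaluates; it is zero at every domain element omega^i. A small sketch exercising that, over alt_bn128_Fr from earlier in this diff (whose 2-adicity easily covers m = 8):
// Sketch: compute_Z vanishes on every element returned by get_element.
#include "algebra/evaluation_domain/domains/basic_radix2_domain.hpp"
#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
#include <cassert>
using namespace libsnark;
void vanishing_polynomial_sketch()
{
    init_alt_bn128_params();
    basic_radix2_domain<alt_bn128_Fr> domain(8); // m = 2^3 roots of unity
    for (size_t i = 0; i < 8; ++i)
    {
        assert(domain.compute_Z(domain.get_element(i)) == alt_bn128_Fr::zero());
    }
}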

View File

@@ -1,112 +0,0 @@
/** @file
*****************************************************************************
Implementation of interfaces for the "basic radix-2" evaluation domain.
See basic_radix2_domain.hpp .
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef BASIC_RADIX2_DOMAIN_TCC_
#define BASIC_RADIX2_DOMAIN_TCC_
#include "algebra/evaluation_domain/domains/basic_radix2_domain_aux.hpp"
namespace libsnark {
template<typename FieldT>
basic_radix2_domain<FieldT>::basic_radix2_domain(const size_t m) : evaluation_domain<FieldT>(m)
{
assert(m > 1);
const size_t logm = log2(m);
assert(logm <= (FieldT::s));
omega = get_root_of_unity<FieldT>(m);
}
template<typename FieldT>
void basic_radix2_domain<FieldT>::FFT(std::vector<FieldT> &a)
{
enter_block("Execute FFT");
assert(a.size() == this->m);
_basic_radix2_FFT(a, omega);
leave_block("Execute FFT");
}
template<typename FieldT>
void basic_radix2_domain<FieldT>::iFFT(std::vector<FieldT> &a)
{
enter_block("Execute inverse FFT");
assert(a.size() == this->m);
_basic_radix2_FFT(a, omega.inverse());
const FieldT sconst = FieldT(a.size()).inverse();
for (size_t i = 0; i < a.size(); ++i)
{
a[i] *= sconst;
}
leave_block("Execute inverse FFT");
}
template<typename FieldT>
void basic_radix2_domain<FieldT>::cosetFFT(std::vector<FieldT> &a, const FieldT &g)
{
enter_block("Execute coset FFT");
_multiply_by_coset(a, g);
FFT(a);
leave_block("Execute coset FFT");
}
template<typename FieldT>
void basic_radix2_domain<FieldT>::icosetFFT(std::vector<FieldT> &a, const FieldT &g)
{
enter_block("Execute inverse coset IFFT");
iFFT(a);
_multiply_by_coset(a, g.inverse());
leave_block("Execute inverse coset IFFT");
}
template<typename FieldT>
std::vector<FieldT> basic_radix2_domain<FieldT>::lagrange_coeffs(const FieldT &t)
{
return _basic_radix2_lagrange_coeffs(this->m, t);
}
template<typename FieldT>
FieldT basic_radix2_domain<FieldT>::get_element(const size_t idx)
{
return omega^idx;
}
template<typename FieldT>
FieldT basic_radix2_domain<FieldT>::compute_Z(const FieldT &t)
{
return (t^this->m) - FieldT::one();
}
template<typename FieldT>
void basic_radix2_domain<FieldT>::add_poly_Z(const FieldT &coeff, std::vector<FieldT> &H)
{
assert(H.size() == this->m+1);
H[this->m] += coeff;
H[0] -= coeff;
}
template<typename FieldT>
void basic_radix2_domain<FieldT>::divide_by_Z_on_coset(std::vector<FieldT> &P)
{
const FieldT coset = FieldT::multiplicative_generator;
const FieldT Z_inverse_at_coset = this->compute_Z(coset).inverse();
for (size_t i = 0; i < this->m; ++i)
{
P[i] *= Z_inverse_at_coset;
}
}
} // libsnark
#endif // BASIC_RADIX2_DOMAIN_TCC_
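A note on the vanishing polynomial used by compute_Z, add_poly_Z and divide_by_Z_on_coset above: over the basic radix-2 domain S = {omega^0, ..., omega^{m-1}} (the m-th roots of unity),

    Z_S(t) = \prod_{i=0}^{m-1} (t - \omega^i) = t^m - 1,
    Z_S(g\,\omega^i) = g^m (\omega^i)^m - 1 = g^m - 1.

So Z_S is the two-term polynomial t^m - 1 (which is why add_poly_Z only touches H[m] and H[0]), and it is constant on any coset g*S, which is why divide_by_Z_on_coset needs only the single inverse (g^m - 1)^{-1} computed from the multiplicative generator.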

View File

@ -1,48 +0,0 @@
/** @file
*****************************************************************************
Declaration of interfaces for auxiliary functions for the "basic radix-2" evaluation domain.
These functions compute the radix-2 FFT (in single- or multi-thread mode) and
also compute Lagrange coefficients.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef BASIC_RADIX2_DOMAIN_AUX_HPP_
#define BASIC_RADIX2_DOMAIN_AUX_HPP_
namespace libsnark {
/**
* Compute the radix-2 FFT of the vector a over the set S={omega^{0},...,omega^{m-1}}.
*/
template<typename FieldT>
void _basic_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega);
/**
* A multi-thread version of _basic_radix2_FFT.
*/
template<typename FieldT>
void _parallel_basic_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega);
/**
* Translate the vector a to a coset defined by g.
*/
template<typename FieldT>
void _multiply_by_coset(std::vector<FieldT> &a, const FieldT &g);
/**
* Compute the m Lagrange coefficients, relative to the set S={omega^{0},...,omega^{m-1}}, at the field element t.
*/
template<typename FieldT>
std::vector<FieldT> _basic_radix2_lagrange_coeffs(const size_t m, const FieldT &t);
} // libsnark
#include "algebra/evaluation_domain/domains/basic_radix2_domain_aux.tcc"
#endif // BASIC_RADIX2_DOMAIN_AUX_HPP_

View File

@ -1,242 +0,0 @@
/** @file
*****************************************************************************
Implementation of interfaces for auxiliary functions for the "basic radix-2" evaluation domain.
See basic_radix2_domain_aux.hpp .
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef BASIC_RADIX2_DOMAIN_AUX_TCC_
#define BASIC_RADIX2_DOMAIN_AUX_TCC_
#include <cassert>
#ifdef MULTICORE
#include <omp.h>
#endif
#include "algebra/fields/field_utils.hpp"
#include "common/profiling.hpp"
#include "common/utils.hpp"
namespace libsnark {
#ifdef MULTICORE
#define _basic_radix2_FFT _basic_parallel_radix2_FFT
#else
#define _basic_radix2_FFT _basic_serial_radix2_FFT
#endif
/*
Below we make use of pseudocode from [CLRS, 2nd ed., p. 864].
Also, note that it's the caller's responsibility to multiply by 1/N.
*/
template<typename FieldT>
void _basic_serial_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega)
{
const size_t n = a.size(), logn = log2(n);
assert(n == (1u << logn));
/* swapping in place (from Storer's book) */
for (size_t k = 0; k < n; ++k)
{
const size_t rk = bitreverse(k, logn);
if (k < rk)
std::swap(a[k], a[rk]);
}
size_t m = 1; // invariant: m = 2^{s-1}
for (size_t s = 1; s <= logn; ++s)
{
// w_m is 2^s-th root of unity now
const FieldT w_m = omega^(n/(2*m));
asm volatile ("/* pre-inner */");
for (size_t k = 0; k < n; k += 2*m)
{
FieldT w = FieldT::one();
for (size_t j = 0; j < m; ++j)
{
const FieldT t = w * a[k+j+m];
a[k+j+m] = a[k+j] - t;
a[k+j] += t;
w *= w_m;
}
}
asm volatile ("/* post-inner */");
m *= 2;
}
}
template<typename FieldT>
void _basic_parallel_radix2_FFT_inner(std::vector<FieldT> &a, const FieldT &omega, const size_t log_cpus)
{
const size_t num_cpus = UINT64_C(1)<<log_cpus;
const size_t m = a.size();
const size_t log_m = log2(m);
assert(m == UINT64_C(1)<<log_m);
if (log_m < log_cpus)
{
_basic_serial_radix2_FFT(a, omega);
return;
}
enter_block("Shuffle inputs");
std::vector<std::vector<FieldT> > tmp(num_cpus);
for (size_t j = 0; j < num_cpus; ++j)
{
tmp[j].resize(UINT64_C(1)<<(log_m-log_cpus), FieldT::zero());
}
#ifdef MULTICORE
#pragma omp parallel for
#endif
for (size_t j = 0; j < num_cpus; ++j)
{
const FieldT omega_j = omega^j;
const FieldT omega_step = omega^(j<<(log_m - log_cpus));
FieldT elt = FieldT::one();
for (size_t i = 0; i < UINT64_C(1)<<(log_m - log_cpus); ++i)
{
for (size_t s = 0; s < num_cpus; ++s)
{
// invariant: elt is omega^(j*idx)
const size_t idx = (i + (s<<(log_m - log_cpus))) % (1u << log_m);
tmp[j][i] += a[idx] * elt;
elt *= omega_step;
}
elt *= omega_j;
}
}
leave_block("Shuffle inputs");
enter_block("Execute sub-FFTs");
const FieldT omega_num_cpus = omega^num_cpus;
#ifdef MULTICORE
#pragma omp parallel for
#endif
for (size_t j = 0; j < num_cpus; ++j)
{
_basic_serial_radix2_FFT(tmp[j], omega_num_cpus);
}
leave_block("Execute sub-FFTs");
enter_block("Re-shuffle outputs");
#ifdef MULTICORE
#pragma omp parallel for
#endif
for (size_t i = 0; i < num_cpus; ++i)
{
for (size_t j = 0; j < UINT64_C(1)<<(log_m - log_cpus); ++j)
{
// now: i = idx >> (log_m - log_cpus) and j = idx % (1u << (log_m - log_cpus)), for idx = ((i<<(log_m-log_cpus))+j) % (1u << log_m)
a[(j<<log_cpus) + i] = tmp[i][j];
}
}
leave_block("Re-shuffle outputs");
}
template<typename FieldT>
void _basic_parallel_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega)
{
#ifdef MULTICORE
const size_t num_cpus = omp_get_max_threads();
#else
const size_t num_cpus = 1;
#endif
const size_t log_cpus = ((num_cpus & (num_cpus - 1)) == 0 ? log2(num_cpus) : log2(num_cpus) - 1);
#ifdef DEBUG
print_indent(); printf("* Invoking parallel FFT on 2^%zu CPUs (omp_get_max_threads = %zu)\n", log_cpus, num_cpus);
#endif
if (log_cpus == 0)
{
_basic_serial_radix2_FFT(a, omega);
}
else
{
_basic_parallel_radix2_FFT_inner(a, omega, log_cpus);
}
}
template<typename FieldT>
void _multiply_by_coset(std::vector<FieldT> &a, const FieldT &g)
{
//enter_block("Multiply by coset");
FieldT u = g;
for (size_t i = 1; i < a.size(); ++i)
{
a[i] *= u;
u *= g;
}
//leave_block("Multiply by coset");
}
template<typename FieldT>
std::vector<FieldT> _basic_radix2_lagrange_coeffs(const size_t m, const FieldT &t)
{
if (m == 1)
{
return std::vector<FieldT>(1, FieldT::one());
}
assert(m == (1u << log2(m)));
const FieldT omega = get_root_of_unity<FieldT>(m);
std::vector<FieldT> u(m, FieldT::zero());
/*
If t equals one of the roots of unity in S={omega^{0},...,omega^{m-1}}
then output 1 at the right place, and 0 elsewhere
*/
if ((t^m) == (FieldT::one()))
{
FieldT omega_i = FieldT::one();
for (size_t i = 0; i < m; ++i)
{
if (omega_i == t) // i.e., t equals omega^i
{
u[i] = FieldT::one();
return u;
}
omega_i *= omega;
}
}
/*
Otherwise, if t does not equal any of the roots of unity in S,
then compute each L_{i,S}(t) as Z_{S}(t) * v_i / (t-\omega^i)
where:
- Z_{S}(t) = \prod_{j} (t-\omega^j) = (t^m-1), and
- v_{i} = 1 / \prod_{j \neq i} (\omega^i-\omega^j).
Below we use the fact that v_{0} = 1/m and v_{i+1} = \omega * v_{i}.
*/
const FieldT Z = (t^m)-FieldT::one();
FieldT l = Z * FieldT(m).inverse();
FieldT r = FieldT::one();
for (size_t i = 0; i < m; ++i)
{
u[i] = l * (t - r).inverse();
l *= omega;
r *= omega;
}
return u;
}
} // libsnark
#endif // BASIC_RADIX2_DOMAIN_AUX_TCC_
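The serial FFT above is the textbook iterative Cooley-Tukey butterfly, specialized to a prime field. The following standalone sketch mirrors that loop structure over the toy field Z_17 with omega = 2 (an element of order 8); the prime, the root of unity and the helper names are illustrative choices for this sketch only, not libsnark parameters or APIs. It performs a forward transform, a transform with omega^{-1}, and the caller-side multiplication by 1/N noted above, recovering the input.

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

static const uint64_t P = 17;                                  // toy prime field Z_17

static uint64_t mulmod(uint64_t a, uint64_t b) { return (a * b) % P; }

static uint64_t powmod(uint64_t b, uint64_t e)
{
    uint64_t r = 1;
    while (e) { if (e & 1) r = mulmod(r, b); b = mulmod(b, b); e >>= 1; }
    return r;
}

// In-place radix-2 FFT: bit-reverse permutation, then log2(n) butterfly rounds,
// following the same structure as _basic_serial_radix2_FFT.
static void toy_radix2_FFT(std::vector<uint64_t> &a, uint64_t omega)
{
    const size_t n = a.size();
    size_t logn = 0;
    while ((size_t(1) << logn) < n) ++logn;
    for (size_t k = 0; k < n; ++k)
    {
        size_t rk = 0;
        for (size_t b = 0; b < logn; ++b) rk |= ((k >> b) & 1) << (logn - 1 - b);
        if (k < rk) std::swap(a[k], a[rk]);
    }
    for (size_t m = 1; m < n; m *= 2)
    {
        const uint64_t w_m = powmod(omega, n / (2 * m));       // 2m-th root of unity
        for (size_t k = 0; k < n; k += 2 * m)
        {
            uint64_t w = 1;
            for (size_t j = 0; j < m; ++j)
            {
                const uint64_t t = mulmod(w, a[k + j + m]);
                a[k + j + m] = (a[k + j] + P - t) % P;
                a[k + j] = (a[k + j] + t) % P;
                w = mulmod(w, w_m);
            }
        }
    }
}

int main()
{
    std::vector<uint64_t> a = {3, 1, 4, 1, 5, 9, 2, 6};
    const uint64_t omega = 2;                                  // 2^8 = 256 = 1 (mod 17)
    toy_radix2_FFT(a, omega);                                  // forward transform
    toy_radix2_FFT(a, powmod(omega, P - 2));                   // transform with omega^{-1}
    const uint64_t n_inv = powmod(a.size(), P - 2);            // multiply by 1/N, as the caller must
    for (uint64_t &x : a) x = mulmod(x, n_inv);
    for (uint64_t x : a) std::printf("%llu ", (unsigned long long)x);   // prints the original vector
    std::printf("\n");
    return 0;
}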

View File

@ -1,125 +0,0 @@
/** @file
*****************************************************************************
Declaration of interfaces for evaluation domains.
Roughly, given a desired size m for the domain, the constructor selects
a domain S of size ~m, chosen so as to optimize
- computations of Lagrange polynomials, and
- FFT/iFFT computations.
An evaluation domain also provides other functions, e.g., accessing
individual elements in S or evaluating its vanishing polynomial.
The descriptions below make use of the definition of a *Lagrange polynomial*,
which we recall. Given a field F, a subset S=(a_i)_i of F, and an index idx
in {0,...,|S|-1}, the idx-th Lagrange polynomial (with respect to the subset S) is defined to be
\f[   L_{idx,S}(z) := \prod_{k \neq idx}   (z - a_k) / \prod_{k \neq idx}  (a_{idx} - a_k)   \f]
Note that, by construction:
\f[   L_{idx,S}(a_{idx}) = 1  \text{ and }  \forall j \neq idx: L_{idx,S}(a_j) = 0  \f]
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef EVALUATION_DOMAIN_HPP_
#define EVALUATION_DOMAIN_HPP_
#include <memory>
namespace libsnark {
/**
* An evaluation domain.
*/
template<typename FieldT>
class evaluation_domain {
public:
const size_t m;
/**
* Construct an evaluation domain S of size m, if possible.
*
* (See the function get_evaluation_domain below.)
*/
evaluation_domain(const size_t m) : m(m) {};
/**
* Get the idx-th element in S.
*/
virtual FieldT get_element(const size_t idx) = 0;
/**
* Compute the FFT, over the domain S, of the vector a.
*/
virtual void FFT(std::vector<FieldT> &a) = 0;
/**
* Compute the inverse FFT, over the domain S, of the vector a.
*/
virtual void iFFT(std::vector<FieldT> &a) = 0;
/**
* Compute the FFT, over the domain g*S, of the vector a.
*/
virtual void cosetFFT(std::vector<FieldT> &a, const FieldT &g) = 0;
/**
* Compute the inverse FFT, over the domain g*S, of the vector a.
*/
virtual void icosetFFT(std::vector<FieldT> &a, const FieldT &g) = 0;
/**
* Evaluate all Lagrange polynomials.
*
* The inputs are:
* - an integer m
* - an element t
* The output is a vector (b_{0},...,b_{m-1})
* where b_{i} is the evaluation of L_{i,S}(z) at z = t.
*/
virtual std::vector<FieldT> lagrange_coeffs(const FieldT &t) = 0;
/**
* Evaluate the vanishing polynomial of S at the field element t.
*/
virtual FieldT compute_Z(const FieldT &t) = 0;
/**
* Add the coefficients of the vanishing polynomial of S to the coefficients of the polynomial H.
*/
virtual void add_poly_Z(const FieldT &coeff, std::vector<FieldT> &H) = 0;
/**
* Multiply by the evaluation, on a coset of S, of the inverse of the vanishing polynomial of S.
*/
virtual void divide_by_Z_on_coset(std::vector<FieldT> &P) = 0;
};
/**
* Return an evaluation domain object in which the domain S has size |S| >= min_size.
* The function chooses from different supported domains, depending on min_size.
*/
template<typename FieldT>
std::shared_ptr<evaluation_domain<FieldT> > get_evaluation_domain(const size_t min_size);
/**
* Naive evaluation of a *single* Lagrange polynomial, used for testing purposes.
*
* The inputs are:
* - an integer m
* - a domain S = (a_{0},...,a_{m-1}) of size m
* - a field element t
* - an index idx in {0,...,m-1}
* The output is the polynomial L_{idx,S}(z) evaluated at z = t.
*/
template<typename FieldT>
FieldT lagrange_eval(const size_t m, const std::vector<FieldT> &domain, const FieldT &t, const size_t idx);
} // libsnark
#include "algebra/evaluation_domain/evaluation_domain.tcc"
#endif // EVALUATION_DOMAIN_HPP_

View File

@ -1,117 +0,0 @@
/** @file
*****************************************************************************
Implementation of interfaces for evaluation domains.
See evaluation_domain.hpp .
We currently implement, and select among, three types of domains:
- "basic radix-2": the domain has size m = 2^k and consists of the m-th roots of unity
- "extended radix-2": the domain has size m = 2^{k+1} and consists of "the m-th roots of unity" union "a coset"
- "step radix-2": the domain has size m = 2^k + 2^r and consists of "the 2^k-th roots of unity" union "a coset of 2^r-th roots of unity"
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef EVALUATION_DOMAIN_TCC_
#define EVALUATION_DOMAIN_TCC_
#include <cassert>
#include "algebra/fields/field_utils.hpp"
#include "algebra/evaluation_domain/domains/basic_radix2_domain.hpp"
namespace libsnark {
template<typename FieldT>
std::shared_ptr<evaluation_domain<FieldT> > get_evaluation_domain(const size_t min_size)
{
assert(min_size > 1);
const size_t log_min_size = log2(min_size);
assert(log_min_size <= (FieldT::s+1));
std::shared_ptr<evaluation_domain<FieldT> > result;
if (min_size == (1u << log_min_size))
{
if (log_min_size == FieldT::s+1)
{
if (!inhibit_profiling_info)
{
print_indent(); printf("* Selected domain: extended_radix2\n");
}
assert(0);
}
else
{
if (!inhibit_profiling_info)
{
print_indent(); printf("* Selected domain: basic_radix2\n");
}
result.reset(new basic_radix2_domain<FieldT>(min_size));
}
}
else
{
const size_t big = UINT64_C(1)<<(log2(min_size)-1);
const size_t small = min_size - big;
const size_t rounded_small = (UINT64_C(1)<<log2(small));
if (big == rounded_small)
{
if (log2(big + rounded_small) < FieldT::s+1)
{
if (!inhibit_profiling_info)
{
print_indent(); printf("* Selected domain: basic_radix2\n");
}
result.reset(new basic_radix2_domain<FieldT>(big + rounded_small));
}
else
{
if (!inhibit_profiling_info)
{
print_indent(); printf("* Selected domain: extended_radix2\n");
}
assert(0);
}
}
else
{
if (!inhibit_profiling_info)
{
print_indent(); printf("* Selected domain: step_radix2\n");
}
assert(0);
}
}
return result;
}
template<typename FieldT>
FieldT lagrange_eval(const size_t m, const std::vector<FieldT> &domain, const FieldT &t, const size_t idx)
{
assert(m == domain.size());
assert(idx < m);
FieldT num = FieldT::one();
FieldT denom = FieldT::one();
for (size_t k = 0; k < m; ++k)
{
if (k == idx)
{
continue;
}
num *= t - domain[k];
denom *= domain[idx] - domain[k];
}
return num * denom.inverse();
}
} // libsnark
#endif // EVALUATION_DOMAIN_TCC_
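The naive lagrange_eval above is independent of any FFT machinery, so its defining property (the idx-th Lagrange polynomial is 1 at a_{idx} and 0 at every other domain point) can be checked in isolation. The sketch below does this over the toy field Z_17 with the domain {1, 2, 4, 8}; both the prime and the domain are illustrative choices for this sketch, not libsnark parameters.

#include <cstdint>
#include <cstdio>
#include <vector>

static const uint64_t P = 17;                                  // toy prime field Z_17

static uint64_t inv_mod(uint64_t x)                            // Fermat inverse: x^(P-2) mod P
{
    uint64_t r = 1, e = P - 2;
    while (e) { if (e & 1) r = (r * x) % P; x = (x * x) % P; e >>= 1; }
    return r;
}

// Same product formula as lagrange_eval: prod_{k != idx} (t - a_k) / (a_idx - a_k).
static uint64_t toy_lagrange_eval(const std::vector<uint64_t> &domain, uint64_t t, size_t idx)
{
    uint64_t num = 1, denom = 1;
    for (size_t k = 0; k < domain.size(); ++k)
    {
        if (k == idx) continue;
        num = (num * ((t + P - domain[k]) % P)) % P;
        denom = (denom * ((domain[idx] + P - domain[k]) % P)) % P;
    }
    return (num * inv_mod(denom)) % P;
}

int main()
{
    const std::vector<uint64_t> domain = {1, 2, 4, 8};
    for (size_t idx = 0; idx < domain.size(); ++idx)
    {
        for (size_t j = 0; j < domain.size(); ++j)
        {
            // Expected: 1 when idx == j, 0 otherwise.
            std::printf("L_%zu(a_%zu) = %llu\n", idx, j,
                        (unsigned long long)toy_lagrange_eval(domain, domain[j], idx));
        }
    }
    return 0;
}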

View File

@ -1,31 +0,0 @@
/** @file
*****************************************************************************
Declaration of interfaces for (square-and-multiply) exponentiation.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef EXPONENTIATION_HPP_
#define EXPONENTIATION_HPP_
#include <cstdint>
#include "algebra/fields/bigint.hpp"
namespace libsnark {
template<typename FieldT, mp_size_t m>
FieldT power(const FieldT &base, const bigint<m> &exponent);
template<typename FieldT>
FieldT power(const FieldT &base, const uint64_t exponent);
} // libsnark
#include "algebra/exponentiation/exponentiation.tcc"
#endif // EXPONENTIATION_HPP_

View File

@ -1,53 +0,0 @@
/** @file
*****************************************************************************
Implementation of interfaces for (square-and-multiply) exponentiation.
See exponentiation.hpp .
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef EXPONENTIATION_TCC_
#define EXPONENTIATION_TCC_
#include "common/utils.hpp"
namespace libsnark {
template<typename FieldT, mp_size_t m>
FieldT power(const FieldT &base, const bigint<m> &exponent)
{
FieldT result = FieldT::one();
bool found_one = false;
for (int64_t i = exponent.max_bits() - 1; i >= 0; --i)
{
if (found_one)
{
result = result * result;
}
if (exponent.test_bit(i))
{
found_one = true;
result = result * base;
}
}
return result;
}
template<typename FieldT>
FieldT power(const FieldT &base, const uint64_t exponent)
{
return power<FieldT>(base, bigint<1>(exponent));
}
} // libsnark
#endif // EXPONENTIATION_TCC_
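The power() implementation above is plain most-significant-bit-first square-and-multiply: square once per remaining exponent bit, and multiply by the base whenever the current bit is set. The self-contained sketch below mirrors that loop over uint64_t arithmetic modulo a toy prime (1000003); the modulus and the function name are illustrative choices, not libsnark definitions.

#include <cstdint>
#include <cstdio>

static uint64_t toy_power(uint64_t base, uint64_t exponent, uint64_t p)
{
    uint64_t result = 1;
    bool found_one = false;                     // skip the leading squarings, as power() does
    for (int i = 63; i >= 0; --i)
    {
        if (found_one)
        {
            result = (result * result) % p;     // square once per remaining bit
        }
        if ((exponent >> i) & 1)
        {
            found_one = true;
            result = (result * base) % p;       // multiply when the bit is set
        }
    }
    return result;
}

int main()
{
    std::printf("%llu\n", (unsigned long long)toy_power(5, 13, 1000003));   // 5^13 mod 1000003
    return 0;
}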

View File

@ -1,70 +0,0 @@
/** @file
*****************************************************************************
Declaration of bigint wrapper class around GMP's MPZ long integers.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef BIGINT_HPP_
#define BIGINT_HPP_
#include <cstddef>
#include <iostream>
#include <gmp.h>
#include "common/serialization.hpp"
namespace libsnark {
template<mp_size_t n> class bigint;
template<mp_size_t n> std::ostream& operator<<(std::ostream &, const bigint<n>&);
template<mp_size_t n> std::istream& operator>>(std::istream &, bigint<n>&);
/**
* Wrapper class around GMP's MPZ long integers. It supports arithmetic operations,
* serialization and randomization. Serialization is fragile, see common/serialization.hpp.
*/
template<mp_size_t n>
class bigint {
public:
static const mp_size_t N = n;
mp_limb_t data[n] = {0};
bigint() = default;
bigint(const uint64_t x); /// Initialize from a small integer
bigint(const char* s); /// Initialize from a string containing an integer in decimal notation
bigint(const mpz_t r); /// Initialize from MPZ element
void print() const;
void print_hex() const;
bool operator==(const bigint<n>& other) const;
bool operator!=(const bigint<n>& other) const;
void clear();
bool is_zero() const;
size_t max_bits() const { return n * GMP_NUMB_BITS; }
size_t num_bits() const;
uint64_t as_uint64() const; /* return the least-significant limb of the integer */
void to_mpz(mpz_t r) const;
bool test_bit(const std::size_t bitno) const;
template<mp_size_t m> inline void operator+=(const bigint<m>& other);
template<mp_size_t m> inline bigint<n+m> operator*(const bigint<m>& other) const;
template<mp_size_t d> static inline void div_qr(bigint<n-d+1>& quotient, bigint<d>& remainder,
const bigint<n>& dividend, const bigint<d>& divisor);
template<mp_size_t m> inline bigint<m> shorten(const bigint<m>& q, const char *msg) const;
inline void limit(const bigint<n>& q, const char *msg) const;
bool operator>(const bigint<n>& other) const;
bigint& randomize();
friend std::ostream& operator<< <n>(std::ostream &out, const bigint<n> &b);
friend std::istream& operator>> <n>(std::istream &in, bigint<n> &b);
};
} // libsnark
#include "algebra/fields/bigint.tcc"
#endif

View File

@ -1,279 +0,0 @@
/** @file
*****************************************************************************
Implementation of bigint wrapper class around GMP's MPZ long integers.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef BIGINT_TCC_
#define BIGINT_TCC_
#include <cassert>
#include <climits>
#include <cstring>
#include "sodium.h"
namespace libsnark {
template<mp_size_t n>
bigint<n>::bigint(const uint64_t x) /// Initialize from a small integer
{
static_assert(UINT64_MAX <= GMP_NUMB_MAX, "uint64_t does not fit in a GMP limb");
this->data[0] = x;
}
template<mp_size_t n>
bigint<n>::bigint(const char* s) /// Initialize from a string containing an integer in decimal notation
{
size_t l = strlen(s);
unsigned char* s_copy = new unsigned char[l];
for (size_t i = 0; i < l; ++i)
{
assert(s[i] >= '0' && s[i] <= '9');
s_copy[i] = s[i] - '0';
}
mp_size_t limbs_written = mpn_set_str(this->data, s_copy, l, 10);
assert(limbs_written <= n);
delete[] s_copy;
}
template<mp_size_t n>
bigint<n>::bigint(const mpz_t r) /// Initialize from MPZ element
{
mpz_t k;
mpz_init_set(k, r);
for (size_t i = 0; i < n; ++i)
{
data[i] = mpz_get_ui(k);
mpz_fdiv_q_2exp(k, k, GMP_NUMB_BITS);
}
assert(mpz_sgn(k) == 0);
mpz_clear(k);
}
template<mp_size_t n>
void bigint<n>::print() const
{
gmp_printf("%Nd\n", this->data, n);
}
template<mp_size_t n>
void bigint<n>::print_hex() const
{
gmp_printf("%Nx\n", this->data, n);
}
template<mp_size_t n>
bool bigint<n>::operator==(const bigint<n>& other) const
{
return (mpn_cmp(this->data, other.data, n) == 0);
}
template<mp_size_t n>
bool bigint<n>::operator!=(const bigint<n>& other) const
{
return !(operator==(other));
}
template<mp_size_t n>
void bigint<n>::clear()
{
mpn_zero(this->data, n);
}
template<mp_size_t n>
bool bigint<n>::is_zero() const
{
for (mp_size_t i = 0; i < n; ++i)
{
if (this->data[i])
{
return false;
}
}
return true;
}
template<mp_size_t n>
size_t bigint<n>::num_bits() const
{
/*
for (int64_t i = max_bits(); i >= 0; --i)
{
if (this->test_bit(i))
{
return i+1;
}
}
return 0;
*/
for (int64_t i = n-1; i >= 0; --i)
{
mp_limb_t x = this->data[i];
if (x == 0)
{
continue;
}
else
{
static_assert(GMP_NUMB_MAX <= ULLONG_MAX, "coercing limb to unsigned long long might truncate");
return ((i+1) * GMP_NUMB_BITS) - __builtin_clzll(x);
}
}
return 0;
}
template<mp_size_t n>
uint64_t bigint<n>::as_uint64() const
{
return this->data[0];
}
template<mp_size_t n>
void bigint<n>::to_mpz(mpz_t r) const
{
mpz_set_ui(r, 0);
for (int i = n-1; i >= 0; --i)
{
mpz_mul_2exp(r, r, GMP_NUMB_BITS);
mpz_add_ui(r, r, this->data[i]);
}
}
template<mp_size_t n>
bool bigint<n>::test_bit(const std::size_t bitno) const
{
if (bitno >= n * GMP_NUMB_BITS)
{
return false;
}
else
{
const std::size_t part = bitno/GMP_NUMB_BITS;
const std::size_t bit = bitno - (GMP_NUMB_BITS*part);
const mp_limb_t one = 1;
return (this->data[part] & (one<<bit));
}
}
template<mp_size_t n> template<mp_size_t m>
inline void bigint<n>::operator+=(const bigint<m>& other)
{
static_assert(n >= m, "first arg must not be smaller than second arg for bigint in-place add");
mpn_add(data, data, n, other.data, m);
}
template<mp_size_t n> template<mp_size_t m>
inline bigint<n+m> bigint<n>::operator*(const bigint<m>& other) const
{
static_assert(n >= m, "first arg must not be smaller than second arg for bigint mul");
bigint<n+m> res;
mpn_mul(res.data, data, n, other.data, m);
return res;
}
template<mp_size_t n> template<mp_size_t d>
inline void bigint<n>::div_qr(bigint<n-d+1>& quotient, bigint<d>& remainder,
const bigint<n>& dividend, const bigint<d>& divisor)
{
static_assert(n >= d, "dividend must not be smaller than divisor for bigint::div_qr");
assert(divisor.data[d-1] != 0);
mpn_tdiv_qr(quotient.data, remainder.data, 0, dividend.data, n, divisor.data, d);
}
// Return a copy shortened to m limbs provided it is less than limit, throwing std::domain_error if not in range.
template<mp_size_t n> template<mp_size_t m>
inline bigint<m> bigint<n>::shorten(const bigint<m>& q, const char *msg) const
{
static_assert(m <= n, "number of limbs must not increase for bigint::shorten");
for (mp_size_t i = m; i < n; i++) { // high-order limbs
if (data[i] != 0) {
throw std::domain_error(msg);
}
}
bigint<m> res;
mpn_copyi(res.data, data, m);
res.limit(q, msg);
return res;
}
template<mp_size_t n>
inline void bigint<n>::limit(const bigint<n>& q, const char *msg) const
{
if (!(q > *this)) {
throw std::domain_error(msg);
}
}
template<mp_size_t n>
inline bool bigint<n>::operator>(const bigint<n>& other) const
{
return mpn_cmp(this->data, other.data, n) > 0;
}
template<mp_size_t n>
bigint<n>& bigint<n>::randomize()
{
assert(GMP_NUMB_BITS == sizeof(mp_limb_t) * 8);
randombytes_buf(this->data, sizeof(mp_limb_t) * n);
return (*this);
}
template<mp_size_t n>
std::ostream& operator<<(std::ostream &out, const bigint<n> &b)
{
#ifdef BINARY_OUTPUT
out.write((char*)b.data, sizeof(b.data[0]) * n);
#else
mpz_t t;
mpz_init(t);
b.to_mpz(t);
out << t;
mpz_clear(t);
#endif
return out;
}
template<mp_size_t n>
std::istream& operator>>(std::istream &in, bigint<n> &b)
{
#ifdef BINARY_OUTPUT
in.read((char*)b.data, sizeof(b.data[0]) * n);
#else
std::string s;
in >> s;
size_t l = s.size();
unsigned char* s_copy = new unsigned char[l];
for (size_t i = 0; i < l; ++i)
{
assert(s[i] >= '0' && s[i] <= '9');
s_copy[i] = s[i] - '0';
}
mp_size_t limbs_written = mpn_set_str(b.data, s_copy, l, 10);
assert(limbs_written <= n);
delete[] s_copy;
#endif
return in;
}
} // libsnark
#endif // BIGINT_TCC_

View File

@ -1,51 +0,0 @@
/** @file
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FIELD_UTILS_HPP_
#define FIELD_UTILS_HPP_
#include <cstdint>
#include "common/utils.hpp"
#include "algebra/fields/bigint.hpp"
namespace libsnark {
// returns root of unity of order n (for n a power of 2), if one exists
template<typename FieldT>
FieldT get_root_of_unity(const size_t n);
template<typename FieldT>
std::vector<FieldT> pack_int_vector_into_field_element_vector(const std::vector<size_t> &v, const size_t w);
template<typename FieldT>
std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v, const size_t chunk_bits);
template<typename FieldT>
std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v);
template<typename FieldT>
std::vector<FieldT> convert_bit_vector_to_field_element_vector(const bit_vector &v);
template<typename FieldT>
bit_vector convert_field_element_vector_to_bit_vector(const std::vector<FieldT> &v);
template<typename FieldT>
bit_vector convert_field_element_to_bit_vector(const FieldT &el);
template<typename FieldT>
bit_vector convert_field_element_to_bit_vector(const FieldT &el, const size_t bitcount);
template<typename FieldT>
FieldT convert_bit_vector_to_field_element(const bit_vector &v);
template<typename FieldT>
void batch_invert(std::vector<FieldT> &vec);
} // libsnark
#include "algebra/fields/field_utils.tcc"
#endif // FIELD_UTILS_HPP_
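The bit-vector converters declared here (implemented in field_utils.tcc below) use a least-significant-bit-first convention: convert_bit_vector_to_field_element accumulates res += b ? c : 0 while doubling c each step. A toy round trip with uint64_t standing in for the field element (an illustrative stand-in, not the libsnark type):

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const uint64_t el = 0xDEADBEEF;
    std::vector<bool> bits;
    for (size_t i = 0; i < 64; ++i) bits.push_back((el >> i) & 1);   // LSB first, as in convert_field_element_to_bit_vector
    uint64_t res = 0, c = 1;
    for (bool b : bits) { if (b) res += c; c += c; }                 // as in convert_bit_vector_to_field_element
    std::printf("%s\n", res == el ? "round trip ok" : "mismatch");
    return 0;
}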

View File

@ -1,183 +0,0 @@
/** @file
*****************************************************************************
Implementation of misc. math and serialization utility functions
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FIELD_UTILS_TCC_
#define FIELD_UTILS_TCC_
#include "common/utils.hpp"
namespace libsnark {
template<typename FieldT>
FieldT coset_shift()
{
return FieldT::multiplicative_generator.squared();
}
template<typename FieldT>
FieldT get_root_of_unity(const size_t n)
{
const size_t logn = log2(n);
assert(n == (1u << logn));
assert(logn <= FieldT::s);
FieldT omega = FieldT::root_of_unity;
for (size_t i = FieldT::s; i > logn; --i)
{
omega *= omega;
}
return omega;
}
template<typename FieldT>
std::vector<FieldT> pack_int_vector_into_field_element_vector(const std::vector<size_t> &v, const size_t w)
{
const size_t chunk_bits = FieldT::capacity();
const size_t repacked_size = div_ceil(v.size() * w, chunk_bits);
std::vector<FieldT> result(repacked_size);
for (size_t i = 0; i < repacked_size; ++i)
{
bigint<FieldT::num_limbs> b;
for (size_t j = 0; j < chunk_bits; ++j)
{
const size_t word_index = (i * chunk_bits + j) / w;
const size_t pos_in_word = (i * chunk_bits + j) % w;
const size_t word_or_0 = (word_index < v.size() ? v[word_index] : 0);
const size_t bit = (word_or_0 >> pos_in_word) & 1;
b.data[j / GMP_NUMB_BITS] |= bit << (j % GMP_NUMB_BITS);
}
result[i] = FieldT(b);
}
return result;
}
template<typename FieldT>
std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v, const size_t chunk_bits)
{
assert(chunk_bits <= FieldT::capacity());
const size_t repacked_size = div_ceil(v.size(), chunk_bits);
std::vector<FieldT> result(repacked_size);
for (size_t i = 0; i < repacked_size; ++i)
{
bigint<FieldT::num_limbs> b;
for (size_t j = 0; j < chunk_bits; ++j)
{
b.data[j / GMP_NUMB_BITS] |= ((i * chunk_bits + j) < v.size() && v[i * chunk_bits + j] ? 1ll : 0ll) << (j % GMP_NUMB_BITS);
}
result[i] = FieldT(b);
}
return result;
}
template<typename FieldT>
std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v)
{
return pack_bit_vector_into_field_element_vector<FieldT>(v, FieldT::capacity());
}
template<typename FieldT>
std::vector<FieldT> convert_bit_vector_to_field_element_vector(const bit_vector &v)
{
std::vector<FieldT> result;
result.reserve(v.size());
for (const bool b : v)
{
result.emplace_back(b ? FieldT::one() : FieldT::zero());
}
return result;
}
template<typename FieldT>
bit_vector convert_field_element_vector_to_bit_vector(const std::vector<FieldT> &v)
{
bit_vector result;
for (const FieldT &el : v)
{
const bit_vector el_bits = convert_field_element_to_bit_vector<FieldT>(el);
result.insert(result.end(), el_bits.begin(), el_bits.end());
}
return result;
}
template<typename FieldT>
bit_vector convert_field_element_to_bit_vector(const FieldT &el)
{
bit_vector result;
bigint<FieldT::num_limbs> b = el.as_bigint();
for (size_t i = 0; i < FieldT::size_in_bits(); ++i)
{
result.push_back(b.test_bit(i));
}
return result;
}
template<typename FieldT>
bit_vector convert_field_element_to_bit_vector(const FieldT &el, const size_t bitcount)
{
bit_vector result = convert_field_element_to_bit_vector(el);
result.resize(bitcount);
return result;
}
template<typename FieldT>
FieldT convert_bit_vector_to_field_element(const bit_vector &v)
{
assert(v.size() <= FieldT::size_in_bits());
FieldT res = FieldT::zero();
FieldT c = FieldT::one();
for (bool b : v)
{
res += b ? c : FieldT::zero();
c += c;
}
return res;
}
template<typename FieldT>
void batch_invert(std::vector<FieldT> &vec)
{
std::vector<FieldT> prod;
prod.reserve(vec.size());
FieldT acc = FieldT::one();
for (auto el : vec)
{
assert(!el.is_zero());
prod.emplace_back(acc);
acc = acc * el;
}
FieldT acc_inverse = acc.inverse();
for (int64_t i = vec.size()-1; i >= 0; --i)
{
const FieldT old_el = vec[i];
vec[i] = acc_inverse * prod[i];
acc_inverse = acc_inverse * old_el;
}
}
} // libsnark
#endif // FIELD_UTILS_TCC_
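batch_invert above is Montgomery's batch-inversion trick: one field inversion plus a linear number of multiplications inverts an entire vector. A self-contained sketch over the toy field Z_17 (the prime and the Fermat-inverse helper are illustrative choices, not libsnark code):

#include <cstdint>
#include <cstdio>
#include <vector>

static const uint64_t P = 17;

static uint64_t inv_mod(uint64_t x)                            // the single inversion, via Fermat: x^(P-2)
{
    uint64_t r = 1, e = P - 2;
    while (e) { if (e & 1) r = (r * x) % P; x = (x * x) % P; e >>= 1; }
    return r;
}

static void toy_batch_invert(std::vector<uint64_t> &vec)
{
    std::vector<uint64_t> prod;
    prod.reserve(vec.size());
    uint64_t acc = 1;
    for (uint64_t el : vec)                                    // prod[i] = vec[0] * ... * vec[i-1]
    {
        prod.push_back(acc);
        acc = (acc * el) % P;
    }
    uint64_t acc_inv = inv_mod(acc);                           // the one expensive inversion
    for (size_t i = vec.size(); i-- > 0; )
    {
        const uint64_t old_el = vec[i];
        vec[i] = (acc_inv * prod[i]) % P;                      // acc_inv == (vec[0]*...*vec[i])^{-1} here
        acc_inv = (acc_inv * old_el) % P;
    }
}

int main()
{
    std::vector<uint64_t> v = {2, 3, 5, 7};
    toy_batch_invert(v);
    for (uint64_t x : v) std::printf("%llu ", (unsigned long long)x);   // 9 6 7 5: each x * x^{-1} = 1 mod 17
    std::printf("\n");
    return 0;
}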

View File

@ -1,182 +0,0 @@
/** @file
*****************************************************************************
Declaration of arithmetic in the finite field F[p], for prime p of fixed length.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP_HPP_
#define FP_HPP_
#include "algebra/fields/bigint.hpp"
#include "algebra/exponentiation/exponentiation.hpp"
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
class Fp_model;
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &, const Fp_model<n, modulus>&);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &, Fp_model<n, modulus> &);
/**
* Arithmetic in the finite field F[p], for prime p of fixed length.
*
* This class implements Fp-arithmetic, for a large prime p, using a fixed number
* of words. It is optimized for tight memory consumption, so the modulus p is
* passed as a template parameter, to avoid per-element overheads.
*
* The implementation is mostly a wrapper around GMP's MPN (constant-size integers).
* But for the integer sizes of interest for libsnark (3 to 5 limbs of 64 bits each),
* we implement performance-critical routines, like addition and multiplication,
* using hand-optimized assembly code.
*/
template<mp_size_t n, const bigint<n>& modulus>
class Fp_model {
public:
bigint<n> mont_repr;
public:
static const mp_size_t num_limbs = n;
static const constexpr bigint<n>& mod = modulus;
#ifdef PROFILE_OP_COUNTS
static int64_t add_cnt;
static int64_t sub_cnt;
static int64_t mul_cnt;
static int64_t sqr_cnt;
static int64_t inv_cnt;
#endif
static size_t num_bits;
static bigint<n> euler; // (modulus-1)/2
static size_t s; // modulus = 2^s * t + 1
static bigint<n> t; // with t odd
static bigint<n> t_minus_1_over_2; // (t-1)/2
static Fp_model<n, modulus> nqr; // a quadratic nonresidue
static Fp_model<n, modulus> nqr_to_t; // nqr^t
static Fp_model<n, modulus> multiplicative_generator; // generator of Fp^*
static Fp_model<n, modulus> root_of_unity; // generator^((modulus-1)/2^s)
static mp_limb_t inv; // -modulus^(-1) mod W, where W = 2^(word size); see the reduction loop in fp.tcc
static bigint<n> Rsquared; // R^2, where R = W^k, where k = ??
static bigint<n> Rcubed; // R^3
static bool modulus_is_valid() { return modulus.data[n-1] != 0; } // mpn inverse assumes that highest limb is non-zero
Fp_model() {};
Fp_model(const bigint<n> &b);
Fp_model(const int64_t x, const bool is_unsigned=false);
void set_uint64(const uint64_t x);
void mul_reduce(const bigint<n> &other);
void clear();
/* Return the standard (not Montgomery) representation of the
field element's equivalence class. I.e. Fp(2).as_bigint()
would return bigint(2) */
bigint<n> as_bigint() const;
/* Return the last limb of the standard representation of the
field element. E.g. on 64-bit architectures Fp(123).as_uint64()
and Fp(2^64+123).as_uint64() would both return 123. */
uint64_t as_uint64() const;
bool operator==(const Fp_model& other) const;
bool operator!=(const Fp_model& other) const;
bool is_zero() const;
void print() const;
Fp_model& operator+=(const Fp_model& other);
Fp_model& operator-=(const Fp_model& other);
Fp_model& operator*=(const Fp_model& other);
Fp_model& operator^=(const uint64_t pow);
template<mp_size_t m>
Fp_model& operator^=(const bigint<m> &pow);
Fp_model operator+(const Fp_model& other) const;
Fp_model operator-(const Fp_model& other) const;
Fp_model operator*(const Fp_model& other) const;
Fp_model operator-() const;
Fp_model squared() const;
Fp_model& invert();
Fp_model inverse() const;
Fp_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate)
Fp_model operator^(const uint64_t pow) const;
template<mp_size_t m>
Fp_model operator^(const bigint<m> &pow) const;
static size_t size_in_bits() { return num_bits; }
static size_t capacity() { return num_bits - 1; }
static bigint<n> field_char() { return modulus; }
static Fp_model<n, modulus> zero();
static Fp_model<n, modulus> one();
static Fp_model<n, modulus> random_element();
friend std::ostream& operator<< <n,modulus>(std::ostream &out, const Fp_model<n, modulus> &p);
friend std::istream& operator>> <n,modulus>(std::istream &in, Fp_model<n, modulus> &p);
};
#ifdef PROFILE_OP_COUNTS
template<mp_size_t n, const bigint<n>& modulus>
int64_t Fp_model<n, modulus>::add_cnt = 0;
template<mp_size_t n, const bigint<n>& modulus>
int64_t Fp_model<n, modulus>::sub_cnt = 0;
template<mp_size_t n, const bigint<n>& modulus>
int64_t Fp_model<n, modulus>::mul_cnt = 0;
template<mp_size_t n, const bigint<n>& modulus>
int64_t Fp_model<n, modulus>::sqr_cnt = 0;
template<mp_size_t n, const bigint<n>& modulus>
int64_t Fp_model<n, modulus>::inv_cnt = 0;
#endif
template<mp_size_t n, const bigint<n>& modulus>
size_t Fp_model<n, modulus>::num_bits;
template<mp_size_t n, const bigint<n>& modulus>
bigint<n> Fp_model<n, modulus>::euler;
template<mp_size_t n, const bigint<n>& modulus>
size_t Fp_model<n, modulus>::s;
template<mp_size_t n, const bigint<n>& modulus>
bigint<n> Fp_model<n, modulus>::t;
template<mp_size_t n, const bigint<n>& modulus>
bigint<n> Fp_model<n, modulus>::t_minus_1_over_2;
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp_model<n, modulus>::nqr;
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp_model<n, modulus>::nqr_to_t;
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp_model<n, modulus>::multiplicative_generator;
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp_model<n, modulus>::root_of_unity;
template<mp_size_t n, const bigint<n>& modulus>
mp_limb_t Fp_model<n, modulus>::inv;
template<mp_size_t n, const bigint<n>& modulus>
bigint<n> Fp_model<n, modulus>::Rsquared;
template<mp_size_t n, const bigint<n>& modulus>
bigint<n> Fp_model<n, modulus>::Rcubed;
} // libsnark
#include "algebra/fields/fp.tcc"
#endif // FP_HPP_
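The Montgomery constants documented above (inv, Rsquared, Rcubed) are easiest to see with a single limb. The sketch below uses a 16-bit word, so R = W = 2^16, with the modulus 65521 chosen purely for illustration (it is not a libsnark curve modulus, and the helper names are this sketch's own): to_mont multiplies by R via redc(x * Rsquared), mont_mul is a multiply followed by one reduction, and from_mont is a bare reduction.

#include <cstdint>
#include <cstdio>

static const uint32_t N  = 65521;                              // toy 16-bit prime modulus
static const uint32_t W  = 1u << 16;                           // word size; one limb, so R = W
static const uint32_t R2 = (uint32_t)(((uint64_t)W * W) % N);  // Rsquared = R^2 mod N

static uint32_t neg_inv_mod_word(uint32_t n)                   // inv = -N^{-1} mod W, by Newton iteration
{
    uint32_t x = 1;                                            // correct mod 2, since n is odd
    for (int i = 0; i < 4; ++i) x = (x * (2 - n * x)) & 0xFFFF; // each step doubles the number of correct bits
    return (W - x) & 0xFFFF;
}

static uint32_t redc(uint64_t T)                               // T < R*N; returns T * R^{-1} mod N
{
    const uint32_t inv = neg_inv_mod_word(N);
    const uint32_t k = (uint32_t)(((T & 0xFFFF) * inv) & 0xFFFF); // chosen so the low limb of T + k*N is zero
    const uint64_t t = (T + (uint64_t)k * N) >> 16;
    return (uint32_t)(t >= N ? t - N : t);
}

static uint32_t to_mont(uint32_t x)              { return redc((uint64_t)(x % N) * R2); }
static uint32_t mont_mul(uint32_t a, uint32_t b) { return redc((uint64_t)a * b); }
static uint32_t from_mont(uint32_t x)            { return redc(x); }

int main()
{
    // Check: 1234 * 5678 mod 65521, computed via Montgomery form.
    const uint32_t a = to_mont(1234), b = to_mont(5678);
    const uint32_t c = from_mont(mont_mul(a, b));
    std::printf("%u (expect %u)\n", c, (uint32_t)(((uint64_t)1234 * 5678) % N));
    return 0;
}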

View File

@ -1,790 +0,0 @@
/** @file
*****************************************************************************
Implementation of arithmetic in the finite field F[p], for prime p of fixed length.
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP_TCC_
#define FP_TCC_
#include <cassert>
#include <cstdlib>
#include <cmath>
#include "algebra/fields/fp_aux.tcc"
#include "algebra/fields/field_utils.hpp"
#include "common/assert_except.hpp"
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
void Fp_model<n,modulus>::mul_reduce(const bigint<n> &other)
{
/* stupid pre-processor tricks; beware */
#if defined(__x86_64__) && defined(USE_ASM)
if (n == 3)
{ // Use asm-optimized Comba multiplication and reduction
mp_limb_t res[2*n];
mp_limb_t c0, c1, c2;
COMBA_3_BY_3_MUL(c0, c1, c2, res, this->mont_repr.data, other.data);
mp_limb_t k;
mp_limb_t tmp1, tmp2, tmp3;
REDUCE_6_LIMB_PRODUCT(k, tmp1, tmp2, tmp3, inv, res, modulus.data);
/* subtract modulus if t > mod */
__asm__
("/* check for overflow */ \n\t"
MONT_CMP(16)
MONT_CMP(8)
MONT_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
MONT_FIRSTSUB
MONT_NEXTSUB(8)
MONT_NEXTSUB(16)
"done%=: \n\t"
:
: [tmp] "r" (res+n), [M] "r" (modulus.data)
: "cc", "memory", "%rax");
mpn_copyi(this->mont_repr.data, res+n, n);
}
else if (n == 4)
{ // use asm-optimized "CIOS method"
mp_limb_t tmp[n+1];
mp_limb_t T0=0, T1=1, cy=2, u=3; // TODO: fix this
__asm__ (MONT_PRECOMPUTE
MONT_FIRSTITER(1)
MONT_FIRSTITER(2)
MONT_FIRSTITER(3)
MONT_FINALIZE(3)
MONT_ITERFIRST(1)
MONT_ITERITER(1, 1)
MONT_ITERITER(1, 2)
MONT_ITERITER(1, 3)
MONT_FINALIZE(3)
MONT_ITERFIRST(2)
MONT_ITERITER(2, 1)
MONT_ITERITER(2, 2)
MONT_ITERITER(2, 3)
MONT_FINALIZE(3)
MONT_ITERFIRST(3)
MONT_ITERITER(3, 1)
MONT_ITERITER(3, 2)
MONT_ITERITER(3, 3)
MONT_FINALIZE(3)
"/* check for overflow */ \n\t"
MONT_CMP(24)
MONT_CMP(16)
MONT_CMP(8)
MONT_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
MONT_FIRSTSUB
MONT_NEXTSUB(8)
MONT_NEXTSUB(16)
MONT_NEXTSUB(24)
"done%=: \n\t"
:
: [tmp] "r" (tmp), [A] "r" (this->mont_repr.data), [B] "r" (other.data), [inv] "r" (inv), [M] "r" (modulus.data),
[T0] "r" (T0), [T1] "r" (T1), [cy] "r" (cy), [u] "r" (u)
: "cc", "memory", "%rax", "%rdx"
);
mpn_copyi(this->mont_repr.data, tmp, n);
}
else if (n == 5)
{ // use asm-optimized "CIOS method"
mp_limb_t tmp[n+1];
mp_limb_t T0=0, T1=1, cy=2, u=3; // TODO: fix this
__asm__ (MONT_PRECOMPUTE
MONT_FIRSTITER(1)
MONT_FIRSTITER(2)
MONT_FIRSTITER(3)
MONT_FIRSTITER(4)
MONT_FINALIZE(4)
MONT_ITERFIRST(1)
MONT_ITERITER(1, 1)
MONT_ITERITER(1, 2)
MONT_ITERITER(1, 3)
MONT_ITERITER(1, 4)
MONT_FINALIZE(4)
MONT_ITERFIRST(2)
MONT_ITERITER(2, 1)
MONT_ITERITER(2, 2)
MONT_ITERITER(2, 3)
MONT_ITERITER(2, 4)
MONT_FINALIZE(4)
MONT_ITERFIRST(3)
MONT_ITERITER(3, 1)
MONT_ITERITER(3, 2)
MONT_ITERITER(3, 3)
MONT_ITERITER(3, 4)
MONT_FINALIZE(4)
MONT_ITERFIRST(4)
MONT_ITERITER(4, 1)
MONT_ITERITER(4, 2)
MONT_ITERITER(4, 3)
MONT_ITERITER(4, 4)
MONT_FINALIZE(4)
"/* check for overflow */ \n\t"
MONT_CMP(32)
MONT_CMP(24)
MONT_CMP(16)
MONT_CMP(8)
MONT_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
MONT_FIRSTSUB
MONT_NEXTSUB(8)
MONT_NEXTSUB(16)
MONT_NEXTSUB(24)
MONT_NEXTSUB(32)
"done%=: \n\t"
:
: [tmp] "r" (tmp), [A] "r" (this->mont_repr.data), [B] "r" (other.data), [inv] "r" (inv), [M] "r" (modulus.data),
[T0] "r" (T0), [T1] "r" (T1), [cy] "r" (cy), [u] "r" (u)
: "cc", "memory", "%rax", "%rdx"
);
mpn_copyi(this->mont_repr.data, tmp, n);
}
else
#endif
{
mp_limb_t res[2*n];
mpn_mul_n(res, this->mont_repr.data, other.data, n);
/*
The Montgomery reduction here is based on Algorithm 14.32 in
Handbook of Applied Cryptography
<http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
*/
for (size_t i = 0; i < n; ++i)
{
mp_limb_t k = inv * res[i];
/* calculate res = res + k * mod * b^i */
mp_limb_t carryout = mpn_addmul_1(res+i, modulus.data, n, k);
carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
assert(carryout == 0);
}
if (mpn_cmp(res+n, modulus.data, n) >= 0)
{
const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus.data, n);
assert(borrow == 0);
}
mpn_copyi(this->mont_repr.data, res+n, n);
}
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>::Fp_model(const bigint<n> &b)
{
mpn_copyi(this->mont_repr.data, Rsquared.data, n);
mul_reduce(b);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>::Fp_model(const int64_t x, const bool is_unsigned)
{
if (is_unsigned || x >= 0)
{
this->mont_repr.data[0] = x;
}
else
{
const mp_limb_t borrow = mpn_sub_1(this->mont_repr.data, modulus.data, n, -x);
assert(borrow == 0);
}
mul_reduce(Rsquared);
}
template<mp_size_t n, const bigint<n>& modulus>
void Fp_model<n,modulus>::set_uint64(const uint64_t x)
{
this->mont_repr.clear();
this->mont_repr.data[0] = x;
mul_reduce(Rsquared);
}
template<mp_size_t n, const bigint<n>& modulus>
void Fp_model<n,modulus>::clear()
{
this->mont_repr.clear();
}
template<mp_size_t n, const bigint<n>& modulus>
bigint<n> Fp_model<n,modulus>::as_bigint() const
{
bigint<n> one;
one.clear();
one.data[0] = 1;
Fp_model<n, modulus> res(*this);
res.mul_reduce(one);
return (res.mont_repr);
}
template<mp_size_t n, const bigint<n>& modulus>
uint64_t Fp_model<n,modulus>::as_uint64() const
{
return this->as_bigint().as_uint64();
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp_model<n,modulus>::operator==(const Fp_model& other) const
{
return (this->mont_repr == other.mont_repr);
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp_model<n,modulus>::operator!=(const Fp_model& other) const
{
return (this->mont_repr != other.mont_repr);
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp_model<n,modulus>::is_zero() const
{
return (this->mont_repr.is_zero()); // zero maps to zero
}
template<mp_size_t n, const bigint<n>& modulus>
void Fp_model<n,modulus>::print() const
{
Fp_model<n,modulus> tmp;
tmp.mont_repr.data[0] = 1;
tmp.mul_reduce(this->mont_repr);
tmp.mont_repr.print();
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::zero()
{
Fp_model<n,modulus> res;
res.mont_repr.clear();
return res;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::one()
{
Fp_model<n,modulus> res;
res.mont_repr.data[0] = 1;
res.mul_reduce(Rsquared);
return res;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>& Fp_model<n,modulus>::operator+=(const Fp_model<n,modulus>& other)
{
#ifdef PROFILE_OP_COUNTS
this->add_cnt++;
#endif
#if defined(__x86_64__) && defined(USE_ASM)
if (n == 3)
{
__asm__
("/* perform bignum addition */ \n\t"
ADD_FIRSTADD
ADD_NEXTADD(8)
ADD_NEXTADD(16)
"/* if overflow: subtract */ \n\t"
"/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t"
"jc subtract%= \n\t"
"/* check for overflow */ \n\t"
ADD_CMP(16)
ADD_CMP(8)
ADD_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
ADD_FIRSTSUB
ADD_NEXTSUB(8)
ADD_NEXTSUB(16)
"done%=: \n\t"
:
: [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
: "cc", "memory", "%rax");
}
else if (n == 4)
{
__asm__
("/* perform bignum addition */ \n\t"
ADD_FIRSTADD
ADD_NEXTADD(8)
ADD_NEXTADD(16)
ADD_NEXTADD(24)
"/* if overflow: subtract */ \n\t"
"/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t"
"jc subtract%= \n\t"
"/* check for overflow */ \n\t"
ADD_CMP(24)
ADD_CMP(16)
ADD_CMP(8)
ADD_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
ADD_FIRSTSUB
ADD_NEXTSUB(8)
ADD_NEXTSUB(16)
ADD_NEXTSUB(24)
"done%=: \n\t"
:
: [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
: "cc", "memory", "%rax");
}
else if (n == 5)
{
__asm__
("/* perform bignum addition */ \n\t"
ADD_FIRSTADD
ADD_NEXTADD(8)
ADD_NEXTADD(16)
ADD_NEXTADD(24)
ADD_NEXTADD(32)
"/* if overflow: subtract */ \n\t"
"/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t"
"jc subtract%= \n\t"
"/* check for overflow */ \n\t"
ADD_CMP(32)
ADD_CMP(24)
ADD_CMP(16)
ADD_CMP(8)
ADD_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
ADD_FIRSTSUB
ADD_NEXTSUB(8)
ADD_NEXTSUB(16)
ADD_NEXTSUB(24)
ADD_NEXTSUB(32)
"done%=: \n\t"
:
: [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
: "cc", "memory", "%rax");
}
else
#endif
{
mp_limb_t scratch[n+1];
const mp_limb_t carry = mpn_add_n(scratch, this->mont_repr.data, other.mont_repr.data, n);
scratch[n] = carry;
if (carry || mpn_cmp(scratch, modulus.data, n) >= 0)
{
const mp_limb_t borrow = mpn_sub(scratch, scratch, n+1, modulus.data, n);
assert(borrow == 0);
}
mpn_copyi(this->mont_repr.data, scratch, n);
}
return *this;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>& Fp_model<n,modulus>::operator-=(const Fp_model<n,modulus>& other)
{
#ifdef PROFILE_OP_COUNTS
this->sub_cnt++;
#endif
#if defined(__x86_64__) && defined(USE_ASM)
if (n == 3)
{
__asm__
(SUB_FIRSTSUB
SUB_NEXTSUB(8)
SUB_NEXTSUB(16)
"jnc done%=\n\t"
SUB_FIRSTADD
SUB_NEXTADD(8)
SUB_NEXTADD(16)
"done%=:\n\t"
:
: [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
: "cc", "memory", "%rax");
}
else if (n == 4)
{
__asm__
(SUB_FIRSTSUB
SUB_NEXTSUB(8)
SUB_NEXTSUB(16)
SUB_NEXTSUB(24)
"jnc done%=\n\t"
SUB_FIRSTADD
SUB_NEXTADD(8)
SUB_NEXTADD(16)
SUB_NEXTADD(24)
"done%=:\n\t"
:
: [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
: "cc", "memory", "%rax");
}
else if (n == 5)
{
__asm__
(SUB_FIRSTSUB
SUB_NEXTSUB(8)
SUB_NEXTSUB(16)
SUB_NEXTSUB(24)
SUB_NEXTSUB(32)
"jnc done%=\n\t"
SUB_FIRSTADD
SUB_NEXTADD(8)
SUB_NEXTADD(16)
SUB_NEXTADD(24)
SUB_NEXTADD(32)
"done%=:\n\t"
:
: [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
: "cc", "memory", "%rax");
}
else
#endif
{
mp_limb_t scratch[n+1];
if (mpn_cmp(this->mont_repr.data, other.mont_repr.data, n) < 0)
{
const mp_limb_t carry = mpn_add_n(scratch, this->mont_repr.data, modulus.data, n);
scratch[n] = carry;
}
else
{
mpn_copyi(scratch, this->mont_repr.data, n);
scratch[n] = 0;
}
const mp_limb_t borrow = mpn_sub(scratch, scratch, n+1, other.mont_repr.data, n);
assert(borrow == 0);
mpn_copyi(this->mont_repr.data, scratch, n);
}
return *this;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>& Fp_model<n,modulus>::operator*=(const Fp_model<n,modulus>& other)
{
#ifdef PROFILE_OP_COUNTS
this->mul_cnt++;
#endif
mul_reduce(other.mont_repr);
return *this;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>& Fp_model<n,modulus>::operator^=(const uint64_t pow)
{
(*this) = power<Fp_model<n, modulus> >(*this, pow);
return (*this);
}
template<mp_size_t n, const bigint<n>& modulus>
template<mp_size_t m>
Fp_model<n,modulus>& Fp_model<n,modulus>::operator^=(const bigint<m> &pow)
{
(*this) = power<Fp_model<n, modulus>, m>(*this, pow);
return (*this);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::operator+(const Fp_model<n,modulus>& other) const
{
Fp_model<n, modulus> r(*this);
return (r += other);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::operator-(const Fp_model<n,modulus>& other) const
{
Fp_model<n, modulus> r(*this);
return (r -= other);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::operator*(const Fp_model<n,modulus>& other) const
{
Fp_model<n, modulus> r(*this);
return (r *= other);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::operator^(const uint64_t pow) const
{
Fp_model<n, modulus> r(*this);
return (r ^= pow);
}
template<mp_size_t n, const bigint<n>& modulus>
template<mp_size_t m>
Fp_model<n,modulus> Fp_model<n,modulus>::operator^(const bigint<m> &pow) const
{
Fp_model<n, modulus> r(*this);
return (r ^= pow);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::operator-() const
{
#ifdef PROFILE_OP_COUNTS
this->sub_cnt++;
#endif
if (this->is_zero())
{
return (*this);
}
else
{
Fp_model<n, modulus> r;
mpn_sub_n(r.mont_repr.data, modulus.data, this->mont_repr.data, n);
return r;
}
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::squared() const
{
#ifdef PROFILE_OP_COUNTS
this->sqr_cnt++;
this->mul_cnt--; // zero out the upcoming mul
#endif
/* stupid pre-processor tricks; beware */
#if defined(__x86_64__) && defined(USE_ASM)
if (n == 3)
{ // use asm-optimized Comba squaring
mp_limb_t res[2*n];
mp_limb_t c0, c1, c2;
COMBA_3_BY_3_SQR(c0, c1, c2, res, this->mont_repr.data);
mp_limb_t k;
mp_limb_t tmp1, tmp2, tmp3;
REDUCE_6_LIMB_PRODUCT(k, tmp1, tmp2, tmp3, inv, res, modulus.data);
/* subtract modulus if t > mod */
__asm__ volatile
("/* check for overflow */ \n\t"
MONT_CMP(16)
MONT_CMP(8)
MONT_CMP(0)
"/* subtract mod if overflow */ \n\t"
"subtract%=: \n\t"
MONT_FIRSTSUB
MONT_NEXTSUB(8)
MONT_NEXTSUB(16)
"done%=: \n\t"
:
: [tmp] "r" (res+n), [M] "r" (modulus.data)
: "cc", "memory", "%rax");
Fp_model<n, modulus> r;
mpn_copyi(r.mont_repr.data, res+n, n);
return r;
}
else
#endif
{
Fp_model<n, modulus> r(*this);
return (r *= r);
}
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus>& Fp_model<n,modulus>::invert()
{
#ifdef PROFILE_OP_COUNTS
this->inv_cnt++;
#endif
assert(!this->is_zero());
bigint<n> g; /* gp should have room for vn = n limbs */
mp_limb_t s[n+1]; /* sp should have room for vn+1 limbs */
mp_size_t sn;
bigint<n> v = modulus; // both source operands are destroyed by mpn_gcdext
/* computes gcd(u, v) = g = u*s + v*t, so s*u will be 1 (mod v) */
const mp_size_t gn = mpn_gcdext(g.data, s, &sn, this->mont_repr.data, n, v.data, n);
assert(gn == 1 && g.data[0] == 1); /* inverse exists */
mp_limb_t q; /* division result fits into q, as sn <= n+1 */
/* a negative sn indicates that the cofactor s is negative; will fix up later */
if (std::abs(sn) >= n)
{
/* if sn could require modulus reduction, do it here */
mpn_tdiv_qr(&q, this->mont_repr.data, 0, s, std::abs(sn), modulus.data, n);
}
else
{
/* otherwise just copy it over */
mpn_zero(this->mont_repr.data, n);
mpn_copyi(this->mont_repr.data, s, std::abs(sn));
}
/* fix up the negative sn */
if (sn < 0)
{
const mp_limb_t borrow = mpn_sub_n(this->mont_repr.data, modulus.data, this->mont_repr.data, n);
assert(borrow == 0);
}
mul_reduce(Rcubed);
return *this;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::inverse() const
{
Fp_model<n, modulus> r(*this);
return (r.invert());
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp_model<n,modulus>::random_element() /// returns random element of Fp_model
{
/* note that since the Montgomery representation is a bijection,
selecting a random element of {xR} is the same as selecting a
random element of {x} */
Fp_model<n, modulus> r;
do
{
r.mont_repr.randomize();
/* clear all bits higher than MSB of modulus */
size_t bitno = GMP_NUMB_BITS * n - 1;
while (modulus.test_bit(bitno) == false)
{
const std::size_t part = bitno/GMP_NUMB_BITS;
const std::size_t bit = bitno - (GMP_NUMB_BITS*part);
r.mont_repr.data[part] &= ~(UINT64_C(1)<<bit);
bitno--;
}
}
/* if r.data is still >= modulus -- repeat (rejection sampling) */
while (mpn_cmp(r.mont_repr.data, modulus.data, n) >= 0);
return r;
}
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n,modulus> Fp_model<n,modulus>::sqrt() const
{
if (is_zero()) {
return *this;
}
Fp_model<n,modulus> one = Fp_model<n,modulus>::one();
size_t v = Fp_model<n,modulus>::s;
Fp_model<n,modulus> z = Fp_model<n,modulus>::nqr_to_t;
Fp_model<n,modulus> w = (*this)^Fp_model<n,modulus>::t_minus_1_over_2;
Fp_model<n,modulus> x = (*this) * w;
Fp_model<n,modulus> b = x * w; // b = (*this)^t
// check if square with euler's criterion
Fp_model<n,modulus> check = b;
for (size_t i = 0; i < v-1; ++i)
{
check = check.squared();
}
if (check != one)
{
assert_except(0);
}
// compute square root with Tonelli--Shanks
// (does not terminate if not a square!)
while (b != one)
{
size_t m = 0;
Fp_model<n,modulus> b2m = b;
while (b2m != one)
{
/* invariant: b2m = b^(2^m) after entering this loop */
b2m = b2m.squared();
m += 1;
}
int j = v-m-1;
w = z;
while (j > 0)
{
w = w.squared();
--j;
} // w = z^2^(v-m-1)
z = w.squared();
b = b * z;
x = x * w;
v = m;
}
return x;
}
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &out, const Fp_model<n, modulus> &p)
{
#ifndef MONTGOMERY_OUTPUT
Fp_model<n,modulus> tmp;
tmp.mont_repr.data[0] = 1;
tmp.mul_reduce(p.mont_repr);
out << tmp.mont_repr;
#else
out << p.mont_repr;
#endif
return out;
}
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &in, Fp_model<n, modulus> &p)
{
#ifndef MONTGOMERY_OUTPUT
in >> p.mont_repr;
p.mul_reduce(Fp_model<n, modulus>::Rsquared);
#else
in >> p.mont_repr;
#endif
return in;
}
} // libsnark
#endif // FP_TCC_
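The sqrt() above is Tonelli--Shanks, driven by the precomputed constants s, t_minus_1_over_2 and nqr_to_t from fp.hpp. The loop structure is easier to follow over a small field, so the sketch below re-implements the same loop over the toy field Z_17 (p - 1 = 2^4 * 1, non-residue 3); all parameters are illustrative choices for this sketch, not libsnark values, and it assumes its input is a nonzero square.

#include <cstdint>
#include <cstdio>

static const uint64_t P = 17;

static uint64_t powmod(uint64_t b, uint64_t e)
{
    uint64_t r = 1;
    while (e) { if (e & 1) r = (r * b) % P; b = (b * b) % P; e >>= 1; }
    return r;
}

static uint64_t toy_sqrt(uint64_t a)                 // Tonelli--Shanks; assumes a is a nonzero square
{
    uint64_t s = 4, t = 1;                           // P - 1 = 2^s * t with t odd
    uint64_t z = powmod(3, t);                       // nqr_to_t: 3 is a non-residue mod 17
    const uint64_t w0 = powmod(a, (t - 1) / 2);      // a^((t-1)/2), as in sqrt()
    uint64_t x = (a * w0) % P;                       // a^((t+1)/2), the candidate root
    uint64_t b = (x * w0) % P;                       // a^t
    while (b != 1)
    {
        uint64_t m = 0, b2m = b;
        while (b2m != 1) { b2m = (b2m * b2m) % P; ++m; }            // smallest m with b^(2^m) = 1
        uint64_t w = z;
        for (uint64_t j = 0; j + m + 1 < s; ++j) w = (w * w) % P;   // w = z^(2^(s-m-1))
        z = (w * w) % P;
        b = (b * z) % P;
        x = (x * w) % P;
        s = m;
    }
    return x;
}

int main()
{
    const uint64_t squares[] = {1, 2, 4, 8, 9, 13, 15, 16};         // the quadratic residues mod 17
    for (uint64_t a : squares)
    {
        const uint64_t r = toy_sqrt(a);
        std::printf("sqrt(%llu) = %llu, square = %llu\n",
                    (unsigned long long)a, (unsigned long long)r,
                    (unsigned long long)((r * r) % P));
    }
    return 0;
}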

View File

@ -1,116 +0,0 @@
/** @file
*****************************************************************************
Declaration of arithmetic in the finite field F[((p^2)^3)^2].
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP12_2OVER3OVER2_HPP_
#define FP12_2OVER3OVER2_HPP_
#include "algebra/fields/fp.hpp"
#include "algebra/fields/fp2.hpp"
#include "algebra/fields/fp6_3over2.hpp"
#include <vector>
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
class Fp12_2over3over2_model;
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &, const Fp12_2over3over2_model<n, modulus> &);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &, Fp12_2over3over2_model<n, modulus> &);
/**
* Arithmetic in the finite field F[((p^2)^3)^2].
*
* Let p := modulus. This interface provides arithmetic for the extension field
* Fp12 = Fp6[W]/(W^2-V) where Fp6 = Fp2[V]/(V^3-non_residue) and non_residue is in Fp2
*
* ASSUMPTION: p = 1 (mod 6)
*/
template<mp_size_t n, const bigint<n>& modulus>
class Fp12_2over3over2_model {
public:
typedef Fp_model<n, modulus> my_Fp;
typedef Fp2_model<n, modulus> my_Fp2;
typedef Fp6_3over2_model<n, modulus> my_Fp6;
static Fp2_model<n, modulus> non_residue;
static Fp2_model<n, modulus> Frobenius_coeffs_c1[12]; // non_residue^((modulus^i-1)/6) for i=0,...,11
my_Fp6 c0, c1;
Fp12_2over3over2_model() {};
Fp12_2over3over2_model(const my_Fp6& c0, const my_Fp6& c1) : c0(c0), c1(c1) {};
void clear() { c0.clear(); c1.clear(); }
void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); }
static Fp12_2over3over2_model<n, modulus> zero();
static Fp12_2over3over2_model<n, modulus> one();
static Fp12_2over3over2_model<n, modulus> random_element();
bool is_zero() const { return c0.is_zero() && c1.is_zero(); }
bool operator==(const Fp12_2over3over2_model &other) const;
bool operator!=(const Fp12_2over3over2_model &other) const;
Fp12_2over3over2_model operator+(const Fp12_2over3over2_model &other) const;
Fp12_2over3over2_model operator-(const Fp12_2over3over2_model &other) const;
Fp12_2over3over2_model operator*(const Fp12_2over3over2_model &other) const;
Fp12_2over3over2_model operator-() const;
Fp12_2over3over2_model squared() const; // default is squared_complex
Fp12_2over3over2_model squared_karatsuba() const;
Fp12_2over3over2_model squared_complex() const;
Fp12_2over3over2_model inverse() const;
Fp12_2over3over2_model Frobenius_map(uint64_t power) const;
Fp12_2over3over2_model unitary_inverse() const;
Fp12_2over3over2_model cyclotomic_squared() const;
Fp12_2over3over2_model mul_by_024(const my_Fp2 &ell_0, const my_Fp2 &ell_VW, const my_Fp2 &ell_VV) const;
static my_Fp6 mul_by_non_residue(const my_Fp6 &elt);
template<mp_size_t m>
Fp12_2over3over2_model cyclotomic_exp(const bigint<m> &exponent) const;
static bigint<n> base_field_char() { return modulus; }
static size_t extension_degree() { return 12; }
friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp12_2over3over2_model<n, modulus> &el);
friend std::istream& operator>> <n, modulus>(std::istream &in, Fp12_2over3over2_model<n, modulus> &el);
};
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream& out, const std::vector<Fp12_2over3over2_model<n, modulus> > &v);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream& in, std::vector<Fp12_2over3over2_model<n, modulus> > &v);
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs);
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs);
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n, modulus> operator*(const Fp6_3over2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs);
template<mp_size_t n, const bigint<n>& modulus, mp_size_t m>
Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const bigint<m> &exponent);
template<mp_size_t n, const bigint<n>& modulus, mp_size_t m, const bigint<m>& exp_modulus>
Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const Fp_model<m, exp_modulus> &exponent);
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp12_2over3over2_model<n, modulus>::non_residue;
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp12_2over3over2_model<n, modulus>::Frobenius_coeffs_c1[12];
} // libsnark
#include "algebra/fields/fp12_2over3over2.tcc"
#endif // FP12_2OVER3OVER2_HPP_

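For orientation, the tower that the Fp12_2over3over2_model declaration above (together with fp2.hpp and fp6_3over2.hpp) builds can be written out explicitly; this is only a restatement of the docstrings, with $\beta$ and $\xi$ standing for the respective non_residue constants:

\[
\mathbb{F}_{p^2} = \mathbb{F}_p[U]/(U^2 - \beta), \qquad
\mathbb{F}_{p^6} = \mathbb{F}_{p^2}[V]/(V^3 - \xi), \qquad
\mathbb{F}_{p^{12}} = \mathbb{F}_{p^6}[W]/(W^2 - V),
\]

where $\beta \in \mathbb{F}_p$, $\xi \in \mathbb{F}_{p^2}$, and each Fp12 element is stored as $c_0 + c_1 W$ via the two my_Fp6 members c0 and c1.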
View File

@@ -1,412 +0,0 @@
/** @file
*****************************************************************************
Implementation of arithmetic in the finite field F[((p^2)^3)^2].
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP12_2OVER3OVER2_TCC_
#define FP12_2OVER3OVER2_TCC_
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
Fp6_3over2_model<n, modulus> Fp12_2over3over2_model<n,modulus>::mul_by_non_residue(const Fp6_3over2_model<n, modulus> &elt)
{
return Fp6_3over2_model<n, modulus>(non_residue * elt.c2, elt.c0, elt.c1);
}
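/* mul_by_non_residue is multiplication by V, the element adjoined in
 * Fp12 = Fp6[W]/(W^2 - V): writing elt = c0 + c1*V + c2*V^2 and using
 * V^3 = non_residue (the xi of Fp6 = Fp2[V]/(V^3 - non_residue)),
 *   (c0 + c1*V + c2*V^2) * V = non_residue*c2 + c0*V + c1*V^2,
 * which is exactly the coefficient rotation returned above. */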
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::zero()
{
return Fp12_2over3over2_model<n, modulus>(my_Fp6::zero(), my_Fp6::zero());
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::one()
{
return Fp12_2over3over2_model<n, modulus>(my_Fp6::one(), my_Fp6::zero());
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::random_element()
{
Fp12_2over3over2_model<n, modulus> r;
r.c0 = my_Fp6::random_element();
r.c1 = my_Fp6::random_element();
return r;
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp12_2over3over2_model<n,modulus>::operator==(const Fp12_2over3over2_model<n,modulus> &other) const
{
return (this->c0 == other.c0 && this->c1 == other.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp12_2over3over2_model<n,modulus>::operator!=(const Fp12_2over3over2_model<n,modulus> &other) const
{
return !(operator==(other));
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator+(const Fp12_2over3over2_model<n,modulus> &other) const
{
return Fp12_2over3over2_model<n,modulus>(this->c0 + other.c0,
this->c1 + other.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator-(const Fp12_2over3over2_model<n,modulus> &other) const
{
return Fp12_2over3over2_model<n,modulus>(this->c0 - other.c0,
this->c1 - other.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs)
{
return Fp12_2over3over2_model<n,modulus>(lhs*rhs.c0,
lhs*rhs.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs)
{
return Fp12_2over3over2_model<n,modulus>(lhs*rhs.c0,
lhs*rhs.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n, modulus> operator*(const Fp6_3over2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs)
{
return Fp12_2over3over2_model<n,modulus>(lhs*rhs.c0,
lhs*rhs.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator*(const Fp12_2over3over2_model<n,modulus> &other) const
{
/* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba) */
const my_Fp6 &A = other.c0, &B = other.c1,
&a = this->c0, &b = this->c1;
const my_Fp6 aA = a * A;
const my_Fp6 bB = b * B;
return Fp12_2over3over2_model<n,modulus>(aA + Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(bB),
(a + b)*(A+B) - aA - bB);
}
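/* The Karatsuba identity used above: with x = a + b*W, y = A + B*W and W^2 = V,
 *   x*y = (a*A + mul_by_non_residue(b*B)) + ((a+b)*(A+B) - a*A - b*B)*W,
 * so a full Fp12 multiplication costs three Fp6 multiplications (aA, bB and the
 * cross product) instead of the four needed by the schoolbook formula. */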
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator-() const
{
return Fp12_2over3over2_model<n,modulus>(-this->c0,
-this->c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::squared() const
{
return squared_complex();
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::squared_karatsuba() const
{
/* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba squaring) */
const my_Fp6 &a = this->c0, &b = this->c1;
const my_Fp6 asq = a.squared();
const my_Fp6 bsq = b.squared();
return Fp12_2over3over2_model<n,modulus>(asq + Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(bsq),
(a + b).squared() - asq - bsq);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::squared_complex() const
{
/* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Complex squaring) */
const my_Fp6 &a = this->c0, &b = this->c1;
const my_Fp6 ab = a * b;
return Fp12_2over3over2_model<n,modulus>((a + b) * (a + Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(b)) - ab - Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(ab),
ab + ab);
}
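/* Complex squaring: with x = a + b*W and W^2 = V,
 *   x^2 = (a^2 + V*b^2) + 2ab*W
 *       = ((a + b)*(a + V*b) - ab - V*ab) + (ab + ab)*W,
 * which needs only two Fp6 multiplications (ab and (a+b)*(a+V*b)). */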
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::inverse() const
{
/* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 8 */
const my_Fp6 &a = this->c0, &b = this->c1;
const my_Fp6 t0 = a.squared();
const my_Fp6 t1 = b.squared();
const my_Fp6 t2 = t0 - Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(t1);
const my_Fp6 t3 = t2.inverse();
const my_Fp6 c0 = a * t3;
const my_Fp6 c1 = - (b * t3);
return Fp12_2over3over2_model<n,modulus>(c0, c1);
}
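/* Inversion via the quadratic "norm": since (a + b*W)*(a - b*W) = a^2 - V*b^2
 * lies in Fp6, the inverse is (a - b*W) * (a^2 - V*b^2)^{-1}; t2 above is that
 * norm and t3 its Fp6 inverse, giving c0 = a*t3 and c1 = -(b*t3). */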
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::Frobenius_map(uint64_t power) const
{
return Fp12_2over3over2_model<n,modulus>(c0.Frobenius_map(power),
Frobenius_coeffs_c1[power % 12] * c1.Frobenius_map(power));
}
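/* Frobenius: the p^power map is applied recursively to both Fp6 coefficients,
 * and the W-part additionally picks up the precomputed constant
 * Frobenius_coeffs_c1[power % 12] (non_residue^((modulus^i-1)/6), see the
 * declaration in fp12_2over3over2.hpp). */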
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::unitary_inverse() const
{
return Fp12_2over3over2_model<n,modulus>(this->c0,
-this->c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::cyclotomic_squared() const
{
/* OLD: naive implementation
return (*this).squared();
*/
my_Fp2 z0 = this->c0.c0;
my_Fp2 z4 = this->c0.c1;
my_Fp2 z3 = this->c0.c2;
my_Fp2 z2 = this->c1.c0;
my_Fp2 z1 = this->c1.c1;
my_Fp2 z5 = this->c1.c2;
my_Fp2 t0, t1, t2, t3, t4, t5, tmp;
// t0 + t1*y = (z0 + z1*y)^2 = a^2
tmp = z0 * z1;
t0 = (z0 + z1) * (z0 + my_Fp6::non_residue * z1) - tmp - my_Fp6::non_residue * tmp;
t1 = tmp + tmp;
// t2 + t3*y = (z2 + z3*y)^2 = b^2
tmp = z2 * z3;
t2 = (z2 + z3) * (z2 + my_Fp6::non_residue * z3) - tmp - my_Fp6::non_residue * tmp;
t3 = tmp + tmp;
// t4 + t5*y = (z4 + z5*y)^2 = c^2
tmp = z4 * z5;
t4 = (z4 + z5) * (z4 + my_Fp6::non_residue * z5) - tmp - my_Fp6::non_residue * tmp;
t5 = tmp + tmp;
// for A
// z0 = 3 * t0 - 2 * z0
z0 = t0 - z0;
z0 = z0 + z0;
z0 = z0 + t0;
// z1 = 3 * t1 + 2 * z1
z1 = t1 + z1;
z1 = z1 + z1;
z1 = z1 + t1;
// for B
// z2 = 3 * (xi * t5) + 2 * z2
tmp = my_Fp6::non_residue * t5;
z2 = tmp + z2;
z2 = z2 + z2;
z2 = z2 + tmp;
// z3 = 3 * t4 - 2 * z3
z3 = t4 - z3;
z3 = z3 + z3;
z3 = z3 + t4;
// for C
// z4 = 3 * t2 - 2 * z4
z4 = t2 - z4;
z4 = z4 + z4;
z4 = z4 + t2;
// z5 = 3 * t3 + 2 * z5
z5 = t3 + z5;
z5 = z5 + z5;
z5 = z5 + t3;
return Fp12_2over3over2_model<n,modulus>(my_Fp6(z0,z4,z3),my_Fp6(z2,z1,z5));
}
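/* Squaring specialised to elements of the cyclotomic subgroup (the naive
 * fallback is kept in the comment above): the six Fp2 coefficients are grouped
 * into the pairs (z0,z1), (z2,z3), (z4,z5), each pair is squared with one
 * "complex" squaring (producing t0..t5), and the results are recombined with
 * the 3*t +/- 2*z pattern shown above. This is cheaper than a generic Fp12
 * squaring but is only valid on the cyclotomic subgroup; it is not a
 * general-purpose replacement for squared(). */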
template<mp_size_t n, const bigint<n>& modulus>
Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::mul_by_024(const Fp2_model<n, modulus> &ell_0,
const Fp2_model<n, modulus> &ell_VW,
const Fp2_model<n, modulus> &ell_VV) const
{
/* OLD: naive implementation
Fp12_2over3over2_model<n,modulus> a(my_Fp6(ell_0, my_Fp2::zero(), ell_VV),
my_Fp6(my_Fp2::zero(), ell_VW, my_Fp2::zero()));
return (*this) * a;
*/
my_Fp2 z0 = this->c0.c0;
my_Fp2 z1 = this->c0.c1;
my_Fp2 z2 = this->c0.c2;
my_Fp2 z3 = this->c1.c0;
my_Fp2 z4 = this->c1.c1;
my_Fp2 z5 = this->c1.c2;
my_Fp2 x0 = ell_0;
my_Fp2 x2 = ell_VV;
my_Fp2 x4 = ell_VW;
my_Fp2 t0, t1, t2, s0, T3, T4, D0, D2, D4, S1;
D0 = z0 * x0;
D2 = z2 * x2;
D4 = z4 * x4;
t2 = z0 + z4;
t1 = z0 + z2;
s0 = z1 + z3 + z5;
// For z.a_.a_ = z0.
S1 = z1 * x2;
T3 = S1 + D4;
T4 = my_Fp6::non_residue * T3 + D0;
z0 = T4;
// For z.a_.b_ = z1
T3 = z5 * x4;
S1 = S1 + T3;
T3 = T3 + D2;
T4 = my_Fp6::non_residue * T3;
T3 = z1 * x0;
S1 = S1 + T3;
T4 = T4 + T3;
z1 = T4;
// For z.a_.c_ = z2
t0 = x0 + x2;
T3 = t1 * t0 - D0 - D2;
T4 = z3 * x4;
S1 = S1 + T4;
T3 = T3 + T4;
// For z.b_.a_ = z3 (z3 needs z2)
t0 = z2 + z4;
z2 = T3;
t1 = x2 + x4;
T3 = t0 * t1 - D2 - D4;
T4 = my_Fp6::non_residue * T3;
T3 = z3 * x0;
S1 = S1 + T3;
T4 = T4 + T3;
z3 = T4;
// For z.b_.b_ = z4
T3 = z5 * x2;
S1 = S1 + T3;
T4 = my_Fp6::non_residue * T3;
t0 = x0 + x4;
T3 = t2 * t0 - D0 - D4;
T4 = T4 + T3;
z4 = T4;
// For z.b_.c_ = z5.
t0 = x0 + x2 + x4;
T3 = s0 * t0 - S1;
z5 = T3;
return Fp12_2over3over2_model<n,modulus>(my_Fp6(z0,z1,z2),my_Fp6(z3,z4,z5));
}
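/* Sparse multiplication: as the naive version kept in the comment above shows,
 * the second operand has non-zero Fp2 coefficients only in positions 0, 2 and 4
 * (ell_0 in c0.c0, ell_VV in c0.c2, ell_VW in c1.c1), so most terms of a generic
 * Fp12 product vanish and the remaining schoolbook terms are expanded by hand.
 * In pairing code such operands typically arise from evaluated line functions,
 * but nothing here depends on that interpretation. */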
template<mp_size_t n, const bigint<n>& modulus, mp_size_t m>
Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const bigint<m> &exponent)
{
return power<Fp12_2over3over2_model<n, modulus> >(self, exponent);
}
template<mp_size_t n, const bigint<n>& modulus, mp_size_t m, const bigint<m>& exp_modulus>
Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const Fp_model<m, exp_modulus> &exponent)
{
return self^(exponent.as_bigint());
}
template<mp_size_t n, const bigint<n>& modulus>
template<mp_size_t m>
Fp12_2over3over2_model<n, modulus> Fp12_2over3over2_model<n,modulus>::cyclotomic_exp(const bigint<m> &exponent) const
{
Fp12_2over3over2_model<n,modulus> res = Fp12_2over3over2_model<n,modulus>::one();
bool found_one = false;
for (int64_t i = m-1; i >= 0; --i)
{
for (int64_t j = GMP_NUMB_BITS - 1; j >= 0; --j)
{
if (found_one)
{
res = res.cyclotomic_squared();
}
if (exponent.data[i] & (((mp_limb_t) 1)<<j))
{
found_one = true;
res = res * (*this);
}
}
}
return res;
}
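/* Left-to-right binary exponentiation: the exponent's limbs are scanned from
 * the most significant bit downwards; once a set bit has been seen (found_one),
 * every step squares via cyclotomic_squared(), and each set bit additionally
 * multiplies in the base. found_one merely skips the leading zero bits. */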
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &out, const Fp12_2over3over2_model<n, modulus> &el)
{
out << el.c0 << OUTPUT_SEPARATOR << el.c1;
return out;
}
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &in, Fp12_2over3over2_model<n, modulus> &el)
{
in >> el.c0 >> el.c1;
return in;
}
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream& out, const std::vector<Fp12_2over3over2_model<n, modulus> > &v)
{
out << v.size() << "\n";
for (const Fp12_2over3over2_model<n, modulus>& t : v)
{
out << t << OUTPUT_NEWLINE;
}
return out;
}
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream& in, std::vector<Fp12_2over3over2_model<n, modulus> > &v)
{
v.clear();
size_t s;
in >> s;
char b;
in.read(&b, 1);
v.reserve(s);
for (size_t i = 0; i < s; ++i)
{
Fp12_2over3over2_model<n, modulus> el;
in >> el;
v.emplace_back(el);
}
return in;
}
} // libsnark
#endif // FP12_2OVER3OVER2_TCC_

View File

@@ -1,120 +0,0 @@
/** @file
*****************************************************************************
Declaration of arithmetic in the finite field F[p^2].
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP2_HPP_
#define FP2_HPP_
#include "algebra/fields/fp.hpp"
#include <vector>
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
class Fp2_model;
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &, const Fp2_model<n, modulus> &);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &, Fp2_model<n, modulus> &);
/**
* Arithmetic in the finite field F[p^2].
*
* Let p := modulus. This interface provides arithmetic for the extension field
* Fp2 = Fp[U]/(U^2-non_residue), where non_residue is in Fp.
*
* ASSUMPTION: p = 1 (mod 6)
*/
template<mp_size_t n, const bigint<n>& modulus>
class Fp2_model {
public:
typedef Fp_model<n, modulus> my_Fp;
static bigint<2*n> euler; // (modulus^2-1)/2
static size_t s; // modulus^2 = 2^s * t + 1
static bigint<2*n> t; // with t odd
static bigint<2*n> t_minus_1_over_2; // (t-1)/2
static my_Fp non_residue; // X^4-non_residue irreducible over Fp; used for constructing Fp2 = Fp[X] / (X^2 - non_residue)
static Fp2_model<n, modulus> nqr; // a quadratic nonresidue in Fp2
static Fp2_model<n, modulus> nqr_to_t; // nqr^t
static my_Fp Frobenius_coeffs_c1[2]; // non_residue^((modulus^i-1)/2) for i=0,1
my_Fp c0, c1;
Fp2_model() {};
Fp2_model(const my_Fp& c0, const my_Fp& c1) : c0(c0), c1(c1) {};
void clear() { c0.clear(); c1.clear(); }
void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); }
static Fp2_model<n, modulus> zero();
static Fp2_model<n, modulus> one();
static Fp2_model<n, modulus> random_element();
bool is_zero() const { return c0.is_zero() && c1.is_zero(); }
bool operator==(const Fp2_model &other) const;
bool operator!=(const Fp2_model &other) const;
Fp2_model operator+(const Fp2_model &other) const;
Fp2_model operator-(const Fp2_model &other) const;
Fp2_model operator*(const Fp2_model &other) const;
Fp2_model operator-() const;
Fp2_model squared() const; // default is squared_complex
Fp2_model inverse() const;
Fp2_model Frobenius_map(uint64_t power) const;
Fp2_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate)
Fp2_model squared_karatsuba() const;
Fp2_model squared_complex() const;
template<mp_size_t m>
Fp2_model operator^(const bigint<m> &other) const;
static size_t size_in_bits() { return 2*my_Fp::size_in_bits(); }
static bigint<n> base_field_char() { return modulus; }
friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp2_model<n, modulus> &el);
friend std::istream& operator>> <n, modulus>(std::istream &in, Fp2_model<n, modulus> &el);
};
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream& out, const std::vector<Fp2_model<n, modulus> > &v);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream& in, std::vector<Fp2_model<n, modulus> > &v);
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp2_model<n, modulus> &rhs);
template<mp_size_t n, const bigint<n>& modulus>
bigint<2*n> Fp2_model<n, modulus>::euler;
template<mp_size_t n, const bigint<n>& modulus>
size_t Fp2_model<n, modulus>::s;
template<mp_size_t n, const bigint<n>& modulus>
bigint<2*n> Fp2_model<n, modulus>::t;
template<mp_size_t n, const bigint<n>& modulus>
bigint<2*n> Fp2_model<n, modulus>::t_minus_1_over_2;
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp2_model<n, modulus>::non_residue;
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp2_model<n, modulus>::nqr;
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp2_model<n, modulus>::nqr_to_t;
template<mp_size_t n, const bigint<n>& modulus>
Fp_model<n, modulus> Fp2_model<n, modulus>::Frobenius_coeffs_c1[2];
} // libsnark
#include "algebra/fields/fp2.tcc"
#endif // FP2_HPP_

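The static members euler, s, t, t_minus_1_over_2, nqr and nqr_to_t declared above are the precomputed inputs to Fp2_model::sqrt() in fp2.tcc (shown next); restating the inline comments with $q = p^2$:

\[
q - 1 = 2^s \cdot t \ (t \text{ odd}), \qquad
\mathrm{euler} = \tfrac{q-1}{2}, \qquad
\mathrm{t\_minus\_1\_over\_2} = \tfrac{t-1}{2},
\]

where nqr is a fixed quadratic non-residue of $\mathbb{F}_{p^2}$ and nqr_to_t = nqr^t is the element of order $2^s$ that Tonelli--Shanks uses to adjust its candidate root.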
View File

@@ -1,261 +0,0 @@
/** @file
*****************************************************************************
Implementation of arithmetic in the finite field F[p^2].
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP2_TCC_
#define FP2_TCC_
#include "algebra/fields/field_utils.hpp"
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::zero()
{
return Fp2_model<n, modulus>(my_Fp::zero(), my_Fp::zero());
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::one()
{
return Fp2_model<n, modulus>(my_Fp::one(), my_Fp::zero());
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::random_element()
{
Fp2_model<n, modulus> r;
r.c0 = my_Fp::random_element();
r.c1 = my_Fp::random_element();
return r;
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp2_model<n,modulus>::operator==(const Fp2_model<n,modulus> &other) const
{
return (this->c0 == other.c0 && this->c1 == other.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
bool Fp2_model<n,modulus>::operator!=(const Fp2_model<n,modulus> &other) const
{
return !(operator==(other));
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::operator+(const Fp2_model<n,modulus> &other) const
{
return Fp2_model<n,modulus>(this->c0 + other.c0,
this->c1 + other.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::operator-(const Fp2_model<n,modulus> &other) const
{
return Fp2_model<n,modulus>(this->c0 - other.c0,
this->c1 - other.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp2_model<n, modulus> &rhs)
{
return Fp2_model<n,modulus>(lhs*rhs.c0,
lhs*rhs.c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::operator*(const Fp2_model<n,modulus> &other) const
{
/* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba) */
const my_Fp
&A = other.c0, &B = other.c1,
&a = this->c0, &b = this->c1;
const my_Fp aA = a * A;
const my_Fp bB = b * B;
return Fp2_model<n,modulus>(aA + non_residue * bB,
(a + b)*(A+B) - aA - bB);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::operator-() const
{
return Fp2_model<n,modulus>(-this->c0,
-this->c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::squared() const
{
return squared_complex();
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::squared_karatsuba() const
{
/* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba squaring) */
const my_Fp &a = this->c0, &b = this->c1;
const my_Fp asq = a.squared();
const my_Fp bsq = b.squared();
return Fp2_model<n,modulus>(asq + non_residue * bsq,
(a + b).squared() - asq - bsq);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::squared_complex() const
{
/* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Complex squaring) */
const my_Fp &a = this->c0, &b = this->c1;
const my_Fp ab = a * b;
return Fp2_model<n,modulus>((a + b) * (a + non_residue * b) - ab - non_residue * ab,
ab + ab);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::inverse() const
{
const my_Fp &a = this->c0, &b = this->c1;
/* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 8 */
const my_Fp t0 = a.squared();
const my_Fp t1 = b.squared();
const my_Fp t2 = t0 - non_residue * t1;
const my_Fp t3 = t2.inverse();
const my_Fp c0 = a * t3;
const my_Fp c1 = - (b * t3);
return Fp2_model<n,modulus>(c0, c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::Frobenius_map(uint64_t power) const
{
return Fp2_model<n,modulus>(c0,
Frobenius_coeffs_c1[power % 2] * c1);
}
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n,modulus> Fp2_model<n,modulus>::sqrt() const
{
if (is_zero()) {
return *this;
}
Fp2_model<n,modulus> one = Fp2_model<n,modulus>::one();
size_t v = Fp2_model<n,modulus>::s;
Fp2_model<n,modulus> z = Fp2_model<n,modulus>::nqr_to_t;
Fp2_model<n,modulus> w = (*this)^Fp2_model<n,modulus>::t_minus_1_over_2;
Fp2_model<n,modulus> x = (*this) * w;
Fp2_model<n,modulus> b = x * w; // b = (*this)^t
// check if square with euler's criterion
Fp2_model<n,modulus> check = b;
for (size_t i = 0; i < v-1; ++i)
{
check = check.squared();
}
if (check != one)
{
assert_except(0);
}
// compute square root with Tonelli--Shanks
// (does not terminate if not a square!)
while (b != one)
{
size_t m = 0;
Fp2_model<n,modulus> b2m = b;
while (b2m != one)
{
/* invariant: b2m = b^(2^m) after entering this loop */
b2m = b2m.squared();
m += 1;
}
int j = v-m-1;
w = z;
while (j > 0)
{
w = w.squared();
--j;
} // w = z^2^(v-m-1)
z = w.squared();
b = b * z;
x = x * w;
v = m;
}
return x;
}
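/* Tonelli--Shanks bookkeeping: after the setup, w = (*this)^((t-1)/2),
 * x = (*this)^((t+1)/2) and b = (*this)^t, so x^2 == (*this) * b. The
 * Euler-criterion check (squaring b for v-1 steps must yield one) asserts that
 * a root exists. Each loop iteration multiplies x by w and b by z = w^2, which
 * preserves x^2 == (*this) * b while strictly decreasing the 2-order of b, so
 * when b reaches one, x is a square root of *this. */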
template<mp_size_t n, const bigint<n>& modulus>
template<mp_size_t m>
Fp2_model<n,modulus> Fp2_model<n,modulus>::operator^(const bigint<m> &pow) const
{
return power<Fp2_model<n, modulus>, m>(*this, pow);
}
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &out, const Fp2_model<n, modulus> &el)
{
out << el.c0 << OUTPUT_SEPARATOR << el.c1;
return out;
}
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &in, Fp2_model<n, modulus> &el)
{
in >> el.c0 >> el.c1;
return in;
}
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream& out, const std::vector<Fp2_model<n, modulus> > &v)
{
out << v.size() << "\n";
for (const Fp2_model<n, modulus>& t : v)
{
out << t << OUTPUT_NEWLINE;
}
return out;
}
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream& in, std::vector<Fp2_model<n, modulus> > &v)
{
v.clear();
size_t s;
in >> s;
char b;
in.read(&b, 1);
v.reserve(s);
for (size_t i = 0; i < s; ++i)
{
Fp2_model<n, modulus> el;
in >> el;
v.emplace_back(el);
}
return in;
}
} // libsnark
#endif // FP2_TCC_

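As a self-contained illustration of the Karatsuba multiplication used by Fp2_model::operator* above, here is a minimal sketch over a toy prime; the prime p = 103, the choice non_residue = -1 and all helper names are assumptions made for the example and are not libsnark code:

// Toy parameters for illustration only (not libsnark code).
#include <cstdint>
#include <cstdio>

static const int64_t P = 103;              // toy prime with p = 3 (mod 4), so -1 is a non-residue
static const int64_t NON_RESIDUE = P - 1;  // -1 mod p

static int64_t fp_add(int64_t a, int64_t b) { return (a + b) % P; }
static int64_t fp_sub(int64_t a, int64_t b) { return ((a - b) % P + P) % P; }
static int64_t fp_mul(int64_t a, int64_t b) { return (a * b) % P; }

struct Fp2 { int64_t c0, c1; };            // represents c0 + c1*U with U^2 = NON_RESIDUE

// Karatsuba: three base-field products (aA, bB, (a+b)(A+B)) instead of the
// four used by the schoolbook formula.
static Fp2 fp2_mul(const Fp2 &x, const Fp2 &y) {
    const int64_t aA    = fp_mul(x.c0, y.c0);
    const int64_t bB    = fp_mul(x.c1, y.c1);
    const int64_t cross = fp_sub(fp_sub(fp_mul(fp_add(x.c0, x.c1), fp_add(y.c0, y.c1)), aA), bB);
    return { fp_add(aA, fp_mul(NON_RESIDUE, bB)), cross };
}

int main() {
    const Fp2 x{3, 7}, y{10, 4};
    const Fp2 z = fp2_mul(x, y);           // expect 2 + 82*U (mod 103)
    std::printf("(3 + 7U)(10 + 4U) = %lld + %lldU (mod %lld)\n",
                (long long)z.c0, (long long)z.c1, (long long)P);
    return 0;
}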
View File

@@ -1,104 +0,0 @@
/** @file
*****************************************************************************
Declaration of arithmetic in the finite field F[(p^2)^3]
*****************************************************************************
* @author This file is part of libsnark, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef FP6_3OVER2_HPP_
#define FP6_3OVER2_HPP_
#include "algebra/fields/fp.hpp"
#include "algebra/fields/fp2.hpp"
#include <vector>
namespace libsnark {
template<mp_size_t n, const bigint<n>& modulus>
class Fp6_3over2_model;
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream &, const Fp6_3over2_model<n, modulus> &);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream &, Fp6_3over2_model<n, modulus> &);
/**
* Arithmetic in the finite field F[(p^2)^3].
*
* Let p := modulus. This interface provides arithmetic for the extension field
* Fp6 = Fp2[V]/(V^3-non_residue) where non_residue is in Fp2.
*
* ASSUMPTION: p = 1 (mod 6)
*/
template<mp_size_t n, const bigint<n>& modulus>
class Fp6_3over2_model {
public:
typedef Fp_model<n, modulus> my_Fp;
typedef Fp2_model<n, modulus> my_Fp2;
static my_Fp2 non_residue;
static my_Fp2 Frobenius_coeffs_c1[6]; // non_residue^((modulus^i-1)/3) for i=0,1,2,3,4,5
static my_Fp2 Frobenius_coeffs_c2[6]; // non_residue^((2*modulus^i-2)/3) for i=0,1,2,3,4,5
my_Fp2 c0, c1, c2;
Fp6_3over2_model() {};
Fp6_3over2_model(const my_Fp2& c0, const my_Fp2& c1, const my_Fp2& c2) : c0(c0), c1(c1), c2(c2) {};
void clear() { c0.clear(); c1.clear(); c2.clear(); }
void print() const { printf("c0/c1/c2:\n"); c0.print(); c1.print(); c2.print(); }
static Fp6_3over2_model<n, modulus> zero();
static Fp6_3over2_model<n, modulus> one();
static Fp6_3over2_model<n, modulus> random_element();
bool is_zero() const { return c0.is_zero() && c1.is_zero() && c2.is_zero(); }
bool operator==(const Fp6_3over2_model &other) const;
bool operator!=(const Fp6_3over2_model &other) const;
Fp6_3over2_model operator+(const Fp6_3over2_model &other) const;
Fp6_3over2_model operator-(const Fp6_3over2_model &other) const;
Fp6_3over2_model operator*(const Fp6_3over2_model &other) const;
Fp6_3over2_model operator-() const;
Fp6_3over2_model squared() const;
Fp6_3over2_model inverse() const;
Fp6_3over2_model Frobenius_map(uint64_t power) const;
static my_Fp2 mul_by_non_residue(const my_Fp2 &elt);
template<mp_size_t m>
Fp6_3over2_model operator^(const bigint<m> &other) const;
static bigint<n> base_field_char() { return modulus; }
static size_t extension_degree() { return 6; }
friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp6_3over2_model<n, modulus> &el);
friend std::istream& operator>> <n, modulus>(std::istream &in, Fp6_3over2_model<n, modulus> &el);
};
template<mp_size_t n, const bigint<n>& modulus>
std::ostream& operator<<(std::ostream& out, const std::vector<Fp6_3over2_model<n, modulus> > &v);
template<mp_size_t n, const bigint<n>& modulus>
std::istream& operator>>(std::istream& in, std::vector<Fp6_3over2_model<n, modulus> > &v);
template<mp_size_t n, const bigint<n>& modulus>
Fp6_3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp6_3over2_model<n, modulus> &rhs);
template<mp_size_t n, const bigint<n>& modulus>
Fp6_3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp6_3over2_model<n, modulus> &rhs);
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp6_3over2_model<n, modulus>::non_residue;
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp6_3over2_model<n, modulus>::Frobenius_coeffs_c1[6];
template<mp_size_t n, const bigint<n>& modulus>
Fp2_model<n, modulus> Fp6_3over2_model<n, modulus>::Frobenius_coeffs_c2[6];
} // libsnark
#include "algebra/fields/fp6_3over2.tcc"
#endif // FP6_3OVER2_HPP_

Some files were not shown because too many files have changed in this diff.