Squashed 'src/secp256k1/' changes from 8ab24e8da..c6b6b8f1b

c6b6b8f1b Merge #830: Rip out non-endomorphism code + dependencies
c582abade Consistency improvements to the comments
63c6b7161 Reorder comments/function around scalar_split_lambda
2edc514c9 WNAF of lambda_split output has max size 129
4232e5b7d Rip out non-endomorphism code
ebad8414b Check correctness of lambda split without -DVERIFY
fe7fc1fda Make lambda constant accessible
9d2f2b44d Add tests to exercise lambda split near bounds
9aca2f7f0 Add secp256k1_split_lambda_verify
acab934d2 Detailed comments for secp256k1_scalar_split_lambda
76ed922a5 Increase precision of g1 and g2
6173839c9 Switch to our own memcmp function
63150ab4d Merge #827: Rename testrand functions to have test in name
c5257aed0 Merge #821: travis: Explicitly set --with-valgrind
bb1f54280 Merge #818: Add static assertion that uint32_t is unsigned int or wider
a45c1fa63 Rename testrand functions to have test in name
5006895bd Merge #808: Exhaustive test improvements + exhaustive schnorrsig tests
4eecb4d6e travis: VALGRIND->RUN_VALGRIND to avoid confusion with WITH_VALGRIND
66a765c77 travis: Explicitly set --with-valgrind
d7838ba6a Merge #813: Enable configuring Valgrind support
7ceb0b761 Merge #819: Enable -Wundef warning
8b7dcdd95 Add exhaustive test for extrakeys and schnorrsig
08d7d8929 Make pubkey parsing test whether points are in the correct subgroup
87af00b51 Abstract out challenge computation in schnorrsig
63e1b2aa7 Disable output buffering in tests_exhaustive.c
39f67dd07 Support splitting exhaustive tests across cores
e99b26fcd Give exhaustive_tests count and seed cmdline inputs
49e6630bc refactor: move RNG seeding to testrand
b110c106f Change exhaustive test groups so they have a point with X=1
cec7b18a3 Select exhaustive lambda in function of order
78f6cdfaa Make the curve B constant a secp256k1_fe
d7f39ae4b Delete gej_is_valid_var: unused outside tests
8bcd78cd7 Make secp256k1_scalar_b32 detect overflow in scalar_low
c498366e5 Move exhaustive tests for recovery to module
be3179154 Make group order purely compile-time in exhaustive tests
e73ff3092 Enable -Wundef warning
c0041b5cf Add static assertion that uint32_t is unsigned int or wider
4ad408faf Merge #782: Check if variable=yes instead of if var is set in travis.sh
412bf874d configure: Allow specifying --with[out]-valgrind explicitly
34debf7a6 Modify .travis.yml to explicitly pass no in env vars instead of setting to nothing
a0e99fc12 Merge #814: tests: Initialize random group elements fully
5738e8622 tests: Initialize random group elements fully
c9939ba55 Merge #812: travis: run bench_schnorrsig
a51f2af62 travis: run bench_schnorrsig
ef37761fe Change travis.sh to check if variables are equal to yes instead of non-empty. Before this, setting `VALGRIND=wat` was considered true, and making it false required unsetting the variable (`VALGRIND=`); now it checks for `VALGRIND=yes`, and anything other than `yes` evaluates to false

git-subtree-dir: src/secp256k1
git-subtree-split: c6b6b8f1bb044d7d1aa065ebb674adde98a36a8e
Jack Grigg 2020-10-22 00:41:56 +01:00
parent 2fa8a70c11
commit 52ac87242d
39 changed files with 1586 additions and 947 deletions

View File

@ -17,33 +17,29 @@ compiler:
- gcc
env:
global:
- WIDEMUL=auto BIGNUM=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
- WIDEMUL=auto BIGNUM=auto STATICPRECOMPUTATION=yes ECMULTGENPRECISION=auto ASM=no BUILD=check WITH_VALGRIND=yes RUN_VALGRIND=no EXTRAFLAGS= HOST= ECDH=no RECOVERY=no SCHNORRSIG=no EXPERIMENTAL=no CTIMETEST=yes BENCH=yes ITERS=2
matrix:
- WIDEMUL=int64 RECOVERY=yes
- WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int64 ENDOMORPHISM=yes
- WIDEMUL=int128
- WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128 ENDOMORPHISM=yes
- WIDEMUL=int128 ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- WIDEMUL=int128 ASM=x86_64
- WIDEMUL=int128 ENDOMORPHISM=yes ASM=x86_64
- BIGNUM=no
- BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- BUILD=distcheck CTIMETEST= BENCH=
- BUILD=distcheck WITH_VALGRIND=no CTIMETEST=no BENCH=no
- CPPFLAGS=-DDETERMINISTIC
- CFLAGS=-O0 CTIMETEST=
- CFLAGS=-O0 CTIMETEST=no
- ECMULTGENPRECISION=2
- ECMULTGENPRECISION=8
- VALGRIND=yes ENDOMORPHISM=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD=
- VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" CPPFLAGS=-DVALGRIND BUILD=
- RUN_VALGRIND=yes BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes EXTRAFLAGS="--disable-openssl-tests" BUILD=
matrix:
fast_finish: true
include:
- compiler: clang
os: linux
env: HOST=i686-linux-gnu ENDOMORPHISM=yes
env: HOST=i686-linux-gnu
addons:
apt:
packages:
@ -63,7 +59,7 @@ matrix:
- libtool-bin
- libc6-dbg:i386
- compiler: gcc
env: HOST=i686-linux-gnu ENDOMORPHISM=yes
env: HOST=i686-linux-gnu
os: linux
addons:
apt:

View File

@ -48,7 +48,7 @@ Implementation details
* Use wNAF notation for point multiplicands.
* Use a much larger window for multiples of G, using precomputed multiples.
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
* Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Point multiplication for signing
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
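The endomorphism step above can be summarized in three lines (a sketch, with lambda and beta denoting the curve's fixed scalar and field constants):

    lambda*(x, y) = (beta*x, y)      (one extra field multiplication per point)
    k = k1 + k2*lambda mod n         (k1, k2 are roughly 128 bits wide)
    k*P = k1*P + k2*(lambda*P)       (two half-width multiplications that share their doublings)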

View File

@ -67,7 +67,7 @@ esac
CFLAGS="-W $CFLAGS"
warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
saved_CFLAGS="$CFLAGS"
CFLAGS="$warn_CFLAGS $CFLAGS"
AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}])
@ -116,11 +116,6 @@ AC_ARG_ENABLE(exhaustive_tests,
[use_exhaustive_tests=$enableval],
[use_exhaustive_tests=yes])
AC_ARG_ENABLE(endomorphism,
AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
[use_endomorphism=$enableval],
[use_endomorphism=no])
AC_ARG_ENABLE(ecmult_static_precomputation,
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
[use_ecmult_static_precomputation=$enableval],
@ -164,8 +159,7 @@ AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
)],
[req_ecmult_window=$withval], [req_ecmult_window=auto])
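A worked example of the size formula above (not part of the help text): the default SIZE=15 stores 2^(15-1) * 64 = 16384 * 64 = 1048576 bytes (1 MiB). That matches the old two-table total, since each table previously stored 2^(SIZE-2) * 64 bytes and the endomorphism added a second one; with the endomorphism now unconditional, the formula simply reports the combined figure.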
@ -178,7 +172,21 @@ AC_ARG_WITH([ecmult-gen-precision], [AS_HELP_STRING([--with-ecmult-gen-precision
)],
[req_ecmult_gen_precision=$withval], [req_ecmult_gen_precision=auto])
AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [enable_valgrind=no], [])
AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto],
[Build with extra checks for running inside Valgrind [default=auto]]
)],
[req_valgrind=$withval], [req_valgrind=auto])
if test x"$req_valgrind" = x"no"; then
enable_valgrind=no
else
AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [
if test x"$req_valgrind" = x"yes"; then
AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available])
fi
enable_valgrind=no
], [])
fi
AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])
if test x"$enable_coverage" = x"yes"; then
@ -415,10 +423,6 @@ if test x"$set_bignum" = x"gmp"; then
SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
fi
if test x"$use_endomorphism" = x"yes"; then
AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
fi
if test x"$set_precomp" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
@ -500,7 +504,6 @@ AC_OUTPUT
echo
echo "Build Options:"
echo " with endomorphism = $use_endomorphism"
echo " with ecmult precomp = $set_precomp"
echo " with external callbacks = $use_external_default_callbacks"
echo " with benchmarks = $use_benchmark"

View File

@ -13,27 +13,28 @@ then
fi
./configure \
--enable-experimental="$EXPERIMENTAL" --enable-endomorphism="$ENDOMORPHISM" \
--enable-experimental="$EXPERIMENTAL" \
--with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \
--enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \
--enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
--enable-module-schnorrsig="$SCHNORRSIG" \
--with-valgrind="$WITH_VALGRIND" \
--host="$HOST" $EXTRAFLAGS
if [ -n "$BUILD" ]
then
make -j2 "$BUILD"
fi
if [ -n "$VALGRIND" ]
if [ "$RUN_VALGRIND" = "yes" ]
then
make -j2
# the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (http://valgrind.org/docs/manual/manual-core.html)
valgrind --error-exitcode=42 ./tests 16
valgrind --error-exitcode=42 ./exhaustive_tests
fi
if [ -n "$BENCH" ]
if [ "$BENCH" = "yes" ]
then
if [ -n "$VALGRIND" ]
if [ "$RUN_VALGRIND" = "yes" ]
then
# Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool
EXEC='./libtool --mode=execute valgrind --error-exitcode=42'
@ -56,8 +57,12 @@ then
then
$EXEC ./bench_ecdh >> bench.log 2>&1
fi
if [ "$SCHNORRSIG" = "yes" ]
then
$EXEC ./bench_schnorrsig >> bench.log 2>&1
fi
fi
if [ -n "$CTIMETEST" ]
if [ "$CTIMETEST" = "yes" ]
then
./libtool --mode=execute valgrind --error-exitcode=42 ./valgrind_ctime_test > valgrind_ctime_test.log 2>&1
fi

View File

@ -0,0 +1,129 @@
# Define field size and field
P = 2^256 - 2^32 - 977
F = GF(P)
BETA = F(0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee)
assert(BETA != F(1) and BETA^3 == F(1))
orders_done = set()
results = {}
first = True
for b in range(1, P):
# There are only 6 curves (up to isomorphism) of the form y^2=x^3+B. Stop once we have tried all.
if len(orders_done) == 6:
break
E = EllipticCurve(F, [0, b])
print("Analyzing curve y^2 = x^3 + %i" % b)
n = E.order()
# Skip curves with an order we've already tried
if n in orders_done:
print("- Isomorphic to earlier curve")
continue
orders_done.add(n)
# Skip curves isomorphic to the real secp256k1
if n.is_pseudoprime():
print(" - Isomorphic to secp256k1")
continue
print("- Finding subgroups")
# Find what prime subgroups exist
for f, _ in n.factor():
print("- Analyzing subgroup of order %i" % f)
# Skip subgroups of order <4 or >1000
if f < 4 or f > 1000:
print(" - Bad size")
continue
# Iterate over X coordinates until we find one that is on the curve, has order f,
# and for which curve isomorphism exists that maps it to X coordinate 1.
for x in range(1, P):
# Skip X coordinates not on the curve, and construct the full point otherwise.
if not E.is_x_coord(x):
continue
G = E.lift_x(F(x))
print(" - Analyzing (multiples of) point with X=%i" % x)
# Skip points whose order is not a multiple of f. Project the point to have
# order f otherwise.
if (G.order() % f):
print(" - Bad order")
continue
G = G * (G.order() // f)
# Find lambda for endomorphism. Skip if none can be found.
lam = None
for l in Integers(f)(1).nth_root(3, all=True):
if int(l)*G == E(BETA*G[0], G[1]):
lam = int(l)
break
if lam is None:
print(" - No endomorphism for this subgroup")
break
# Now look for an isomorphism of the curve that gives this point an X
# coordinate equal to 1.
# If (x,y) is on y^2 = x^3 + b, then (a^2*x, a^3*y) is on y^2 = x^3 + a^6*b.
# So look for m=a^2=1/x.
m = F(1)/G[0]
if not m.is_square():
print(" - No curve isomorphism maps it to a point with X=1")
continue
a = m.sqrt()
rb = a^6*b
RE = EllipticCurve(F, [0, rb])
# Use as generator twice the image of G under the above isomorphism.
# This means that generator*(1/2 mod f) will have X coordinate 1.
RG = RE(1, a^3*G[1]) * 2
# And even Y coordinate.
if int(RG[1]) % 2:
RG = -RG
assert(RG.order() == f)
assert(lam*RG == RE(BETA*RG[0], RG[1]))
# We have found curve RE:y^2=x^3+rb with generator RG of order f. Remember it
results[f] = {"b": rb, "G": RG, "lambda": lam}
print(" - Found solution")
break
print("")
print("")
print("")
print("/* To be put in src/group_impl.h: */")
first = True
for f in sorted(results.keys()):
b = results[f]["b"]
G = results[f]["G"]
print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
first = False
print("static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(")
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[0]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(G[1]) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
print(");")
print("static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(")
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x," % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4)))
print(" 0x%08x, 0x%08x, 0x%08x, 0x%08x" % tuple((int(b) >> (32 * (7 - i))) & 0xffffffff for i in range(4, 8)))
print(");")
print("# else")
print("# error No known generator for the specified exhaustive test group order.")
print("# endif")
print("")
print("")
print("/* To be put in src/scalar_impl.h: */")
first = True
for f in sorted(results.keys()):
lam = results[f]["lambda"]
print("# %s EXHAUSTIVE_TEST_ORDER == %i" % ("if" if first else "elif", f))
first = False
print("# define EXHAUSTIVE_TEST_LAMBDA %i" % lam)
print("# else")
print("# error No known lambda for the specified exhaustive test group order.")
print("# endif")
print("")

View File

@ -7,6 +7,8 @@
#ifndef SECP256K1_ASSUMPTIONS_H
#define SECP256K1_ASSUMPTIONS_H
#include <limits.h>
#include "util.h"
/* This library, like most software, relies on a number of compiler implementation defined (but not undefined)
@ -19,7 +21,11 @@ struct secp256k1_assumption_checker {
allowed. */
int dummy_array[(
/* Bytes are 8 bits. */
CHAR_BIT == 8 &&
(CHAR_BIT == 8) &&
/* No integer promotion for uint32_t. This ensures that we can multiply uintXX_t values where XX >= 32
without signed overflow, which would be undefined behaviour. */
(UINT_MAX <= UINT32_MAX) &&
/* Conversions from unsigned to signed outside of the bounds of the signed type are
implementation-defined. Verify that they function as reinterpreting the lower
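A minimal illustration of the promotion hazard that the UINT_MAX <= UINT32_MAX assertion rules out (hypothetical code, not part of the library):

    #include <stdint.h>

    uint32_t mul_lo(uint32_t a, uint32_t b) {
        /* If int were wider than 32 bits, a and b would be promoted to
         * signed int here, and with a = b = 0xffffffff the mathematical
         * product (2^32 - 1)^2 = 2^64 - 2^33 + 1 exceeds even a 64-bit
         * INT_MAX: signed overflow, i.e. undefined behaviour. Given
         * UINT_MAX <= UINT32_MAX, no signed promotion happens and the
         * multiplication wraps modulo 2^32 as intended. */
        return a * b;
    }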

View File

@ -11,7 +11,6 @@
#undef USE_ASM_X86_64
#undef USE_ECMULT_STATIC_PRECOMPUTATION
#undef USE_ENDOMORPHISM
#undef USE_EXTERNAL_ASM
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
#undef USE_FIELD_INV_BUILTIN

View File

@ -117,7 +117,6 @@ void bench_scalar_mul(void* arg, int iters) {
}
}
#ifdef USE_ENDOMORPHISM
void bench_scalar_split(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
@ -128,7 +127,6 @@ void bench_scalar_split(void* arg, int iters) {
}
CHECK(j <= iters);
}
#endif
void bench_scalar_inverse(void* arg, int iters) {
int i, j = 0;
@ -397,9 +395,7 @@ int main(int argc, char **argv) {
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10);
#ifdef USE_ENDOMORPHISM
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters);
#endif
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);

View File

@ -15,9 +15,7 @@
typedef struct {
/* For accelerating the computation of a*P + b*G: */
secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
#endif
} secp256k1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;

View File

@ -140,19 +140,16 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
secp256k1_fe Z;
int skew_1;
#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
secp256k1_scalar q_1, q_lam;
#endif
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
/* build wnaf representation for q. */
int rsize = size;
#ifdef USE_ENDOMORPHISM
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
@ -160,12 +157,9 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
#endif
{
skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
#ifdef USE_ENDOMORPHISM
skew_lam = 0;
#endif
}
/* Calculate odd multiples of a.
@ -179,14 +173,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_fe_normalize_weak(&pre_a[i].y);
}
#ifdef USE_ENDOMORPHISM
if (size > 128) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
}
#endif
/* first loop iteration (separated out so we can directly set r, rather
* than having it start at infinity, get doubled several times, then have
@ -195,14 +187,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
secp256k1_gej_set_ge(r, &tmpa);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
secp256k1_gej_add_ge(r, r, &tmpa);
}
#endif
/* remaining loop iterations */
for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
int n;
@ -215,14 +205,12 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
}
#endif
}
secp256k1_fe_mul(&r->z, &r->z, &Z);
@ -231,43 +219,35 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
/* Correct for wNAF skew */
secp256k1_ge correction = *a;
secp256k1_ge_storage correction_1_stor;
#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage correction_lam_stor;
#endif
secp256k1_ge_storage a2_stor;
secp256k1_gej tmpj;
secp256k1_gej_set_ge(&tmpj, &correction);
secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
secp256k1_ge_set_gej(&correction, &tmpj);
secp256k1_ge_to_storage(&correction_1_stor, a);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_to_storage(&correction_lam_stor, a);
}
#endif
secp256k1_ge_to_storage(&a2_stor, &correction);
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
}
#endif
/* Apply the correction */
secp256k1_ge_from_storage(&correction, &correction_1_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_from_storage(&correction, &correction_lam_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_ge_mul_lambda(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
}
#endif
}
}
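With the non-endomorphism path gone, the constant-time flow of secp256k1_ecmult_const for full-size scalars is always (summarizing the code above):

    1. Split: q = q_1 + q_lam*lambda mod n, with q_1 and q_lam roughly
       128 bits each (secp256k1_scalar_split_lambda).
    2. Build a 128-bit constant-length WNAF for each half
       (secp256k1_wnaf_const), each with its own skew.
    3. Compute the table of odd multiples of A, and derive the lambda table
       entry-wise via secp256k1_ge_mul_lambda (one field multiplication per
       entry).
    4. Run a single ~128-iteration double-and-add loop drawing from both
       tables, then apply the two skew corrections shown above.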

View File

@ -38,8 +38,8 @@
* (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
* where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
* be larger due to platform-specific padding and alignment.
* If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM)
* two tables of this size are used instead of only one.
* Two tables of this size are used (due to the endomorphism
* optimization).
*/
# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
@ -59,11 +59,7 @@
# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif
#ifdef USE_ENDOMORPHISM
#define WNAF_BITS 128
#else
#define WNAF_BITS 256
#endif
#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
@ -77,17 +73,9 @@
#define PIPPENGER_MAX_BUCKET_WINDOW 12
/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
#ifdef USE_ENDOMORPHISM
#define ECMULT_PIPPENGER_THRESHOLD 88
#else
#define ECMULT_PIPPENGER_THRESHOLD 160
#endif
#define ECMULT_PIPPENGER_THRESHOLD 88
#ifdef USE_ENDOMORPHISM
#define ECMULT_MAX_POINTS_PER_BATCH 5000000
#else
#define ECMULT_MAX_POINTS_PER_BATCH 10000000
#endif
#define ECMULT_MAX_POINTS_PER_BATCH 5000000
/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
* the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will
@ -313,16 +301,12 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp25
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
#ifdef USE_ENDOMORPHISM
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
#endif
;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
#ifdef USE_ENDOMORPHISM
ctx->pre_g_128 = NULL;
#endif
}
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
@ -347,7 +331,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void *
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
#ifdef USE_ENDOMORPHISM
{
secp256k1_gej g_128j;
int i;
@ -364,7 +347,6 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void *
}
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
}
#endif
}
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
@ -372,11 +354,9 @@ static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *d
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
#ifdef USE_ENDOMORPHISM
if (src->pre_g_128 != NULL) {
dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
#endif
}
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
@ -447,16 +427,11 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a,
}
struct secp256k1_strauss_point_state {
#ifdef USE_ENDOMORPHISM
secp256k1_scalar na_1, na_lam;
int wnaf_na_1[130];
int wnaf_na_lam[130];
int wnaf_na_1[129];
int wnaf_na_lam[129];
int bits_na_1;
int bits_na_lam;
#else
int wnaf_na[256];
int bits_na;
#endif
size_t input_pos;
};
@ -464,26 +439,19 @@ struct secp256k1_strauss_state {
secp256k1_gej* prej;
secp256k1_fe* zr;
secp256k1_ge* pre_a;
#ifdef USE_ENDOMORPHISM
secp256k1_ge* pre_a_lam;
#endif
struct secp256k1_strauss_point_state* ps;
};
static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
secp256k1_ge tmpa;
secp256k1_fe Z;
#ifdef USE_ENDOMORPHISM
/* Split G factors. */
secp256k1_scalar ng_1, ng_128;
int wnaf_ng_1[129];
int bits_ng_1 = 0;
int wnaf_ng_128[129];
int bits_ng_128 = 0;
#else
int wnaf_ng[256];
int bits_ng = 0;
#endif
int i;
int bits = 0;
int np;
@ -494,28 +462,20 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
continue;
}
state->ps[no].input_pos = np;
#ifdef USE_ENDOMORPHISM
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);
/* build wnaf representation for na_1 and na_lam. */
state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A);
state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A);
VERIFY_CHECK(state->ps[no].bits_na_1 <= 130);
VERIFY_CHECK(state->ps[no].bits_na_lam <= 130);
state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A);
state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A);
VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
if (state->ps[no].bits_na_1 > bits) {
bits = state->ps[no].bits_na_1;
}
if (state->ps[no].bits_na_lam > bits) {
bits = state->ps[no].bits_na_lam;
}
#else
/* build wnaf representation for na. */
state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A);
if (state->ps[no].bits_na > bits) {
bits = state->ps[no].bits_na;
}
#endif
++no;
}
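On the 130 -> 129 array sizes above (commit 2edc514c9): the halves produced by secp256k1_scalar_split_lambda fit in 128 bits, and the WNAF of a b-bit value has at most b+1 digits (rewriting a run of ones can carry one position past the top bit), so

    bits(na_1), bits(na_lam) <= 128  =>  WNAF length <= 128 + 1 = 129.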
@ -547,7 +507,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
secp256k1_fe_set_int(&Z, 1);
}
#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
@ -568,21 +527,12 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
bits = bits_ng_128;
}
}
#else
if (ng) {
bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
if (bits_ng > bits) {
bits = bits_ng;
}
}
#endif
secp256k1_gej_set_infinity(r);
for (i = bits - 1; i >= 0; i--) {
int n;
secp256k1_gej_double_var(r, r, NULL);
#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
@ -601,18 +551,6 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
}
#else
for (np = 0; np < no; ++np) {
if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) {
ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
}
}
if (i < bits_ng && (n = wnaf_ng[i])) {
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
}
#endif
}
if (!r->infinity) {
@ -625,27 +563,19 @@ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej
secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
struct secp256k1_strauss_point_state ps[1];
#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
#endif
struct secp256k1_strauss_state state;
state.prej = prej;
state.zr = zr;
state.pre_a = pre_a;
#ifdef USE_ENDOMORPHISM
state.pre_a_lam = pre_a_lam;
#endif
state.ps = ps;
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
}
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
#ifdef USE_ENDOMORPHISM
static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
#else
static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
#endif
return n_points*point_size;
}
@ -665,12 +595,8 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callba
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
#ifdef USE_ENDOMORPHISM
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
#else
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
#endif
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
@ -868,7 +794,6 @@ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_wi
* set of buckets) for a given number of points.
*/
static int secp256k1_pippenger_bucket_window(size_t n) {
#ifdef USE_ENDOMORPHISM
if (n <= 1) {
return 1;
} else if (n <= 4) {
@ -892,33 +817,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
} else {
return PIPPENGER_MAX_BUCKET_WINDOW;
}
#else
if (n <= 1) {
return 1;
} else if (n <= 11) {
return 2;
} else if (n <= 45) {
return 3;
} else if (n <= 100) {
return 4;
} else if (n <= 275) {
return 5;
} else if (n <= 625) {
return 6;
} else if (n <= 1850) {
return 7;
} else if (n <= 3400) {
return 8;
} else if (n <= 9630) {
return 9;
} else if (n <= 17900) {
return 10;
} else if (n <= 32800) {
return 11;
} else {
return PIPPENGER_MAX_BUCKET_WINDOW;
}
#endif
}
/**
@ -926,7 +824,6 @@ static int secp256k1_pippenger_bucket_window(size_t n) {
*/
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
switch(bucket_window) {
#ifdef USE_ENDOMORPHISM
case 1: return 1;
case 2: return 4;
case 3: return 20;
@ -939,26 +836,11 @@ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
case 10: return 7880;
case 11: return 16050;
case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#else
case 1: return 1;
case 2: return 11;
case 3: return 45;
case 4: return 100;
case 5: return 275;
case 6: return 625;
case 7: return 1850;
case 8: return 3400;
case 9: return 9630;
case 10: return 17900;
case 11: return 32800;
case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#endif
}
return 0;
}
#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
secp256k1_scalar tmp = *s1;
secp256k1_scalar_split_lambda(s1, s2, &tmp);
@ -973,32 +855,23 @@ SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, s
secp256k1_ge_neg(p2, p2);
}
}
#endif
/**
* Returns the scratch size required for a given number of points (excluding
* base point G) without considering alignment.
*/
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
#else
size_t entries = n_points + 1;
#endif
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
/* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
/* Use 2(n+1) with the endomorphism when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
#else
size_t entries = n_points + 1;
#endif
secp256k1_ge *points;
secp256k1_scalar *scalars;
secp256k1_gej *buckets;
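The conditional negations visible at the top of the hunk above (the tail of secp256k1_ecmult_endo_split) rest on the identity s*P = (n - s)*(-P): whenever a split half is "high" (above n/2), it is replaced by its negation and the matching point is negated too, keeping both scalar halves small without changing the sum being computed.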
@ -1035,10 +908,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
scalars[0] = *inp_g_sc;
points[0] = secp256k1_ge_const_g;
idx++;
#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
idx++;
#endif
}
while (point_idx < n_points) {
@ -1047,10 +918,8 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_call
return 0;
}
idx++;
#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
idx++;
#endif
point_idx++;
}
@ -1093,9 +962,7 @@ static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_cal
size_t space_overhead;
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
#ifdef USE_ENDOMORPHISM
entry_size = 2*entry_size;
#endif
space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
if (space_overhead > max_alloc) {
break;

View File

@ -59,6 +59,7 @@ static int secp256k1_ge_is_infinity(const secp256k1_ge *a);
/** Check whether a group element is valid (i.e., on the curve). */
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a);
/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
/** Set a group element equal to another which is given in jacobian coordinates */
@ -115,10 +116,8 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
#ifdef USE_ENDOMORPHISM
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
#endif
/** Clear a secp256k1_gej to prevent leaking sensitive information. */
static void secp256k1_gej_clear(secp256k1_gej *r);
@ -138,4 +137,15 @@ static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_g
/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b);
/** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve.
*
* In normal mode, the group used is secp256k1, which has cofactor=1, meaning that every point on the curve is in the
* group, and this function always returns true.
*
* When compiling in exhaustive test mode, a slightly different curve equation is used, leading to a group with a
* (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this
* function checks whether a point that is on the curve is in fact also in that subgroup.
*/
static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge);
#endif /* SECP256K1_GROUP_H */

View File

@ -11,49 +11,38 @@
#include "field.h"
#include "group.h"
/* These points can be generated in sage as follows:
/* These exhaustive group test orders and generators are chosen such that:
* - The field size is equal to that of secp256k1, so field code is the same.
* - The curve equation is of the form y^2=x^3+B for some constant B.
* - The subgroup has a generator 2*P, where P.x=1.
* - The subgroup has size less than 1000 to permit exhaustive testing.
* - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y).
*
* 0. Setup a worksheet with the following parameters.
* b = 4 # whatever CURVE_B will be set to
* F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
* C = EllipticCurve ([F (0), F (b)])
*
* 1. Determine all the small orders available to you. (If there are
* no satisfactory ones, go back and change b.)
* print C.order().factor(limit=1000)
*
* 2. Choose an order as one of the prime factors listed in the above step.
* (You can also multiply some to get a composite order, though the
* tests will crash trying to invert scalars during signing.) We take a
* random point and scale it to drop its order to the desired value.
* There is some probability this won't work; just try again.
* order = 199
* P = C.random_point()
* P = (int(P.order()) / int(order)) * P
* assert(P.order() == order)
*
* 3. Print the values. You'll need to use a vim macro or something to
* split the hex output into 4-byte chunks.
* print "%x %x" % P.xy()
* These parameters are generated using sage/gen_exhaustive_groups.sage.
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 199
# if EXHAUSTIVE_TEST_ORDER == 13
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f,
0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb,
0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2,
0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24
);
static const int CURVE_B = 4;
# elif EXHAUSTIVE_TEST_ORDER == 13
static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(
0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc,
0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417
);
# elif EXHAUSTIVE_TEST_ORDER == 199
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9,
0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18,
0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c,
0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae
);
static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(
0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1,
0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb
);
static const int CURVE_B = 2;
# else
# error No known generator for the specified exhaustive test group order.
# endif
@ -68,7 +57,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
static const int CURVE_B = 7;
static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
#endif
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
@ -219,14 +208,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) {
}
static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
secp256k1_fe x2, x3, c;
secp256k1_fe x2, x3;
r->x = *x;
secp256k1_fe_sqr(&x2, x);
secp256k1_fe_mul(&x3, x, &x2);
r->infinity = 0;
secp256k1_fe_set_int(&c, CURVE_B);
secp256k1_fe_add(&c, &x3);
return secp256k1_fe_sqrt(&r->y, &c);
secp256k1_fe_add(&x3, &secp256k1_fe_const_b);
return secp256k1_fe_sqrt(&r->y, &x3);
}
static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
@ -269,36 +257,15 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
return a->infinity;
}
static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
secp256k1_fe y2, x3, z2, z6;
if (a->infinity) {
return 0;
}
/** y^2 = x^3 + 7
* (Y/Z^3)^2 = (X/Z^2)^3 + 7
* Y^2 / Z^6 = X^3 / Z^6 + 7
* Y^2 = X^3 + 7*Z^6
*/
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
secp256k1_fe_sqr(&z2, &a->z);
secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
secp256k1_fe_mul_int(&z6, CURVE_B);
secp256k1_fe_add(&x3, &z6);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
}
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
secp256k1_fe y2, x3, c;
secp256k1_fe y2, x3;
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
secp256k1_fe_set_int(&c, CURVE_B);
secp256k1_fe_add(&x3, &c);
secp256k1_fe_add(&x3, &secp256k1_fe_const_b);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
}
@ -679,7 +646,6 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r,
secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}
#ifdef USE_ENDOMORPHISM
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
static const secp256k1_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
@ -688,7 +654,6 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
*r = *a;
secp256k1_fe_mul(&r->x, &r->x, &beta);
}
#endif
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
secp256k1_fe yz;
@ -704,4 +669,25 @@ static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
return secp256k1_fe_is_quad_var(&yz);
}
static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
#ifdef EXHAUSTIVE_TEST_ORDER
secp256k1_gej out;
int i;
/* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
secp256k1_gej_set_infinity(&out);
for (i = 0; i < 32; ++i) {
secp256k1_gej_double_var(&out, &out, NULL);
if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) {
secp256k1_gej_add_ge_var(&out, &out, ge, NULL);
}
}
return secp256k1_gej_is_infinity(&out);
#else
(void)ge;
/* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
return 1;
#endif
}
#endif /* SECP256K1_GROUP_IMPL_H */
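A worked example of the ladder in secp256k1_ge_is_in_correct_subgroup: with EXHAUSTIVE_TEST_ORDER = 13 = 0b1101, the 28 leading zero bits leave out at infinity, after which the loop computes

    bit 1: out = 2*O + ge      = ge
    bit 1: out = 2*ge + ge     = 3*ge
    bit 0: out = 2*(3*ge)      = 6*ge
    bit 1: out = 2*(6*ge) + ge = 13*ge

so the function returns whether order*ge is the point at infinity, i.e. whether the order of ge divides the group order.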

View File

@ -80,7 +80,7 @@ void test_ecdh_generator_basepoint(void) {
/* compute "explicitly" */
CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_UNCOMPRESSED) == 1);
/* compare */
CHECK(memcmp(output_ecdh, point_ser, 65) == 0);
CHECK(secp256k1_memcmp_var(output_ecdh, point_ser, 65) == 0);
/* compute using ECDH function with default hash function */
CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32, NULL, NULL) == 1);
@ -90,7 +90,7 @@ void test_ecdh_generator_basepoint(void) {
secp256k1_sha256_write(&sha, point_ser, point_ser_len);
secp256k1_sha256_finalize(&sha, output_ser);
/* compare */
CHECK(memcmp(output_ecdh, output_ser, 32) == 0);
CHECK(secp256k1_memcmp_var(output_ecdh, output_ser, 32) == 0);
}
}
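The secp256k1_memcmp_var function introduced by "Switch to our own memcmp function" (6173839c9) is not itself shown in these hunks; judging from its drop-in use for memcmp and the _var (variable-time) suffix, a plain byte-wise comparison along these lines would fit (a sketch, not necessarily the exact implementation):

    #include <stddef.h>

    static int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n) {
        const unsigned char *p1 = s1, *p2 = s2;
        size_t i;
        for (i = 0; i < n; i++) {
            int diff = p1[i] - p2[i]; /* first differing byte decides the sign */
            if (diff != 0) {
                return diff;
            }
        }
        return 0; /* all n bytes equal */
    }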

View File

@ -1,3 +1,4 @@
include_HEADERS += include/secp256k1_extrakeys.h
noinst_HEADERS += src/modules/extrakeys/tests_impl.h
noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h
noinst_HEADERS += src/modules/extrakeys/main_impl.h

View File

@ -33,6 +33,9 @@ int secp256k1_xonly_pubkey_parse(const secp256k1_context* ctx, secp256k1_xonly_p
if (!secp256k1_ge_set_xo_var(&pk, &x, 0)) {
return 0;
}
if (!secp256k1_ge_is_in_correct_subgroup(&pk)) {
return 0;
}
secp256k1_xonly_pubkey_save(pubkey, &pk);
return 1;
}
@ -121,7 +124,7 @@ int secp256k1_xonly_pubkey_tweak_add_check(const secp256k1_context* ctx, const u
secp256k1_fe_normalize_var(&pk.y);
secp256k1_fe_get_b32(pk_expected32, &pk.x);
return memcmp(&pk_expected32, tweaked_pubkey32, 32) == 0
return secp256k1_memcmp_var(&pk_expected32, tweaked_pubkey32, 32) == 0
&& secp256k1_fe_is_odd(&pk.y) == tweaked_pk_parity;
}

View File

@ -0,0 +1,68 @@
/**********************************************************************
* Copyright (c) 2020 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
#define _SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_
#include "src/modules/extrakeys/main_impl.h"
#include "include/secp256k1_extrakeys.h"
static void test_exhaustive_extrakeys(const secp256k1_context *ctx, const secp256k1_ge* group) {
secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
secp256k1_pubkey pubkey[EXHAUSTIVE_TEST_ORDER - 1];
secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
int parities[EXHAUSTIVE_TEST_ORDER - 1];
unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
int i;
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_fe fe;
secp256k1_scalar scalar_i;
unsigned char buf[33];
int parity;
secp256k1_scalar_set_int(&scalar_i, i);
secp256k1_scalar_get_b32(buf, &scalar_i);
/* Construct pubkey and keypair. */
CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf));
CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey[i - 1], buf));
/* Construct serialized xonly_pubkey from keypair. */
CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parities[i - 1], &keypair[i - 1]));
CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
/* Parse the xonly_pubkey back and verify it matches the previously serialized value. */
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pubkey[i - 1], xonly_pubkey_bytes[i - 1]));
CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
/* Construct the xonly_pubkey from the pubkey, and verify it matches the same. */
CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pubkey[i - 1], &parity, &pubkey[i - 1]));
CHECK(parity == parities[i - 1]);
CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf, &xonly_pubkey[i - 1]));
CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], buf, 32) == 0);
/* Compare the xonly_pubkey bytes against the precomputed group. */
secp256k1_fe_set_b32(&fe, xonly_pubkey_bytes[i - 1]);
CHECK(secp256k1_fe_equal_var(&fe, &group[i].x));
/* Check the parity against the precomputed group. */
fe = group[i].y;
secp256k1_fe_normalize_var(&fe);
CHECK(secp256k1_fe_is_odd(&fe) == parities[i - 1]);
/* Verify that the higher half is identical to the lower half mirrored. */
if (i > EXHAUSTIVE_TEST_ORDER / 2) {
CHECK(secp256k1_memcmp_var(xonly_pubkey_bytes[i - 1], xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - i - 1], 32) == 0);
CHECK(parities[i - 1] == 1 - parities[EXHAUSTIVE_TEST_ORDER - i - 1]);
}
}
/* TODO: keypair/xonly_pubkey tweak tests */
}
#endif
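The mirror check near the end of test_exhaustive_extrakeys relies on the identity

    (n - i)*G = -(i*G) = (x, -y)

so i*G and (n - i)*G share an X coordinate (hence identical x-only serializations), and because the field size p is odd, y and p - y always have opposite parities.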

View File

@ -35,9 +35,9 @@ void test_xonly_pubkey(void) {
secp256k1_context *sign = api_test_context(SECP256K1_CONTEXT_SIGN, &ecount);
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
memset(ones32, 0xFF, 32);
secp256k1_rand256(xy_sk);
secp256k1_testrand256(xy_sk);
CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
@ -60,7 +60,7 @@ void test_xonly_pubkey(void) {
sk[0] = 1;
CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(memcmp(&pk, &xonly_pk, sizeof(pk)) == 0);
CHECK(secp256k1_memcmp_var(&pk, &xonly_pk, sizeof(pk)) == 0);
CHECK(pk_parity == 0);
/* Choose a secret key such that pubkey and xonly_pubkey are each others
@ -68,7 +68,7 @@ void test_xonly_pubkey(void) {
sk[0] = 2;
CHECK(secp256k1_ec_pubkey_create(ctx, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(ctx, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(memcmp(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
CHECK(secp256k1_memcmp_var(&xonly_pk, &pk, sizeof(xonly_pk)) != 0);
CHECK(pk_parity == 1);
secp256k1_pubkey_load(ctx, &pk1, &pk);
secp256k1_pubkey_load(ctx, &pk2, (secp256k1_pubkey *) &xonly_pk);
@ -81,7 +81,7 @@ void test_xonly_pubkey(void) {
CHECK(secp256k1_xonly_pubkey_serialize(none, NULL, &xonly_pk) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_serialize(none, buf32, NULL) == 0);
CHECK(memcmp(buf32, zeros64, 32) == 0);
CHECK(secp256k1_memcmp_var(buf32, zeros64, 32) == 0);
CHECK(ecount == 2);
{
/* A pubkey filled with 0s will fail to serialize due to pubkey_load
@ -104,28 +104,28 @@ void test_xonly_pubkey(void) {
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, NULL, &pk) == 1);
CHECK(secp256k1_xonly_pubkey_serialize(ctx, buf32, &xonly_pk) == 1);
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk_tmp, buf32) == 1);
CHECK(memcmp(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(xonly_pk)) == 0);
/* Test parsing invalid field elements */
memset(&xonly_pk, 1, sizeof(xonly_pk));
/* Overflowing field element */
CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, ones32) == 0);
CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
memset(&xonly_pk, 1, sizeof(xonly_pk));
/* There's no point with x-coordinate 0 on secp256k1 */
CHECK(secp256k1_xonly_pubkey_parse(none, &xonly_pk, zeros64) == 0);
CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
/* If a random 32-byte string can not be parsed with ec_pubkey_parse
* (because interpreted as X coordinate it does not correspond to a point on
* the curve) then xonly_pubkey_parse should fail as well. */
for (i = 0; i < count; i++) {
unsigned char rand33[33];
secp256k1_rand256(&rand33[1]);
secp256k1_testrand256(&rand33[1]);
rand33[0] = SECP256K1_TAG_PUBKEY_EVEN;
if (!secp256k1_ec_pubkey_parse(ctx, &pk, rand33, 33)) {
memset(&xonly_pk, 1, sizeof(xonly_pk));
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 0);
CHECK(memcmp(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
CHECK(secp256k1_memcmp_var(&xonly_pk, zeros64, sizeof(xonly_pk)) == 0);
} else {
CHECK(secp256k1_xonly_pubkey_parse(ctx, &xonly_pk, &rand33[1]) == 1);
}
@ -154,8 +154,8 @@ void test_xonly_pubkey_tweak(void) {
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
memset(overflows, 0xff, sizeof(overflows));
secp256k1_rand256(tweak);
secp256k1_rand256(sk);
secp256k1_testrand256(tweak);
secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
@ -170,15 +170,15 @@ void test_xonly_pubkey_tweak(void) {
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, NULL, tweak) == 0);
CHECK(ecount == 4);
/* NULL internal_xonly_pk zeroes the output_pk */
CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, NULL) == 0);
CHECK(ecount == 5);
/* NULL tweak zeroes the output_pk */
CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
/* Invalid tweak zeroes the output_pk */
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
/* A zero tweak is fine */
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, zeros64) == 1);
@ -193,16 +193,16 @@ void test_xonly_pubkey_tweak(void) {
secp256k1_scalar_get_b32(tweak, &scalar_tweak);
CHECK((secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, sk) == 0)
|| (secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0));
CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
}
/* Invalid pk with a valid tweak */
memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk));
secp256k1_rand256(tweak);
secp256k1_testrand256(tweak);
ecount = 0;
CHECK(secp256k1_xonly_pubkey_tweak_add(verify, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
secp256k1_context_destroy(none);
secp256k1_context_destroy(sign);
@ -228,8 +228,8 @@ void test_xonly_pubkey_tweak_check(void) {
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
memset(overflows, 0xff, sizeof(overflows));
secp256k1_rand256(tweak);
secp256k1_rand256(sk);
secp256k1_testrand256(tweak);
secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(ctx, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
@ -268,7 +268,7 @@ void test_xonly_pubkey_tweak_check(void) {
/* Overflowing tweak not allowed */
CHECK(secp256k1_xonly_pubkey_tweak_add_check(ctx, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(memcmp(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(ecount == 5);
secp256k1_context_destroy(none);
@ -287,7 +287,7 @@ void test_xonly_pubkey_tweak_recursive(void) {
unsigned char tweak[N_PUBKEYS - 1][32];
int i;
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(ctx, &pk[0], sk) == 1);
/* Add tweaks */
for (i = 0; i < N_PUBKEYS - 1; i++) {
@ -327,51 +327,51 @@ void test_keypair(void) {
/* Test keypair_create */
ecount = 0;
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(none, &keypair, sk) == 0);
CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_create(verify, &keypair, sk) == 0);
CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
CHECK(secp256k1_keypair_create(sign, NULL, sk) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_keypair_create(sign, &keypair, NULL) == 0);
CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 4);
/* Invalid secret key */
CHECK(secp256k1_keypair_create(sign, &keypair, zeros96) == 0);
CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_keypair_create(sign, &keypair, overflows) == 0);
CHECK(memcmp(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
/* Test keypair_pub */
ecount = 0;
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1);
CHECK(secp256k1_keypair_pub(none, NULL, &keypair) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_pub(none, &pk, NULL) == 0);
CHECK(ecount == 2);
CHECK(memcmp(zeros96, &pk, sizeof(pk)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* Using an invalid keypair is fine for keypair_pub */
memset(&keypair, 0, sizeof(keypair));
CHECK(secp256k1_keypair_pub(none, &pk, &keypair) == 1);
CHECK(memcmp(zeros96, &pk, sizeof(pk)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* keypair holds the same pubkey as pubkey_create */
CHECK(secp256k1_ec_pubkey_create(sign, &pk, sk) == 1);
CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
CHECK(secp256k1_keypair_pub(none, &pk_tmp, &keypair) == 1);
CHECK(memcmp(&pk, &pk_tmp, sizeof(pk)) == 0);
CHECK(secp256k1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
/** Test keypair_xonly_pub **/
ecount = 0;
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, NULL, &pk_parity, &keypair) == 0);
@ -379,13 +379,13 @@ void test_keypair(void) {
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, NULL, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(ecount == 2);
CHECK(memcmp(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
/* Using an invalid keypair will set the xonly_pk to 0 (first reset
* xonly_pk). */
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 1);
memset(&keypair, 0, sizeof(keypair));
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk, &pk_parity, &keypair) == 0);
CHECK(memcmp(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(ecount == 3);
/** keypair holds the same xonly pubkey as pubkey_create **/
@ -393,7 +393,7 @@ void test_keypair(void) {
CHECK(secp256k1_xonly_pubkey_from_pubkey(none, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(secp256k1_keypair_create(sign, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(none, &xonly_pk_tmp, &pk_parity_tmp, &keypair) == 1);
CHECK(memcmp(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
CHECK(secp256k1_memcmp_var(&xonly_pk, &xonly_pk_tmp, sizeof(pk)) == 0);
CHECK(pk_parity == pk_parity_tmp);
secp256k1_context_destroy(none);
@ -414,8 +414,8 @@ void test_keypair_add(void) {
secp256k1_context *verify = api_test_context(SECP256K1_CONTEXT_VERIFY, &ecount);
CHECK(sizeof(zeros96) == sizeof(keypair));
secp256k1_rand256(sk);
secp256k1_rand256(tweak);
secp256k1_testrand256(sk);
secp256k1_testrand256(tweak);
memset(overflows, 0xFF, 32);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
@ -429,12 +429,12 @@ void test_keypair_add(void) {
CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, NULL) == 0);
CHECK(ecount == 4);
/* This does not set the keypair to zeroes */
CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) != 0);
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0);
/* Invalid tweak zeroes the keypair */
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, overflows) == 0);
CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* A zero tweak is fine */
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
@ -444,7 +444,7 @@ void test_keypair_add(void) {
for (i = 0; i < count; i++) {
secp256k1_scalar scalar_tweak;
secp256k1_keypair keypair_tmp;
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
memcpy(&keypair_tmp, &keypair, sizeof(keypair));
/* Because sk may be negated before adding, we need to try with tweak =
@ -454,17 +454,17 @@ void test_keypair_add(void) {
secp256k1_scalar_get_b32(tweak, &scalar_tweak);
CHECK((secp256k1_keypair_xonly_tweak_add(ctx, &keypair, sk) == 0)
|| (secp256k1_keypair_xonly_tweak_add(ctx, &keypair_tmp, tweak) == 0));
CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0
|| memcmp(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0
|| secp256k1_memcmp_var(&keypair_tmp, zeros96, sizeof(keypair_tmp)) == 0);
}
/* Invalid keypair with a valid tweak */
memset(&keypair, 0, sizeof(keypair));
secp256k1_rand256(tweak);
secp256k1_testrand256(tweak);
ecount = 0;
CHECK(secp256k1_keypair_xonly_tweak_add(verify, &keypair, tweak) == 0);
CHECK(ecount == 1);
CHECK(memcmp(&keypair, zeros96, sizeof(keypair)) == 0);
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* Only seckey part of keypair invalid */
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
memset(&keypair, 0, 32);
@ -486,7 +486,7 @@ void test_keypair_add(void) {
unsigned char pk32[32];
int pk_parity;
secp256k1_rand256(tweak);
secp256k1_testrand256(tweak);
CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(ctx, &keypair, tweak) == 1);
CHECK(secp256k1_keypair_xonly_pub(ctx, &output_pk, &pk_parity, &keypair) == 1);
@ -498,11 +498,11 @@ void test_keypair_add(void) {
/* Check that the resulting pubkey matches xonly_pubkey_tweak_add */
CHECK(secp256k1_keypair_pub(ctx, &output_pk_xy, &keypair) == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add(ctx, &output_pk_expected, &internal_pk, tweak) == 1);
CHECK(memcmp(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
/* Check that the secret key in the keypair is tweaked correctly */
CHECK(secp256k1_ec_pubkey_create(ctx, &output_pk_expected, &keypair.data[0]) == 1);
CHECK(memcmp(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
CHECK(secp256k1_memcmp_var(&output_pk_xy, &output_pk_expected, sizeof(output_pk_xy)) == 0);
}
secp256k1_context_destroy(none);
secp256k1_context_destroy(sign);


@ -1,6 +1,7 @@
include_HEADERS += include/secp256k1_recovery.h
noinst_HEADERS += src/modules/recovery/main_impl.h
noinst_HEADERS += src/modules/recovery/tests_impl.h
noinst_HEADERS += src/modules/recovery/tests_exhaustive_impl.h
if USE_BENCHMARK
noinst_PROGRAMS += bench_recover
bench_recover_SOURCES = src/bench_recover.c


@ -0,0 +1,149 @@
/**********************************************************************
* Copyright (c) 2016 Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
#define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H
#include "src/modules/recovery/main_impl.h"
#include "include/secp256k1_recovery.h"
void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k;
uint64_t iter = 0;
/* Loop */
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
if (skip_section(&iter)) continue;
for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
secp256k1_fe r_dot_y_normalized;
secp256k1_ecdsa_recoverable_signature rsig;
secp256k1_ecdsa_signature sig;
secp256k1_scalar sk, msg, r, s, expected_r;
unsigned char sk32[32], msg32[32];
int expected_recid;
int recid;
int overflow;
secp256k1_scalar_set_int(&msg, i);
secp256k1_scalar_set_int(&sk, j);
secp256k1_scalar_get_b32(sk32, &sk);
secp256k1_scalar_get_b32(msg32, &msg);
secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
/* Check directly */
secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
r_from_k(&expected_r, group, k, &overflow);
CHECK(r == expected_r);
CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
/* The recid's second bit is for conveying overflow (R.x value >= group order).
* In the actual secp256k1 this is an astronomically unlikely event, but in the
* small group used here, it will be the case for all points except the ones where
* R.x=1 (which the group is specifically selected to have).
* Note that this isn't actually useful; full recovery would need to convey
* floor(R.x / group_order), but only one bit is used as that is sufficient
* in the real group. */
expected_recid = overflow ? 2 : 0;
r_dot_y_normalized = group[k].y;
secp256k1_fe_normalize(&r_dot_y_normalized);
/* Also the recovery id is flipped depending if we hit the low-s branch */
if ((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER) {
expected_recid |= secp256k1_fe_is_odd(&r_dot_y_normalized);
} else {
expected_recid |= !secp256k1_fe_is_odd(&r_dot_y_normalized);
}
CHECK(recid == expected_recid);
/* Convert to a standard sig then check */
secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
/* Note that we compute expected_r *after* signing -- this is important
* because our nonce-computing function might change k during
* signing. */
r_from_k(&expected_r, group, k, NULL);
CHECK(r == expected_r);
CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
/* Overflow means we've tried every possible nonce */
if (k < starting_k) {
break;
}
}
}
}
}
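For readers following the CHECKs above: they exercise the standard ECDSA signing equation. As a summary in our own notation (not part of the diff, LaTeX):

    s \equiv k^{-1}(m + r\,x) \pmod{n}
    \quad\Longleftrightarrow\quad
    k\,s \equiv m + r\,x \pmod{n}

Low-s normalization may replace s by n - s, which is why the test accepts either k*s or k*(n - s) being congruent to m + r*x, and why the low bit of the recovery id flips in that branch.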
void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
/* This is essentially a copy of test_exhaustive_verify, with recovery added */
int s, r, msg, key;
uint64_t iter = 0;
for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
secp256k1_ge nonconst_ge;
secp256k1_ecdsa_recoverable_signature rsig;
secp256k1_ecdsa_signature sig;
secp256k1_pubkey pk;
secp256k1_scalar sk_s, msg_s, r_s, s_s;
secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
int recid = 0;
int k, should_verify;
unsigned char msg32[32];
if (skip_section(&iter)) continue;
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_set_int(&r_s, r);
secp256k1_scalar_set_int(&msg_s, msg);
secp256k1_scalar_set_int(&sk_s, key);
secp256k1_scalar_get_b32(msg32, &msg_s);
/* Verify by hand */
/* Run through every k value that gives us this r and check that *one* works.
* Note there could be none, there could be multiple, ECDSA is weird. */
should_verify = 0;
for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
secp256k1_scalar check_x_s;
r_from_k(&check_x_s, group, k, NULL);
if (r_s == check_x_s) {
secp256k1_scalar_set_int(&s_times_k_s, k);
secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
}
}
/* nb we have a "high s" rule */
should_verify &= !secp256k1_scalar_is_high(&s_s);
/* We would like to try recovering the pubkey and checking that it matches,
* but pubkey recovery is impossible in the exhaustive tests (the reason
* being that there are 12 nonzero r values, 12 nonzero points, and no
* overlap between the sets, so there are no valid signatures). */
/* Verify by converting to a standard signature and calling verify */
secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
secp256k1_pubkey_save(&pk, &nonconst_ge);
CHECK(should_verify ==
secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
}
}
}
}
}
static void test_exhaustive_recovery(const secp256k1_context *ctx, const secp256k1_ge *group) {
test_exhaustive_recovery_sign(ctx, group);
test_exhaustive_recovery_verify(ctx, group);
}
#endif /* SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H */


@ -25,7 +25,7 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
return secp256k1_rand_bits(1);
return secp256k1_testrand_bits(1);
}
void test_ecdsa_recovery_api(void) {
@ -184,7 +184,7 @@ void test_ecdsa_recovery_end_to_end(void) {
CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
CHECK(secp256k1_memcmp_var(&signature[4], &signature[0], 64) == 0);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
memset(&rsignature[4], 0, sizeof(rsignature[4]));
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
@ -193,16 +193,16 @@ void test_ecdsa_recovery_end_to_end(void) {
/* Parse compact (with recovery id) and recover. */
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
CHECK(secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
sig[secp256k1_testrand_bits(6)] += 1 + secp256k1_testrand_int(255);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
/* Recover again */
CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
}
/* Tests several edge cases. */


@ -1,6 +1,7 @@
include_HEADERS += include/secp256k1_schnorrsig.h
noinst_HEADERS += src/modules/schnorrsig/main_impl.h
noinst_HEADERS += src/modules/schnorrsig/tests_impl.h
noinst_HEADERS += src/modules/schnorrsig/tests_exhaustive_impl.h
if USE_BENCHMARK
noinst_PROGRAMS += bench_schnorrsig
bench_schnorrsig_SOURCES = src/bench_schnorrsig.c


@ -68,7 +68,7 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms
/* Tag the hash with algo16 which is important to avoid nonce reuse across
* algorithms. If this nonce function is used in BIP-340 signing as defined
* in the spec, an optimized tagging implementation is used. */
if (memcmp(algo16, bip340_algo16, 16) == 0) {
if (secp256k1_memcmp_var(algo16, bip340_algo16, 16) == 0) {
secp256k1_nonce_function_bip340_sha256_tagged(&sha);
} else {
int algo16_len = 16;
@ -108,6 +108,22 @@ static void secp256k1_schnorrsig_sha256_tagged(secp256k1_sha256 *sha) {
sha->bytes = 64;
}
static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32)
{
unsigned char buf[32];
secp256k1_sha256 sha;
/* tagged hash(r.x, pk.x, msg32) */
secp256k1_schnorrsig_sha256_tagged(&sha);
secp256k1_sha256_write(&sha, r32, 32);
secp256k1_sha256_write(&sha, pubkey32, 32);
secp256k1_sha256_write(&sha, msg32, 32);
secp256k1_sha256_finalize(&sha, buf);
/* Set scalar e to the challenge hash modulo the curve order as per
* BIP340. */
secp256k1_scalar_set_b32(e, buf, NULL);
}
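As an illustration of what the new secp256k1_schnorrsig_challenge helper computes, here is a minimal standalone sketch (ours, not part of the diff) that builds the BIP 340 tagged hash from plain SHA-256 calls; it assumes the library's sha256 helpers behave as used elsewhere in this diff, and "BIP0340/challenge" is the tag BIP 340 specifies:

static void bip340_challenge_sketch(secp256k1_scalar *e, const unsigned char *r32,
                                    const unsigned char *msg32, const unsigned char *pk32) {
    static const unsigned char tag[] = "BIP0340/challenge";
    unsigned char taghash[32], buf[32];
    secp256k1_sha256 sha;
    /* taghash = SHA256(tag) */
    secp256k1_sha256_initialize(&sha);
    secp256k1_sha256_write(&sha, tag, sizeof(tag) - 1);
    secp256k1_sha256_finalize(&sha, taghash);
    /* e = SHA256(taghash || taghash || r.x || pk.x || msg) mod n */
    secp256k1_sha256_initialize(&sha);
    secp256k1_sha256_write(&sha, taghash, 32);
    secp256k1_sha256_write(&sha, taghash, 32);
    secp256k1_sha256_write(&sha, r32, 32);
    secp256k1_sha256_write(&sha, pk32, 32);
    secp256k1_sha256_finalize(&sha, buf);
    secp256k1_scalar_set_b32(e, buf, NULL);
}

The optimized secp256k1_schnorrsig_sha256_tagged initialization above precomputes the first two writes (the tag-hash midstate), which is why sign and verify can share the cheaper helper.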
int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) {
secp256k1_scalar sk;
secp256k1_scalar e;
@ -115,7 +131,6 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
secp256k1_gej rj;
secp256k1_ge pk;
secp256k1_ge r;
secp256k1_sha256 sha;
unsigned char buf[32] = { 0 };
unsigned char pk_buf[32];
unsigned char seckey[32];
@ -159,16 +174,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64
secp256k1_fe_normalize_var(&r.x);
secp256k1_fe_get_b32(&sig64[0], &r.x);
/* tagged hash(r.x, pk.x, msg32) */
secp256k1_schnorrsig_sha256_tagged(&sha);
secp256k1_sha256_write(&sha, &sig64[0], 32);
secp256k1_sha256_write(&sha, pk_buf, sizeof(pk_buf));
secp256k1_sha256_write(&sha, msg32, 32);
secp256k1_sha256_finalize(&sha, buf);
/* Set scalar e to the challenge hash modulo the curve order as per
* BIP340. */
secp256k1_scalar_set_b32(&e, buf, NULL);
secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf);
secp256k1_scalar_mul(&e, &e, &sk);
secp256k1_scalar_add(&e, &e, &k);
secp256k1_scalar_get_b32(&sig64[32], &e);
@ -189,7 +195,6 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha
secp256k1_gej pkj;
secp256k1_fe rx;
secp256k1_ge r;
secp256k1_sha256 sha;
unsigned char buf[32];
int overflow;
@ -212,13 +217,9 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha
return 0;
}
secp256k1_schnorrsig_sha256_tagged(&sha);
secp256k1_sha256_write(&sha, &sig64[0], 32);
/* Compute e. */
secp256k1_fe_get_b32(buf, &pk.x);
secp256k1_sha256_write(&sha, buf, sizeof(buf));
secp256k1_sha256_write(&sha, msg32, 32);
secp256k1_sha256_finalize(&sha, buf);
secp256k1_scalar_set_b32(&e, buf, NULL);
secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, buf);
/* Compute rj = s*G + (-e)*pkj */
secp256k1_scalar_negate(&e, &e);


@ -0,0 +1,206 @@
/**********************************************************************
* Copyright (c) 2020 Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
#define _SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_
#include "include/secp256k1_schnorrsig.h"
#include "src/modules/schnorrsig/main_impl.h"
static const unsigned char invalid_pubkey_bytes[][32] = {
/* 0 */
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
},
/* 2 */
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
},
/* order */
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
((EXHAUSTIVE_TEST_ORDER + 0UL) >> 24) & 0xFF,
((EXHAUSTIVE_TEST_ORDER + 0UL) >> 16) & 0xFF,
((EXHAUSTIVE_TEST_ORDER + 0UL) >> 8) & 0xFF,
(EXHAUSTIVE_TEST_ORDER + 0UL) & 0xFF
},
/* order + 1 */
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
((EXHAUSTIVE_TEST_ORDER + 1UL) >> 24) & 0xFF,
((EXHAUSTIVE_TEST_ORDER + 1UL) >> 16) & 0xFF,
((EXHAUSTIVE_TEST_ORDER + 1UL) >> 8) & 0xFF,
(EXHAUSTIVE_TEST_ORDER + 1UL) & 0xFF
},
/* field size */
{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F
},
/* field size + 1 (note that 1 is legal) */
{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x30
},
/* 2^256 - 1 */
{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
}
};
#define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0]))
static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
const unsigned char *key32, const unsigned char *xonly_pk32,
const unsigned char *algo16, void* data) {
secp256k1_scalar s;
int *idata = data;
(void)msg32;
(void)key32;
(void)xonly_pk32;
(void)algo16;
secp256k1_scalar_set_int(&s, *idata);
secp256k1_scalar_get_b32(nonce32, &s);
return 1;
}
static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, const secp256k1_xonly_pubkey* pubkeys, unsigned char (*xonly_pubkey_bytes)[32], const int* parities) {
int d;
uint64_t iter = 0;
/* Iterate over the possible public keys to verify against (through their corresponding DL d). */
for (d = 1; d <= EXHAUSTIVE_TEST_ORDER / 2; ++d) {
int actual_d;
unsigned k;
unsigned char pk32[32];
memcpy(pk32, xonly_pubkey_bytes[d - 1], 32);
actual_d = parities[d - 1] ? EXHAUSTIVE_TEST_ORDER - d : d;
/* Iterate over the possible valid first 32 bytes in the signature, through their corresponding DL k.
Values above EXHAUSTIVE_TEST_ORDER/2 refer to the entries in invalid_pubkey_bytes. */
for (k = 1; k <= EXHAUSTIVE_TEST_ORDER / 2 + NUM_INVALID_KEYS; ++k) {
unsigned char sig64[64];
int actual_k = -1;
int e_done[EXHAUSTIVE_TEST_ORDER] = {0};
int e_count_done = 0;
if (skip_section(&iter)) continue;
if (k <= EXHAUSTIVE_TEST_ORDER / 2) {
memcpy(sig64, xonly_pubkey_bytes[k - 1], 32);
actual_k = parities[k - 1] ? EXHAUSTIVE_TEST_ORDER - k : k;
} else {
memcpy(sig64, invalid_pubkey_bytes[k - 1 - EXHAUSTIVE_TEST_ORDER / 2], 32);
}
/* Randomly generate messages until all challenges have been hit. */
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar e;
unsigned char msg32[32];
secp256k1_testrand256(msg32);
secp256k1_schnorrsig_challenge(&e, sig64, msg32, pk32);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
/* Iterate over the possible valid last 32 bytes in the signature.
0..order=that s value; order+1=random bytes */
int count_valid = 0, s;
for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) {
int expect_valid, valid;
if (s <= EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar s_s;
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_get_b32(sig64 + 32, &s_s);
expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER &&
(s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
} else {
secp256k1_testrand256(sig64 + 32);
expect_valid = 0;
}
valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]);
CHECK(valid == expect_valid);
count_valid += valid;
}
/* Exactly one s value must verify, unless R is illegal. */
CHECK(count_valid == (actual_k != -1));
/* Don't retry other messages that result in the same challenge. */
e_done[e] = 1;
++e_count_done;
}
}
}
}
}
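The count_valid CHECK relies on the fact that, for a fixed nonce point R (discrete log k), public key P (discrete log d), and challenge e, the verification equation pins down exactly one valid s (our restatement, LaTeX):

    s\,G = R + e\,P \iff s \equiv k + e\,d \pmod{n}

so exactly one of the candidate s values verifies, unless the first 32 bytes are not a valid x-only point at all (actual_k == -1).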
static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const secp256k1_keypair* keypairs, const int* parities) {
int d, k;
uint64_t iter = 0;
/* Loop over keys. */
for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) {
int actual_d = d;
if (parities[d - 1]) actual_d = EXHAUSTIVE_TEST_ORDER - d;
/* Loop over nonces. */
for (k = 1; k < EXHAUSTIVE_TEST_ORDER; ++k) {
int e_done[EXHAUSTIVE_TEST_ORDER] = {0};
int e_count_done = 0;
unsigned char msg32[32];
unsigned char sig64[64];
int actual_k = k;
if (skip_section(&iter)) continue;
if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k;
/* Generate random messages until all challenges have been tried. */
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar e;
secp256k1_testrand256(msg32);
secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
secp256k1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER;
unsigned char expected_s_bytes[32];
secp256k1_scalar_get_b32(expected_s_bytes, &expected_s);
/* Invoke the real function to construct a signature. */
CHECK(secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], secp256k1_hardened_nonce_function_smallint, &k));
/* The first 32 bytes must match the xonly pubkey for the specified k. */
CHECK(secp256k1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0);
/* The last 32 bytes must match the expected s value. */
CHECK(secp256k1_memcmp_var(sig64 + 32, expected_s_bytes, 32) == 0);
/* Don't retry other messages that result in the same challenge. */
e_done[e] = 1;
++e_count_done;
}
}
}
}
}
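The expected_s computation above is the BIP 340 signing relation (our restatement, LaTeX)

    s \equiv k + e\,d \pmod{n}

where d and k are replaced by n - d and n - k whenever the corresponding public key or nonce point has odd Y; that negation is what the parities[] adjustments to actual_d and actual_k implement.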
static void test_exhaustive_schnorrsig(const secp256k1_context *ctx) {
secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1];
secp256k1_xonly_pubkey xonly_pubkey[EXHAUSTIVE_TEST_ORDER - 1];
int parity[EXHAUSTIVE_TEST_ORDER - 1];
unsigned char xonly_pubkey_bytes[EXHAUSTIVE_TEST_ORDER - 1][32];
unsigned i;
/* Verify that all invalid_pubkey_bytes are actually invalid. */
for (i = 0; i < NUM_INVALID_KEYS; ++i) {
secp256k1_xonly_pubkey pk;
CHECK(!secp256k1_xonly_pubkey_parse(ctx, &pk, invalid_pubkey_bytes[i]));
}
/* Construct keypairs and xonly-pubkeys for the entire group. */
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; ++i) {
secp256k1_scalar scalar_i;
unsigned char buf[32];
secp256k1_scalar_set_int(&scalar_i, i);
secp256k1_scalar_get_b32(buf, &scalar_i);
CHECK(secp256k1_keypair_create(ctx, &keypair[i - 1], buf));
CHECK(secp256k1_keypair_xonly_pub(ctx, &xonly_pubkey[i - 1], &parity[i - 1], &keypair[i - 1]));
CHECK(secp256k1_xonly_pubkey_serialize(ctx, xonly_pubkey_bytes[i - 1], &xonly_pubkey[i - 1]));
}
test_exhaustive_schnorrsig_sign(ctx, xonly_pubkey_bytes, keypair, parity);
test_exhaustive_schnorrsig_verify(ctx, xonly_pubkey, xonly_pubkey_bytes, parity);
}
#endif


@ -15,9 +15,9 @@
void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) {
unsigned char nonces[2][32];
CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1);
secp256k1_rand_flip(args[n_flip], n_bytes);
secp256k1_testrand_flip(args[n_flip], n_bytes);
CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1);
CHECK(memcmp(nonces[0], nonces[1], 32) != 0);
CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0);
}
/* Tests for the equality of two sha256 structs. This function only produces a
@ -28,7 +28,7 @@ void test_sha256_eq(const secp256k1_sha256 *sha1, const secp256k1_sha256 *sha2)
CHECK((sha1->bytes & 0x3F) == 0);
CHECK(sha1->bytes == sha2->bytes);
CHECK(memcmp(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
CHECK(secp256k1_memcmp_var(sha1->s, sha2->s, sizeof(sha1->s)) == 0);
}
void run_nonce_function_bip340_tests(void) {
@ -59,10 +59,10 @@ void run_nonce_function_bip340_tests(void) {
secp256k1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized);
test_sha256_eq(&sha, &sha_optimized);
secp256k1_rand256(msg);
secp256k1_rand256(key);
secp256k1_rand256(pk);
secp256k1_rand256(aux_rand);
secp256k1_testrand256(msg);
secp256k1_testrand256(key);
secp256k1_testrand256(pk);
secp256k1_testrand256(aux_rand);
/* Check that a bitflip in an argument results in different nonces. */
args[0] = msg;
@ -124,10 +124,10 @@ void test_schnorrsig_api(void) {
secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
secp256k1_rand256(sk1);
secp256k1_rand256(sk2);
secp256k1_rand256(sk3);
secp256k1_rand256(msg);
secp256k1_testrand256(sk1);
secp256k1_testrand256(sk2);
secp256k1_testrand256(sk3);
secp256k1_testrand256(msg);
CHECK(secp256k1_keypair_create(ctx, &keypairs[0], sk1) == 1);
CHECK(secp256k1_keypair_create(ctx, &keypairs[1], sk2) == 1);
CHECK(secp256k1_keypair_create(ctx, &keypairs[2], sk3) == 1);
@ -197,11 +197,11 @@ void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const un
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand));
CHECK(memcmp(sig, expected_sig, 64) == 0);
CHECK(secp256k1_memcmp_var(sig, expected_sig, 64) == 0);
CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized));
CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
CHECK(memcmp(&pk, &pk_expected, sizeof(pk)) == 0);
CHECK(secp256k1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0);
CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk));
}
@ -675,19 +675,19 @@ void test_schnorrsig_sign(void) {
unsigned char sig[64];
unsigned char zeros64[64] = { 0 };
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
/* Test different nonce functions */
memset(sig, 1, sizeof(sig));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0);
CHECK(memcmp(sig, zeros64, sizeof(sig)) == 0);
CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
memset(&sig, 1, sizeof(sig));
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0);
CHECK(memcmp(sig, zeros64, sizeof(sig)) == 0);
CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0);
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1);
CHECK(memcmp(sig, zeros64, sizeof(sig)) != 0);
CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) != 0);
}
#define N_SIGS 3
@ -703,12 +703,12 @@ void test_schnorrsig_sign_verify(void) {
secp256k1_xonly_pubkey pk;
secp256k1_scalar s;
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk));
CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair));
for (i = 0; i < N_SIGS; i++) {
secp256k1_rand256(msg[i]);
secp256k1_testrand256(msg[i]);
CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL));
CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], &pk));
}
@ -716,19 +716,19 @@ void test_schnorrsig_sign_verify(void) {
{
/* Flip a few bits in the signature and in the message and check that
* verify and verify_batch (TODO) fail */
size_t sig_idx = secp256k1_rand_int(N_SIGS);
size_t byte_idx = secp256k1_rand_int(32);
unsigned char xorbyte = secp256k1_rand_int(254)+1;
size_t sig_idx = secp256k1_testrand_int(N_SIGS);
size_t byte_idx = secp256k1_testrand_int(32);
unsigned char xorbyte = secp256k1_testrand_int(254)+1;
sig[sig_idx][byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
sig[sig_idx][byte_idx] ^= xorbyte;
byte_idx = secp256k1_rand_int(32);
byte_idx = secp256k1_testrand_int(32);
sig[sig_idx][32+byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
sig[sig_idx][32+byte_idx] ^= xorbyte;
byte_idx = secp256k1_rand_int(32);
byte_idx = secp256k1_testrand_int(32);
msg[sig_idx][byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk));
msg[sig_idx][byte_idx] ^= xorbyte;
@ -766,7 +766,7 @@ void test_schnorrsig_taproot(void) {
unsigned char sig[64];
/* Create output key */
secp256k1_rand256(sk);
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(ctx, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(ctx, &internal_pk, NULL, &keypair) == 1);
/* In actual taproot the tweak would be hash of internal_pk */
@ -776,7 +776,7 @@ void test_schnorrsig_taproot(void) {
CHECK(secp256k1_xonly_pubkey_serialize(ctx, output_pk_bytes, &output_pk) == 1);
/* Key spend */
secp256k1_rand256(msg);
secp256k1_testrand256(msg);
CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1);
/* Verify key spend */
CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1);


@ -102,12 +102,11 @@ static void secp256k1_scalar_order_get_num(secp256k1_num *r);
/** Compare two scalars. */
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
#ifdef USE_ENDOMORPHISM
/** Find r1 and r2 such that r1+r2*2^128 = a. */
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
#endif
/** Find r1 and r2 such that r1+r2*2^128 = k. */
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
/** Find r1 and r2 such that r1+r2*lambda = k,
* where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);


@ -912,18 +912,16 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
secp256k1_scalar_reduce_512(r, l);
}
#ifdef USE_ENDOMORPHISM
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
r1->d[0] = a->d[0];
r1->d[1] = a->d[1];
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
r1->d[2] = 0;
r1->d[3] = 0;
r2->d[0] = a->d[2];
r2->d[1] = a->d[3];
r2->d[0] = k->d[2];
r2->d[1] = k->d[3];
r2->d[2] = 0;
r2->d[3] = 0;
}
#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;


@ -672,26 +672,24 @@ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
secp256k1_scalar_reduce_512(r, l);
}
#ifdef USE_ENDOMORPHISM
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
r1->d[0] = a->d[0];
r1->d[1] = a->d[1];
r1->d[2] = a->d[2];
r1->d[3] = a->d[3];
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
r1->d[2] = k->d[2];
r1->d[3] = k->d[3];
r1->d[4] = 0;
r1->d[5] = 0;
r1->d[6] = 0;
r1->d[7] = 0;
r2->d[0] = a->d[4];
r2->d[1] = a->d[5];
r2->d[2] = a->d[6];
r2->d[3] = a->d[7];
r2->d[0] = k->d[4];
r2->d[1] = k->d[5];
r2->d[2] = k->d[6];
r2->d[3] = k->d[7];
r2->d[4] = 0;
r2->d[5] = 0;
r2->d[6] = 0;
r2->d[7] = 0;
}
#endif
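In both the 4x64 and 8x32 representations, the split satisfies the declared identity; with 32-bit limbs, writing k = \sum_{i=0}^{7} d_i\,2^{32 i} (our restatement, LaTeX):

    r_1 = \sum_{i=0}^{3} d_i\,2^{32 i}, \qquad
    r_2 = \sum_{i=0}^{3} d_{i+4}\,2^{32 i}, \qquad
    r_1 + 2^{128}\,r_2 = k

so r1 takes the low four limbs and r2 the high four.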
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;


@ -7,6 +7,10 @@
#ifndef SECP256K1_SCALAR_IMPL_H
#define SECP256K1_SCALAR_IMPL_H
#ifdef VERIFY
#include <string.h>
#endif
#include "scalar.h"
#include "util.h"
@ -252,37 +256,65 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#endif
}
#ifdef USE_ENDOMORPHISM
/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13
# define EXHAUSTIVE_TEST_LAMBDA 9
# elif EXHAUSTIVE_TEST_ORDER == 199
# define EXHAUSTIVE_TEST_LAMBDA 92
# else
# error No known lambda for the specified exhaustive test group order.
# endif
/**
* Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
* full case we don't bother making k1 and k2 be small, we just want them to be
* Find r1 and r2 given k, such that r1 + r2 * lambda == k mod n; unlike in the
* full case we don't bother making r1 and r2 be small, we just want them to be
* nontrivial to get full test coverage for the exhaustive tests. We therefore
* (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
* (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
*/
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
*r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
*r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
*r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
*r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
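A worked instance (ours) with EXHAUSTIVE_TEST_ORDER = 13 and EXHAUSTIVE_TEST_LAMBDA = 9, taking k = 3:

    r2 = (3 + 5) mod 13 = 8
    r1 = (3 + (13 - 8) * 9) mod 13 = 48 mod 13 = 9
    check: r1 + r2 * lambda = 9 + 8 * 9 = 81 ≡ 3 (mod 13) = k

(9 is indeed a cube root of unity here: 9^3 = 729 = 56 * 13 + 1.)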
#else
/**
* The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
* lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
* 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
* lambda is: */
static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST(
0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
);
#ifdef VERIFY
static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k);
#endif
/*
* Both lambda and beta are primitive cube roots of unity. That is lambda^3 == 1 mod n and
* beta^3 == 1 mod p, where n is the curve order and p is the field order.
*
* "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
* (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
* and k2 have a small size.
* It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
* Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
* roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
* (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
*
* Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
* homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
* is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
* reduced basis {a1 + b1*l, a2 + b2*l} where
*
* - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
* - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
* - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
* - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
*
* The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
* "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
* (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
* and k2 are small in absolute value.
*
* The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
* k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
* compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
* compute r2 = k2 mod n, and r1 = k1 mod n = (k - r2 * lambda) mod n, avoiding the need for
* the constants a1 and a2.
*
* g1, g2 are precomputed constants used to replace division with a rounded multiplication
* when decomposing the scalar for an endomorphism-based point multiplication.
@ -294,21 +326,21 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
* Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
* Section 4.3 (here we use a somewhat higher-precision estimate):
* d = a1*b2 - b1*a2
* g1 = round((2^272)*b2/d)
* g2 = round((2^272)*b1/d)
* g1 = round(2^384 * b2/d)
* g2 = round(2^384 * (-b1)/d)
*
* (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
* as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
* (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
* can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda).
*
* The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
* The function below splits k into r1 and r2, such that
* - r1 + lambda * r2 == k (mod n)
* - either r1 < 2^128 or -r1 mod n < 2^128
* - either r2 < 2^128 or -r2 mod n < 2^128
*
* See proof below.
*/
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar c1, c2;
static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
);
static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
@ -318,25 +350,167 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
);
static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
);
static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
);
VERIFY_CHECK(r1 != a);
VERIFY_CHECK(r2 != a);
VERIFY_CHECK(r1 != k);
VERIFY_CHECK(r2 != k);
/* these _var calls are constant time since the shift amount is constant */
secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
secp256k1_scalar_mul(&c1, &c1, &minus_b1);
secp256k1_scalar_mul(&c2, &c2, &minus_b2);
secp256k1_scalar_add(r2, &c1, &c2);
secp256k1_scalar_mul(r1, r2, &minus_lambda);
secp256k1_scalar_add(r1, r1, a);
secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
secp256k1_scalar_negate(r1, r1);
secp256k1_scalar_add(r1, r1, k);
#ifdef VERIFY
secp256k1_scalar_split_lambda_verify(r1, r2, k);
#endif
}
#endif
#endif
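Summarizing the decomposition computed by the function above (our restatement of the code, LaTeX):

    c_1 = \operatorname{round}\!\left(\frac{k\,g_1}{2^{384}}\right), \quad
    c_2 = \operatorname{round}\!\left(\frac{k\,g_2}{2^{384}}\right), \quad
    r_2 = \bigl(c_1(-b_1) + c_2(-b_2)\bigr) \bmod n, \quad
    r_1 = (k - r_2\,\lambda) \bmod n

so r_1 + r_2\,\lambda \equiv k \pmod{n} holds by construction; VERIFY builds re-check that identity, plus the 2^128 bounds, in secp256k1_scalar_split_lambda_verify below.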
#ifdef VERIFY
/*
* Proof for secp256k1_scalar_split_lambda's bounds.
*
* Let
* - epsilon1 = 2^256 * |g1/2^384 - b2/d|
* - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
* - c1 = round(k*g1/2^384)
* - c2 = round(k*g2/2^384)
*
* Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
*
* |c1 - k*b2/d|
* =
* |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
* <= {triangle inequality}
* |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
* =
* |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
* < {rounding in c1 and 0 <= k < 2^256}
* 2^-1 + 2^256 * |g1/2^384 - b2/d|
* = {definition of epsilon1}
* 2^-1 + epsilon1
*
* Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
*
* |c2 - k*(-b1)/d|
* =
* |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
* <= {triangle inequality}
* |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
* =
* |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
* < {rounding in c2 and 0 <= k < 2^256}
* 2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
* = {definition of epsilon2}
* 2^-1 + epsilon2
*
* Let
* - k1 = k - c1*a1 - c2*a2
* - k2 = - c1*b1 - c2*b2
*
* Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
*
* |k1|
* = {definition of k1}
* |k - c1*a1 - c2*a2|
* = {(a1*b2 - b1*a2)/n = 1}
* |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
* =
* |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
* <= {triangle inequality}
* a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
* < {Lemma 1 and Lemma 2}
* a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
* < {rounding up to an integer}
* (a1 + a2 + 1)/2
* < {rounding up to a power of 2}
* 2^128
*
* Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
*
* |k2|
* = {definition of k2}
* |- c1*b1 - c2*b2|
* = {(b1*b2 - b1*b2)/n = 0}
* |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
* =
* |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
* <= {triangle inequality}
* (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
* < {Lemma 1 and Lemma 2}
* (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
* < {rounding up to an integer}
* (-b1 + b2)/2 + 1
* < {rounding up to a power of 2}
* 2^128
*
* Let
* - r2 = k2 mod n
* - r1 = k - r2*lambda mod n.
*
* Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
*
* Lemma 5: r1 == k1 mod n.
*
* r1
* == {definition of r1 and r2}
* k - k2*lambda
* == {definition of k2}
* k - (- c1*b1 - c2*b2)*lambda
* ==
* k + c1*b1*lambda + c2*b2*lambda
* == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
* k - c1*a1 - c2*a2
* == {definition of k1}
* k1
*
* From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
*
* - either r1 < 2^128 or -r1 mod n < 2^128
* - either r2 < 2^128 or -r2 mod n < 2^128.
*
* Q.E.D.
*/
static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar s;
unsigned char buf1[32];
unsigned char buf2[32];
/* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */
static const unsigned char k1_bound[32] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77
};
/* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */
static const unsigned char k2_bound[32] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
};
secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2);
secp256k1_scalar_add(&s, &s, r1);
VERIFY_CHECK(secp256k1_scalar_eq(&s, k));
secp256k1_scalar_negate(&s, r1);
secp256k1_scalar_get_b32(buf1, r1);
secp256k1_scalar_get_b32(buf2, &s);
VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0);
secp256k1_scalar_negate(&s, r2);
secp256k1_scalar_get_b32(buf1, r2);
secp256k1_scalar_get_b32(buf2, &s);
VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0);
}
#endif /* VERIFY */
#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */
#endif /* SECP256K1_SCALAR_IMPL_H */


@ -48,14 +48,17 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
int i;
int over = 0;
*r = 0;
for (i = 0; i < 32; i++) {
*r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
*r = (*r * 0x100) + b32[i];
if (*r >= EXHAUSTIVE_TEST_ORDER) {
over = 1;
*r %= EXHAUSTIVE_TEST_ORDER;
}
}
/* just deny overflow, it basically always happens */
if (overflow) *overflow = 0;
if (overflow) *overflow = over;
}
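A small worked case (ours) with EXHAUSTIVE_TEST_ORDER = 13, for an input whose bytes are all 0 except a final byte of 13: the old code computed *r = (0 * 9 + 13) mod 13 = 0 (with base = 0x100 mod 13 = 9) and always reported *overflow = 0, while the new code reaches *r = 13 on the final byte, sets over = 1, reduces *r to 0, and reports the overflow, which is the behavioral change this hunk introduces.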
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {


@ -26,7 +26,7 @@ static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* err
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
if (scratch != NULL) {
VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
if (memcmp(scratch->magic, "scratch", 8) != 0) {
if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
@ -36,7 +36,7 @@ static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback,
}
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
@ -44,7 +44,7 @@ static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callb
}
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
@ -56,7 +56,7 @@ static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_c
}
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
@ -81,7 +81,7 @@ static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, s
}
size = rounded_size;
if (memcmp(scratch->magic, "scratch", 8) != 0) {
if (secp256k1_memcmp_var(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return NULL;
}


@ -284,6 +284,9 @@ int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pu
if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
return 0;
}
if (!secp256k1_ge_is_in_correct_subgroup(&Q)) {
return 0;
}
secp256k1_pubkey_save(pubkey, &Q);
secp256k1_ge_clear(&Q);
return 1;
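Background for this new check (our note): a parsed point P is accepted only if it lies in the subgroup generated by G, which for a subgroup of prime order n is equivalent to (LaTeX)

    [n]P = \mathcal{O}

The real secp256k1 curve has cofactor 1, so every curve point already satisfies this and the check can only reject points in the small exhaustive-test groups, where the ambient group is larger than the order-n test subgroup.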


@ -22,7 +22,7 @@ static int secp256k1_selftest_sha256(void) {
secp256k1_sha256_initialize(&hasher);
secp256k1_sha256_write(&hasher, (const unsigned char*)input63, 63);
secp256k1_sha256_finalize(&hasher, out);
return memcmp(out, output32, 32) == 0;
return secp256k1_memcmp_var(out, output32, 32) == 0;
}
static int secp256k1_selftest(void) {


@ -14,28 +14,34 @@
/* A non-cryptographic RNG used only for test infrastructure. */
/** Seed the pseudorandom number generator for testing. */
SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16);
SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16);
/** Generate a pseudorandom number in the range [0..2**32-1]. */
static uint32_t secp256k1_rand32(void);
static uint32_t secp256k1_testrand32(void);
/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
* more. */
static uint32_t secp256k1_rand_bits(int bits);
static uint32_t secp256k1_testrand_bits(int bits);
/** Generate a pseudorandom number in the range [0..range-1]. */
static uint32_t secp256k1_rand_int(uint32_t range);
static uint32_t secp256k1_testrand_int(uint32_t range);
/** Generate a pseudorandom 32-byte array. */
static void secp256k1_rand256(unsigned char *b32);
static void secp256k1_testrand256(unsigned char *b32);
/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
static void secp256k1_rand256_test(unsigned char *b32);
static void secp256k1_testrand256_test(unsigned char *b32);
/** Generate pseudorandom bytes with long sequences of zero and one bits. */
static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len);
static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len);
/** Flip a single random bit in a byte array */
static void secp256k1_rand_flip(unsigned char *b, size_t len);
static void secp256k1_testrand_flip(unsigned char *b, size_t len);
/** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */
static void secp256k1_testrand_init(const char* hexseed);
/** Print final test information. */
static void secp256k1_testrand_finish(void);
#endif /* SECP256K1_TESTRAND_H */


@ -8,6 +8,7 @@
#define SECP256K1_TESTRAND_IMPL_H
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "testrand.h"
@ -19,11 +20,11 @@ static int secp256k1_test_rng_precomputed_used = 8;
static uint64_t secp256k1_test_rng_integer;
static int secp256k1_test_rng_integer_bits_left = 0;
SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) {
SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16) {
secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16);
}
SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
SECP256K1_INLINE static uint32_t secp256k1_testrand32(void) {
if (secp256k1_test_rng_precomputed_used == 8) {
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed));
secp256k1_test_rng_precomputed_used = 0;
@ -31,10 +32,10 @@ SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++];
}
static uint32_t secp256k1_rand_bits(int bits) {
static uint32_t secp256k1_testrand_bits(int bits) {
uint32_t ret;
if (secp256k1_test_rng_integer_bits_left < bits) {
secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left);
secp256k1_test_rng_integer |= (((uint64_t)secp256k1_testrand32()) << secp256k1_test_rng_integer_bits_left);
secp256k1_test_rng_integer_bits_left += 32;
}
ret = secp256k1_test_rng_integer;
@ -44,7 +45,7 @@ static uint32_t secp256k1_rand_bits(int bits) {
return ret;
}
static uint32_t secp256k1_rand_int(uint32_t range) {
static uint32_t secp256k1_testrand_int(uint32_t range) {
/* We want a uniform integer between 0 and range-1, inclusive.
* B is the smallest number such that range <= 2**B.
* two mechanisms implemented here:
@ -76,25 +77,25 @@ static uint32_t secp256k1_rand_int(uint32_t range) {
mult = 1;
}
while(1) {
uint32_t x = secp256k1_rand_bits(bits);
uint32_t x = secp256k1_testrand_bits(bits);
if (x < trange) {
return (mult == 1) ? x : (x % range);
}
}
}
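The setup elided by this hunk chooses the draw width and acceptance threshold; the loop itself is plain rejection sampling. As a generic illustration (ours): for range = 5 with 3-bit draws and trange = 5, values in {0..4} are accepted and {5, 6, 7} rejected, so each accepted value is uniform over the range and the expected number of draws is 8/5.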
static void secp256k1_rand256(unsigned char *b32) {
static void secp256k1_testrand256(unsigned char *b32) {
secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32);
}
static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len) {
size_t bits = 0;
memset(bytes, 0, len);
while (bits < len * 8) {
int now;
uint32_t val;
now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31;
val = secp256k1_rand_bits(1);
now = 1 + (secp256k1_testrand_bits(6) * secp256k1_testrand_bits(5) + 16) / 31;
val = secp256k1_testrand_bits(1);
while (now > 0 && bits < len * 8) {
bytes[bits / 8] |= val << (bits % 8);
now--;
@ -103,12 +104,55 @@ static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
}
}
static void secp256k1_rand256_test(unsigned char *b32) {
secp256k1_rand_bytes_test(b32, 32);
static void secp256k1_testrand256_test(unsigned char *b32) {
secp256k1_testrand_bytes_test(b32, 32);
}
static void secp256k1_rand_flip(unsigned char *b, size_t len) {
b[secp256k1_rand_int(len)] ^= (1 << secp256k1_rand_int(8));
static void secp256k1_testrand_flip(unsigned char *b, size_t len) {
b[secp256k1_testrand_int(len)] ^= (1 << secp256k1_testrand_int(8));
}
static void secp256k1_testrand_init(const char* hexseed) {
unsigned char seed16[16] = {0};
if (hexseed && strlen(hexseed) != 0) {
int pos = 0;
while (pos < 16 && hexseed[0] != 0 && hexseed[1] != 0) {
unsigned short sh;
if ((sscanf(hexseed, "%2hx", &sh)) == 1) {
seed16[pos] = sh;
} else {
break;
}
hexseed += 2;
pos++;
}
} else {
FILE *frand = fopen("/dev/urandom", "r");
if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) {
uint64_t t = time(NULL) * (uint64_t)1337;
fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n");
seed16[0] ^= t;
seed16[1] ^= t >> 8;
seed16[2] ^= t >> 16;
seed16[3] ^= t >> 24;
seed16[4] ^= t >> 32;
seed16[5] ^= t >> 40;
seed16[6] ^= t >> 48;
seed16[7] ^= t >> 56;
}
if (frand) {
fclose(frand);
}
}
printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
secp256k1_testrand_seed(seed16);
}
static void secp256k1_testrand_finish(void) {
unsigned char run32[32];
secp256k1_testrand256(run32);
printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
}
#endif /* SECP256K1_TESTRAND_IMPL_H */
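For orientation, a test binary built on this header follows an init/run/finish lifecycle. A minimal sketch (run_my_tests is a hypothetical placeholder):

    int main(int argc, char **argv) {
        /* Seed from a hex string if given; otherwise secp256k1_testrand_init
         * reads /dev/urandom, falling back to the current time. */
        secp256k1_testrand_init(argc > 1 ? argv[1] : NULL);
        run_my_tests();              /* hypothetical test body */
        secp256k1_testrand_finish(); /* logs a final "random run" value */
        return 0;
    }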

File diff suppressed because it is too large

src/tests_exhaustive.c

@@ -18,7 +18,6 @@
#ifndef EXHAUSTIVE_TEST_ORDER
/* see group_impl.h for allowable values */
#define EXHAUSTIVE_TEST_ORDER 13
#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
#endif
#include "include/secp256k1.h"
@@ -27,10 +26,7 @@
#include "secp256k1.c"
#include "testrand_impl.h"
#ifdef ENABLE_MODULE_RECOVERY
#include "src/modules/recovery/main_impl.h"
#include "include/secp256k1_recovery.h"
#endif
static int count = 2;
/** stolen from tests.c */
void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
@@ -62,7 +58,7 @@ void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
secp256k1_rand256(bin);
secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32(x, bin)) {
return;
}
@@ -70,6 +66,15 @@ void random_fe(secp256k1_fe *x) {
}
/** END stolen from tests.c */
static uint32_t num_cores = 1;
static uint32_t this_core = 0;
SECP256K1_INLINE static int skip_section(uint64_t* iter) {
if (num_cores == 1) return 0;
*iter += 0xe7037ed1a0b428dbULL;
return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core;
}
int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
const unsigned char *key32, const unsigned char *algo16,
void *data, unsigned int attempt) {
@@ -90,91 +95,93 @@ int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned cha
return 1;
}
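This nonce function lets the exhaustive tests dictate the nonce directly: the caller hands a small integer through the data pointer (the &k arguments passed to the signing calls below), so signing can be driven through every possible nonce in turn.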
#ifdef USE_ENDOMORPHISM
void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
void test_exhaustive_endomorphism(const secp256k1_ge *group) {
int i;
for (i = 0; i < order; i++) {
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge res;
secp256k1_ge_mul_lambda(&res, &group[i]);
ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
}
}
#endif
void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j;
uint64_t iter = 0;
/* Sanity-check (and check infinity functions) */
CHECK(secp256k1_ge_is_infinity(&group[0]));
CHECK(secp256k1_gej_is_infinity(&groupj[0]));
for (i = 1; i < order; i++) {
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
CHECK(!secp256k1_ge_is_infinity(&group[i]));
CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
}
/* Check all addition formulae */
for (j = 0; j < order; j++) {
for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
secp256k1_fe fe_inv;
if (skip_section(&iter)) continue;
secp256k1_fe_inv(&fe_inv, &groupj[j].z);
for (i = 0; i < order; i++) {
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge zless_gej;
secp256k1_gej tmp;
/* add_var */
secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
ge_equals_gej(&group[(i + j) % order], &tmp);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_ge */
if (j > 0) {
secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
ge_equals_gej(&group[(i + j) % order], &tmp);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* add_ge_var */
secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
ge_equals_gej(&group[(i + j) % order], &tmp);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
/* add_zinv_var */
zless_gej.infinity = groupj[j].infinity;
zless_gej.x = groupj[j].x;
zless_gej.y = groupj[j].y;
secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
ge_equals_gej(&group[(i + j) % order], &tmp);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
/* Check doubling */
for (i = 0; i < order; i++) {
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
secp256k1_gej_double(&tmp, &groupj[i]);
ge_equals_gej(&group[(2 * i) % order], &tmp);
ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
ge_equals_gej(&group[(2 * i) % order], &tmp);
ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
/* Check negation */
for (i = 1; i < order; i++) {
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge tmp;
secp256k1_gej tmpj;
secp256k1_ge_neg(&tmp, &group[i]);
ge_equals_ge(&group[order - i], &tmp);
ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp);
secp256k1_gej_neg(&tmpj, &groupj[i]);
ge_equals_gej(&group[order - i], &tmpj);
ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj);
}
}
void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j, r_log;
for (r_log = 1; r_log < order; r_log++) {
for (j = 0; j < order; j++) {
for (i = 0; i < order; i++) {
uint64_t iter = 0;
for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) {
for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
if (skip_section(&iter)) continue;
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
secp256k1_scalar na, ng;
secp256k1_scalar_set_int(&na, i);
secp256k1_scalar_set_int(&ng, j);
secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
if (i > 0) {
secp256k1_ecmult_const(&tmp, &group[i], &ng, 256);
ge_equals_gej(&group[(i * j) % order], &tmp);
ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
@@ -193,14 +200,16 @@ static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t
return 1;
}
void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k, x, y;
uint64_t iter = 0;
secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
for (i = 0; i < order; i++) {
for (j = 0; j < order; j++) {
for (k = 0; k < order; k++) {
for (x = 0; x < order; x++) {
for (y = 0; y < order; y++) {
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) {
if (skip_section(&iter)) continue;
for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) {
secp256k1_gej tmp;
secp256k1_scalar g_sc;
ecmult_multi_data data;
@@ -212,7 +221,7 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
data.pt[1] = group[y];
secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp);
}
}
}
@@ -221,22 +230,23 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k, int* overflow) {
secp256k1_fe x;
unsigned char x_bin[32];
k %= EXHAUSTIVE_TEST_ORDER;
x = group[k].x;
secp256k1_fe_normalize(&x);
secp256k1_fe_get_b32(x_bin, &x);
secp256k1_scalar_set_b32(r, x_bin, NULL);
secp256k1_scalar_set_b32(r, x_bin, overflow);
}
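For reference, the by-hand verification below implements the textbook ECDSA relation: (r, s) verifies for message m under key x iff some nonce k gives r = x(kG) mod n and s*k = m + r*x (mod n), with high-s signatures rejected. The same check as a small integer model (hypothetical; xk[k] plays the role of r_from_k):

    /* n = group order; xk[k] = x-coordinate of k*G reduced mod n. */
    static int ecdsa_should_verify(int r, int s, int m, int x, const int *xk, int n) {
        int k, ok = 0;
        for (k = 0; k < n; k++) {
            if (xk[k] == r) {
                ok |= (s * k) % n == (m + r * x) % n;
            }
        }
        return ok && s <= n / 2; /* low-s rule, as secp256k1_scalar_is_high enforces */
    }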
void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
int s, r, msg, key;
for (s = 1; s < order; s++) {
for (r = 1; r < order; r++) {
for (msg = 1; msg < order; msg++) {
for (key = 1; key < order; key++) {
uint64_t iter = 0;
for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
secp256k1_ge nonconst_ge;
secp256k1_ecdsa_signature sig;
secp256k1_pubkey pk;
@@ -245,6 +255,8 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
int k, should_verify;
unsigned char msg32[32];
if (skip_section(&iter)) continue;
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_set_int(&r_s, r);
secp256k1_scalar_set_int(&msg_s, msg);
@@ -254,9 +266,9 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
/* Run through every k value that gives us this r and check that *one* works.
* Note there could be none, there could be multiple, ECDSA is weird. */
should_verify = 0;
for (k = 0; k < order; k++) {
for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
secp256k1_scalar check_x_s;
r_from_k(&check_x_s, group, k);
r_from_k(&check_x_s, group, k, NULL);
if (r_s == check_x_s) {
secp256k1_scalar_set_int(&s_times_k_s, k);
secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
@@ -281,13 +293,15 @@ void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *gr
}
}
void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
int i, j, k;
uint64_t iter = 0;
/* Loop */
for (i = 1; i < order; i++) { /* message */
for (j = 1; j < order; j++) { /* key */
for (k = 1; k < order; k++) { /* nonce */
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
if (skip_section(&iter)) continue;
for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
const int starting_k = k;
secp256k1_ecdsa_signature sig;
secp256k1_scalar sk, msg, r, s, expected_r;
@@ -303,10 +317,10 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
/* Note that we compute expected_r *after* signing -- this is important
* because our nonce-computing function might change k during
* signing. */
r_from_k(&expected_r, group, k);
r_from_k(&expected_r, group, k, NULL);
CHECK(r == expected_r);
CHECK((k * s) % order == (i + r * j) % order ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);
/* Overflow means we've tried every possible nonce */
if (k < starting_k) {
@@ -327,184 +341,114 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou
}
#ifdef ENABLE_MODULE_RECOVERY
void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
int i, j, k;
/* Loop */
for (i = 1; i < order; i++) { /* message */
for (j = 1; j < order; j++) { /* key */
for (k = 1; k < order; k++) { /* nonce */
const int starting_k = k;
secp256k1_fe r_dot_y_normalized;
secp256k1_ecdsa_recoverable_signature rsig;
secp256k1_ecdsa_signature sig;
secp256k1_scalar sk, msg, r, s, expected_r;
unsigned char sk32[32], msg32[32];
int expected_recid;
int recid;
secp256k1_scalar_set_int(&msg, i);
secp256k1_scalar_set_int(&sk, j);
secp256k1_scalar_get_b32(sk32, &sk);
secp256k1_scalar_get_b32(msg32, &msg);
secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
/* Check directly */
secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
r_from_k(&expected_r, group, k);
CHECK(r == expected_r);
CHECK((k * s) % order == (i + r * j) % order ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
/* In computing the recid, there is an overflow condition that is disabled in
* scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
* will exceed the group order, and our signing code always holds out for r
* values that don't overflow, so with a proper overflow check the tests would
* loop indefinitely. */
r_dot_y_normalized = group[k].y;
secp256k1_fe_normalize(&r_dot_y_normalized);
/* Also the recovery id is flipped depending if we hit the low-s branch */
if ((k * s) % order == (i + r * j) % order) {
expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
} else {
expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
}
CHECK(recid == expected_recid);
/* Convert to a standard sig then check */
secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
/* Note that we compute expected_r *after* signing -- this is important
* because our nonce-computing function might change k during
* signing. */
r_from_k(&expected_r, group, k);
CHECK(r == expected_r);
CHECK((k * s) % order == (i + r * j) % order ||
(k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
/* Overflow means we've tried every possible nonce */
if (k < starting_k) {
break;
}
}
}
}
}
void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
/* This is essentially a copy of test_exhaustive_verify, with recovery added */
int s, r, msg, key;
for (s = 1; s < order; s++) {
for (r = 1; r < order; r++) {
for (msg = 1; msg < order; msg++) {
for (key = 1; key < order; key++) {
secp256k1_ge nonconst_ge;
secp256k1_ecdsa_recoverable_signature rsig;
secp256k1_ecdsa_signature sig;
secp256k1_pubkey pk;
secp256k1_scalar sk_s, msg_s, r_s, s_s;
secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
int recid = 0;
int k, should_verify;
unsigned char msg32[32];
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_set_int(&r_s, r);
secp256k1_scalar_set_int(&msg_s, msg);
secp256k1_scalar_set_int(&sk_s, key);
secp256k1_scalar_get_b32(msg32, &msg_s);
/* Verify by hand */
/* Run through every k value that gives us this r and check that *one* works.
* Note there could be none, there could be multiple, ECDSA is weird. */
should_verify = 0;
for (k = 0; k < order; k++) {
secp256k1_scalar check_x_s;
r_from_k(&check_x_s, group, k);
if (r_s == check_x_s) {
secp256k1_scalar_set_int(&s_times_k_s, k);
secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
}
}
/* nb we have a "high s" rule */
should_verify &= !secp256k1_scalar_is_high(&s_s);
/* We would like to try recovering the pubkey and checking that it matches,
* but pubkey recovery is impossible in the exhaustive tests (the reason
* being that there are 12 nonzero r values, 12 nonzero points, and no
* overlap between the sets, so there are no valid signatures). */
/* Verify by converting to a standard signature and calling verify */
secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
secp256k1_pubkey_save(&pk, &nonconst_ge);
CHECK(should_verify ==
secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
}
}
}
}
}
#include "src/modules/recovery/tests_exhaustive_impl.h"
#endif
int main(void) {
#ifdef ENABLE_MODULE_EXTRAKEYS
#include "src/modules/extrakeys/tests_exhaustive_impl.h"
#endif
#ifdef ENABLE_MODULE_SCHNORRSIG
#include "src/modules/schnorrsig/tests_exhaustive_impl.h"
#endif
int main(int argc, char** argv) {
int i;
secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
unsigned char rand32[32];
secp256k1_context *ctx;
/* Build context */
secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
/* Disable buffering for stdout to improve reliability of getting
* diagnostic information. Happens right at the start of main because
* setbuf must be used before any other operation on the stream. */
setbuf(stdout, NULL);
/* Also disable buffering for stderr because it's not guaranteed that it's
* unbuffered on all systems. */
setbuf(stderr, NULL);
/* TODO set z = 1, then do num_tests runs with random z values */
printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER);
/* Generate the entire group */
secp256k1_gej_set_infinity(&groupj[0]);
secp256k1_ge_set_gej(&group[0], &groupj[0]);
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
/* Set a different random z-value for each Jacobian point */
secp256k1_fe z;
random_fe(&z);
/* find iteration count */
if (argc > 1) {
count = strtol(argv[1], NULL, 0);
}
printf("test count = %i\n", count);
secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
secp256k1_ge_set_gej(&group[i], &groupj[i]);
secp256k1_gej_rescale(&groupj[i], &z);
/* find random seed */
secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
/* Verify against ecmult_gen */
{
secp256k1_scalar scalar_i;
secp256k1_gej generatedj;
secp256k1_ge generated;
secp256k1_scalar_set_int(&scalar_i, i);
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
secp256k1_ge_set_gej(&generated, &generatedj);
CHECK(group[i].infinity == 0);
CHECK(generated.infinity == 0);
CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
/* set up split processing */
if (argc > 4) {
num_cores = strtol(argv[3], NULL, 0);
this_core = strtol(argv[4], NULL, 0);
if (num_cores < 1 || this_core >= num_cores) {
fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]);
return 1;
}
printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1);
}
/* Run the tests */
#ifdef USE_ENDOMORPHISM
test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
#endif
test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER);
test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
while (count--) {
/* Build context */
ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
secp256k1_testrand256(rand32);
CHECK(secp256k1_context_randomize(ctx, rand32));
/* Generate the entire group */
secp256k1_gej_set_infinity(&groupj[0]);
secp256k1_ge_set_gej(&group[0], &groupj[0]);
for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
secp256k1_ge_set_gej(&group[i], &groupj[i]);
if (count != 0) {
/* Set a different random z-value for each Jacobian point, except z=1
is used in the last iteration. */
secp256k1_fe z;
random_fe(&z);
secp256k1_gej_rescale(&groupj[i], &z);
}
/* Verify against ecmult_gen */
{
secp256k1_scalar scalar_i;
secp256k1_gej generatedj;
secp256k1_ge generated;
secp256k1_scalar_set_int(&scalar_i, i);
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
secp256k1_ge_set_gej(&generated, &generatedj);
CHECK(group[i].infinity == 0);
CHECK(generated.infinity == 0);
CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
}
}
/* Run the tests */
test_exhaustive_endomorphism(group);
test_exhaustive_addition(group, groupj);
test_exhaustive_ecmult(ctx, group, groupj);
test_exhaustive_ecmult_multi(ctx, group);
test_exhaustive_sign(ctx, group);
test_exhaustive_verify(ctx, group);
#ifdef ENABLE_MODULE_RECOVERY
test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
test_exhaustive_recovery(ctx, group);
#endif
#ifdef ENABLE_MODULE_EXTRAKEYS
test_exhaustive_extrakeys(ctx, group);
#endif
#ifdef ENABLE_MODULE_SCHNORRSIG
test_exhaustive_schnorrsig(ctx);
#endif
secp256k1_context_destroy(ctx);
secp256k1_context_destroy(ctx);
}
secp256k1_testrand_finish();
printf("no problems found\n");
return 0;
}
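Taken together, main() now takes the optional arguments from the usage string above: an iteration count, a hex seed, and a numcores/thiscore pair, so one seeded test pass can be split across processes (for example, four invocations sharing a seed with numcores 4 and thiscore 0 through 3).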

src/util.h

@@ -216,6 +216,24 @@ static SECP256K1_INLINE void memczero(void *s, size_t len, int flag) {
}
}
/** Semantics like memcmp. Variable-time.
*
* We use this to avoid possible compiler bugs with memcmp, e.g.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189
*/
static SECP256K1_INLINE int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n) {
const unsigned char *p1 = s1, *p2 = s2;
size_t i;
for (i = 0; i < n; i++) {
int diff = p1[i] - p2[i];
if (diff != 0) {
return diff;
}
}
return 0;
}
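A usage sketch (constants made up for illustration): callers compare short, non-secret byte strings through this helper instead of memcmp, sidestepping the miscompilation class cited above:

    static const unsigned char expected_tag[4] = { 0x9b, 0xab, 0xef, 0x01 };

    static int tag_matches(const unsigned char *tag) {
        /* variable-time comparison is fine here: the tag is public data */
        return secp256k1_memcmp_var(tag, expected_tag, 4) == 0;
    }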
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/
static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag) {
unsigned int mask0, mask1, r_masked, a_masked;

src/valgrind_ctime_test.c

@@ -9,19 +9,19 @@
#include "assumptions.h"
#include "util.h"
#if ENABLE_MODULE_ECDH
#ifdef ENABLE_MODULE_ECDH
# include "include/secp256k1_ecdh.h"
#endif
#if ENABLE_MODULE_RECOVERY
#ifdef ENABLE_MODULE_RECOVERY
# include "include/secp256k1_recovery.h"
#endif
#if ENABLE_MODULE_EXTRAKEYS
#ifdef ENABLE_MODULE_EXTRAKEYS
# include "include/secp256k1_extrakeys.h"
#endif
#if ENABLE_MODULE_SCHNORRSIG
#ifdef ENABLE_MODULE_SCHNORRSIG
#include "include/secp256k1_schnorrsig.h"
#endif
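These #if to #ifdef conversions matter once -Wundef is enabled: #if ENABLE_MODULE_ECDH evaluates the macro and warns when it is undefined, while #ifdef merely tests whether it is defined, keeping builds without optional modules warning-clean.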
@@ -37,11 +37,11 @@ int main(void) {
unsigned char key[32];
unsigned char sig[74];
unsigned char spubkey[33];
#if ENABLE_MODULE_RECOVERY
#ifdef ENABLE_MODULE_RECOVERY
secp256k1_ecdsa_recoverable_signature recoverable_signature;
int recid;
#endif
#if ENABLE_MODULE_EXTRAKEYS
#ifdef ENABLE_MODULE_EXTRAKEYS
secp256k1_keypair keypair;
#endif
@@ -81,7 +81,7 @@ int main(void) {
CHECK(ret);
CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature));
#if ENABLE_MODULE_ECDH
#ifdef ENABLE_MODULE_ECDH
/* Test ECDH. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_ecdh(ctx, msg, &pubkey, key, NULL, NULL);
@@ -89,7 +89,7 @@ int main(void) {
CHECK(ret == 1);
#endif
#if ENABLE_MODULE_RECOVERY
#ifdef ENABLE_MODULE_RECOVERY
/* Test signing a recoverable signature. */
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_ecdsa_sign_recoverable(ctx, &recoverable_signature, msg, key, NULL, NULL);
@@ -129,7 +129,7 @@ int main(void) {
CHECK(ret);
/* Test keypair_create and keypair_xonly_tweak_add. */
#if ENABLE_MODULE_EXTRAKEYS
#ifdef ENABLE_MODULE_EXTRAKEYS
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_keypair_create(ctx, &keypair, key);
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));
@@ -142,7 +142,7 @@ int main(void) {
CHECK(ret == 1);
#endif
#if ENABLE_MODULE_SCHNORRSIG
#ifdef ENABLE_MODULE_SCHNORRSIG
VALGRIND_MAKE_MEM_UNDEFINED(key, 32);
ret = secp256k1_keypair_create(ctx, &keypair, key);
VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret));