Diffstat (limited to 'crypto/secp256k1/libsecp256k1/src')
 crypto/secp256k1/libsecp256k1/src/basic-config.h | 32 +
 crypto/secp256k1/libsecp256k1/src/bench.h | 66 +
 crypto/secp256k1/libsecp256k1/src/bench_ecdh.c | 53 +
 crypto/secp256k1/libsecp256k1/src/bench_internal.c | 354 +
 crypto/secp256k1/libsecp256k1/src/bench_recover.c | 60 +
 crypto/secp256k1/libsecp256k1/src/bench_schnorr_verify.c | 73 +
 crypto/secp256k1/libsecp256k1/src/bench_sign.c | 56 +
 crypto/secp256k1/libsecp256k1/src/bench_verify.c | 67 +
 crypto/secp256k1/libsecp256k1/src/ecdsa.h | 22 +
 crypto/secp256k1/libsecp256k1/src/ecdsa_impl.h | 264 +
 crypto/secp256k1/libsecp256k1/src/eckey.h | 28 +
 crypto/secp256k1/libsecp256k1/src/eckey_impl.h | 202 +
 crypto/secp256k1/libsecp256k1/src/ecmult.h | 31 +
 crypto/secp256k1/libsecp256k1/src/ecmult_const.h | 15 +
 crypto/secp256k1/libsecp256k1/src/ecmult_const_impl.h | 260 +
 crypto/secp256k1/libsecp256k1/src/ecmult_gen.h | 43 +
 crypto/secp256k1/libsecp256k1/src/ecmult_gen_impl.h | 205 +
 crypto/secp256k1/libsecp256k1/src/ecmult_impl.h | 389 +
 crypto/secp256k1/libsecp256k1/src/field.h | 119 +
 crypto/secp256k1/libsecp256k1/src/field_10x26.h | 47 +
 crypto/secp256k1/libsecp256k1/src/field_10x26_impl.h | 1138 +
 crypto/secp256k1/libsecp256k1/src/field_5x52.h | 47 +
 crypto/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h | 502 +
 crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h | 456 +
 crypto/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h | 277 +
 crypto/secp256k1/libsecp256k1/src/field_impl.h | 271 +
 crypto/secp256k1/libsecp256k1/src/gen_context.c | 74 +
 crypto/secp256k1/libsecp256k1/src/group.h | 141 +
 crypto/secp256k1/libsecp256k1/src/group_impl.h | 632 +
 crypto/secp256k1/libsecp256k1/src/hash.h | 41 +
 crypto/secp256k1/libsecp256k1/src/hash_impl.h | 283 +
 crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java | 60 +
 crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c | 23 +
 crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h | 21 +
 crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include | 9 +
 crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h | 54 +
 crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h | 75 +
 crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include | 9 +
 crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h | 156 +
 crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h | 249 +
 crypto/secp256k1/libsecp256k1/src/modules/schnorr/Makefile.am.include | 11 +
 crypto/secp256k1/libsecp256k1/src/modules/schnorr/main_impl.h | 164 +
 crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr.h | 20 +
 crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr_impl.h | 207 +
 crypto/secp256k1/libsecp256k1/src/modules/schnorr/tests_impl.h | 175 +
 crypto/secp256k1/libsecp256k1/src/num.h | 68 +
 crypto/secp256k1/libsecp256k1/src/num_gmp.h | 20 +
 crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h | 260 +
 crypto/secp256k1/libsecp256k1/src/num_impl.h | 24 +
 crypto/secp256k1/libsecp256k1/src/scalar.h | 104 +
 crypto/secp256k1/libsecp256k1/src/scalar_4x64.h | 19 +
 crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h | 947 +
 crypto/secp256k1/libsecp256k1/src/scalar_8x32.h | 19 +
 crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h | 721 +
 crypto/secp256k1/libsecp256k1/src/scalar_impl.h | 337 +
 crypto/secp256k1/libsecp256k1/src/secp256k1.c | 513 +
 crypto/secp256k1/libsecp256k1/src/testrand.h | 28 +
 crypto/secp256k1/libsecp256k1/src/testrand_impl.h | 60 +
 crypto/secp256k1/libsecp256k1/src/tests.c | 2357 +
 crypto/secp256k1/libsecp256k1/src/util.h | 110 +
 60 files changed, 13068 insertions(+), 0 deletions(-)
diff --git a/crypto/secp256k1/libsecp256k1/src/basic-config.h b/crypto/secp256k1/libsecp256k1/src/basic-config.h
new file mode 100644
index 000000000..c4c16eb7c
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/basic-config.h
@@ -0,0 +1,32 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_BASIC_CONFIG_
+#define _SECP256K1_BASIC_CONFIG_
+
+#ifdef USE_BASIC_CONFIG
+
+#undef USE_ASM_X86_64
+#undef USE_ENDOMORPHISM
+#undef USE_FIELD_10X26
+#undef USE_FIELD_5X52
+#undef USE_FIELD_INV_BUILTIN
+#undef USE_FIELD_INV_NUM
+#undef USE_NUM_GMP
+#undef USE_NUM_NONE
+#undef USE_SCALAR_4X64
+#undef USE_SCALAR_8X32
+#undef USE_SCALAR_INV_BUILTIN
+#undef USE_SCALAR_INV_NUM
+
+#define USE_NUM_NONE 1
+#define USE_FIELD_INV_BUILTIN 1
+#define USE_SCALAR_INV_BUILTIN 1
+#define USE_FIELD_10X26 1
+#define USE_SCALAR_8X32 1
+
+#endif /* USE_BASIC_CONFIG */
+#endif /* _SECP256K1_BASIC_CONFIG_ */
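[review note] This header only takes effect when USE_BASIC_CONFIG is defined and supplies portable 32-bit defaults for builds that lack the autotools-generated configuration. A minimal sketch of how a single-translation-unit build could opt in; the wrapper file name is hypothetical, but bench_internal.c below uses the same #include "secp256k1.c" pattern:

    /* secp256k1_basic.c -- hypothetical wrapper, not part of this diff */
    #define USE_BASIC_CONFIG 1
    #include "basic-config.h" /* selects USE_FIELD_10X26, USE_SCALAR_8X32, ... */
    #include "secp256k1.c"    /* compile the whole library in one translation unit */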
diff --git a/crypto/secp256k1/libsecp256k1/src/bench.h b/crypto/secp256k1/libsecp256k1/src/bench.h
new file mode 100644
index 000000000..3a71b4aaf
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench.h
@@ -0,0 +1,66 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_BENCH_H_
+#define _SECP256K1_BENCH_H_
+
+#include <stdio.h>
+#include <math.h>
+#include "sys/time.h"
+
+static double gettimedouble(void) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return tv.tv_usec * 0.000001 + tv.tv_sec;
+}
+
+void print_number(double x) {
+ double y = x;
+ int c = 0;
+ if (y < 0.0) {
+ y = -y;
+ }
+ while (y < 100.0) {
+ y *= 10.0;
+ c++;
+ }
+ printf("%.*f", c, x);
+}
+
+void run_benchmark(char *name, void (*benchmark)(void*), void (*setup)(void*), void (*teardown)(void*), void* data, int count, int iter) {
+ int i;
+ double min = HUGE_VAL;
+ double sum = 0.0;
+ double max = 0.0;
+ for (i = 0; i < count; i++) {
+ double begin, total;
+ if (setup != NULL) {
+ setup(data);
+ }
+ begin = gettimedouble();
+ benchmark(data);
+ total = gettimedouble() - begin;
+ if (teardown != NULL) {
+ teardown(data);
+ }
+ if (total < min) {
+ min = total;
+ }
+ if (total > max) {
+ max = total;
+ }
+ sum += total;
+ }
+ printf("%s: min ", name);
+ print_number(min * 1000000.0 / iter);
+ printf("us / avg ");
+ print_number((sum / count) * 1000000.0 / iter);
+ printf("us / max ");
+ print_number(max * 1000000.0 / iter);
+ printf("us\n");
+}
+
+#endif
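[review note] run_benchmark() wraps `count` timed runs of `benchmark` between optional setup/teardown hooks and prints min/avg/max per iteration in microseconds; `iter` must match the number of inner iterations the benchmark body performs itself, since it is only used as a divisor. A minimal sketch of a harness client, with every name hypothetical (the bench_*.c files below follow the same shape):

    #include <string.h>
    #include "bench.h"

    static unsigned char src[4096], dst[4096];

    static void bench_copy(void* arg) {
        int i;
        (void)arg;
        /* the body loops 20000 times itself; run_benchmark divides by iter */
        for (i = 0; i < 20000; i++) {
            memcpy(dst, src, sizeof(src));
        }
    }

    int main(void) {
        run_benchmark("memcpy4k", bench_copy, NULL, NULL, NULL, 10, 20000);
        return 0;
    }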
diff --git a/crypto/secp256k1/libsecp256k1/src/bench_ecdh.c b/crypto/secp256k1/libsecp256k1/src/bench_ecdh.c
new file mode 100644
index 000000000..5a7c6376e
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench_ecdh.c
@@ -0,0 +1,53 @@
+/**********************************************************************
+ * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include <string.h>
+
+#include "include/secp256k1.h"
+#include "include/secp256k1_ecdh.h"
+#include "util.h"
+#include "bench.h"
+
+typedef struct {
+ secp256k1_context *ctx;
+ secp256k1_pubkey point;
+ unsigned char scalar[32];
+} bench_ecdh_t;
+
+static void bench_ecdh_setup(void* arg) {
+ int i;
+ bench_ecdh_t *data = (bench_ecdh_t*)arg;
+ const unsigned char point[] = {
+ 0x03,
+ 0x54, 0x94, 0xc1, 0x5d, 0x32, 0x09, 0x97, 0x06,
+ 0xc2, 0x39, 0x5f, 0x94, 0x34, 0x87, 0x45, 0xfd,
+ 0x75, 0x7c, 0xe3, 0x0e, 0x4e, 0x8c, 0x90, 0xfb,
+ 0xa2, 0xba, 0xd1, 0x84, 0xf8, 0x83, 0xc6, 0x9f
+ };
+
+ data->ctx = secp256k1_context_create(0);
+ for (i = 0; i < 32; i++) {
+ data->scalar[i] = i + 1;
+ }
+ CHECK(secp256k1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
+}
+
+static void bench_ecdh(void* arg) {
+ int i;
+ unsigned char res[32];
+ bench_ecdh_t *data = (bench_ecdh_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ CHECK(secp256k1_ecdh(data->ctx, res, &data->point, data->scalar) == 1);
+ }
+}
+
+int main(void) {
+ bench_ecdh_t data;
+
+ run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, 20000);
+ return 0;
+}
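[review note] The setup creates its context with flags 0 because secp256k1_ecdh needs no precomputed tables. A sketch, not part of this diff, of the agreement property the module provides; key material is illustrative only:

    #include <string.h>
    #include "include/secp256k1.h"
    #include "include/secp256k1_ecdh.h"

    static int ecdh_agree(void) {
        /* SIGN is only needed for ec_pubkey_create; ecdh itself works with flags 0 */
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
        unsigned char key_a[32], key_b[32], out_a[32], out_b[32];
        secp256k1_pubkey pub_a, pub_b;
        int i, ok;
        for (i = 0; i < 32; i++) { key_a[i] = i + 1; key_b[i] = i + 101; }
        ok = secp256k1_ec_pubkey_create(ctx, &pub_a, key_a)
          && secp256k1_ec_pubkey_create(ctx, &pub_b, key_b)
          && secp256k1_ecdh(ctx, out_a, &pub_b, key_a)  /* A combines B's pubkey with its own key */
          && secp256k1_ecdh(ctx, out_b, &pub_a, key_b)  /* B does the converse */
          && memcmp(out_a, out_b, 32) == 0;             /* both sides derive the same secret */
        secp256k1_context_destroy(ctx);
        return ok;
    }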
diff --git a/crypto/secp256k1/libsecp256k1/src/bench_internal.c b/crypto/secp256k1/libsecp256k1/src/bench_internal.c
new file mode 100644
index 000000000..7809f5f8c
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench_internal.c
@@ -0,0 +1,354 @@
+/**********************************************************************
+ * Copyright (c) 2014-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+#include <stdio.h>
+
+#include "include/secp256k1.h"
+
+#include "util.h"
+#include "hash_impl.h"
+#include "num_impl.h"
+#include "field_impl.h"
+#include "group_impl.h"
+#include "scalar_impl.h"
+#include "ecmult_const_impl.h"
+#include "ecmult_impl.h"
+#include "bench.h"
+#include "secp256k1.c"
+
+typedef struct {
+ secp256k1_scalar scalar_x, scalar_y;
+ secp256k1_fe fe_x, fe_y;
+ secp256k1_ge ge_x, ge_y;
+ secp256k1_gej gej_x, gej_y;
+ unsigned char data[64];
+ int wnaf[256];
+} bench_inv_t;
+
+void bench_setup(void* arg) {
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ static const unsigned char init_x[32] = {
+ 0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
+ 0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
+ 0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
+ 0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
+ };
+
+ static const unsigned char init_y[32] = {
+ 0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
+ 0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
+ 0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
+ 0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
+ };
+
+ secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL);
+ secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL);
+ secp256k1_fe_set_b32(&data->fe_x, init_x);
+ secp256k1_fe_set_b32(&data->fe_y, init_y);
+ CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0));
+ CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1));
+ secp256k1_gej_set_ge(&data->gej_x, &data->ge_x);
+ secp256k1_gej_set_ge(&data->gej_y, &data->ge_y);
+ memcpy(data->data, init_x, 32);
+ memcpy(data->data + 32, init_y, 32);
+}
+
+void bench_scalar_add(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 2000000; i++) {
+ secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+
+void bench_scalar_negate(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 2000000; i++) {
+ secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x);
+ }
+}
+
+void bench_scalar_sqr(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x);
+ }
+}
+
+void bench_scalar_mul(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+
+#ifdef USE_ENDOMORPHISM
+void bench_scalar_split(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_scalar l, r;
+ secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x);
+ secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+#endif
+
+void bench_scalar_inverse(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 2000; i++) {
+ secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x);
+ secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+
+void bench_scalar_inverse_var(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 2000; i++) {
+ secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x);
+ secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+
+void bench_field_normalize(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 2000000; i++) {
+ secp256k1_fe_normalize(&data->fe_x);
+ }
+}
+
+void bench_field_normalize_weak(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 2000000; i++) {
+ secp256k1_fe_normalize_weak(&data->fe_x);
+ }
+}
+
+void bench_field_mul(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y);
+ }
+}
+
+void bench_field_sqr(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_fe_sqr(&data->fe_x, &data->fe_x);
+ }
+}
+
+void bench_field_inverse(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_fe_inv(&data->fe_x, &data->fe_x);
+ secp256k1_fe_add(&data->fe_x, &data->fe_y);
+ }
+}
+
+void bench_field_inverse_var(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_fe_inv_var(&data->fe_x, &data->fe_x);
+ secp256k1_fe_add(&data->fe_x, &data->fe_y);
+ }
+}
+
+void bench_field_sqrt_var(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_fe_sqrt_var(&data->fe_x, &data->fe_x);
+ secp256k1_fe_add(&data->fe_x, &data->fe_y);
+ }
+}
+
+void bench_group_double_var(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL);
+ }
+}
+
+void bench_group_add_var(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL);
+ }
+}
+
+void bench_group_add_affine(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y);
+ }
+}
+
+void bench_group_add_affine_var(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 200000; i++) {
+ secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL);
+ }
+}
+
+void bench_ecmult_wnaf(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A);
+ secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+
+void bench_wnaf_const(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A);
+ secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
+ }
+}
+
+
+void bench_sha256(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+ secp256k1_sha256_t sha;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, data->data, 32);
+ secp256k1_sha256_finalize(&sha, data->data);
+ }
+}
+
+void bench_hmac_sha256(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+ secp256k1_hmac_sha256_t hmac;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_hmac_sha256_initialize(&hmac, data->data, 32);
+ secp256k1_hmac_sha256_write(&hmac, data->data, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, data->data);
+ }
+}
+
+void bench_rfc6979_hmac_sha256(void* arg) {
+ int i;
+ bench_inv_t *data = (bench_inv_t*)arg;
+ secp256k1_rfc6979_hmac_sha256_t rng;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64);
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32);
+ }
+}
+
+void bench_context_verify(void* arg) {
+ int i;
+ (void)arg;
+ for (i = 0; i < 20; i++) {
+ secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_VERIFY));
+ }
+}
+
+void bench_context_sign(void* arg) {
+ int i;
+ (void)arg;
+ for (i = 0; i < 200; i++) {
+ secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_SIGN));
+ }
+}
+
+
+int have_flag(int argc, char** argv, char *flag) {
+ char** argm = argv + argc;
+ argv++;
+ if (argv == argm) {
+ return 1;
+ }
+ while (argv != NULL && argv != argm) {
+ if (strcmp(*argv, flag) == 0) {
+ return 1;
+ }
+ argv++;
+ }
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ bench_inv_t data;
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, 2000000);
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, 2000000);
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, 200000);
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, 200000);
+#ifdef USE_ENDOMORPHISM
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, 20000);
+#endif
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
+ if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);
+
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, 2000000);
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, 2000000);
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, 200000);
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, 200000);
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, 20000);
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, 20000);
+ if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt_var", bench_field_sqrt_var, bench_setup, NULL, &data, 10, 20000);
+
+ if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, 200000);
+ if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000);
+ if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000);
+ if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000);
+
+ if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000);
+ if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000);
+
+ if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, 20000);
+ if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, 20000);
+ if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, 20000);
+
+ if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 20);
+ if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 200);
+
+ return 0;
+}
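[review note] With no command-line arguments have_flag() returns 1 unconditionally (argv + 1 == argm), so running bench_internal without arguments executes every benchmark; passing category names (scalar, field, group, ecmult, hash, context) or individual names (add, mul, inverse, ...) restricts the run. The argv != NULL test in the loop is always true and purely defensive.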
diff --git a/crypto/secp256k1/libsecp256k1/src/bench_recover.c b/crypto/secp256k1/libsecp256k1/src/bench_recover.c
new file mode 100644
index 000000000..6489378cc
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench_recover.c
@@ -0,0 +1,60 @@
+/**********************************************************************
+ * Copyright (c) 2014-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include "include/secp256k1.h"
+#include "include/secp256k1_recovery.h"
+#include "util.h"
+#include "bench.h"
+
+typedef struct {
+ secp256k1_context *ctx;
+ unsigned char msg[32];
+ unsigned char sig[64];
+} bench_recover_t;
+
+void bench_recover(void* arg) {
+ int i;
+ bench_recover_t *data = (bench_recover_t*)arg;
+ secp256k1_pubkey pubkey;
+ unsigned char pubkeyc[33];
+
+ for (i = 0; i < 20000; i++) {
+ int j;
+ size_t pubkeylen = 33;
+ secp256k1_ecdsa_recoverable_signature sig;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2));
+ CHECK(secp256k1_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg));
+ CHECK(secp256k1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
+ for (j = 0; j < 32; j++) {
+ data->sig[j + 32] = data->msg[j]; /* Move former message to S. */
+ data->msg[j] = data->sig[j]; /* Move former R to message. */
+ data->sig[j] = pubkeyc[j + 1]; /* Move recovered pubkey X coordinate to R (which must be a valid X coordinate). */
+ }
+ }
+}
+
+void bench_recover_setup(void* arg) {
+ int i;
+ bench_recover_t *data = (bench_recover_t*)arg;
+
+ for (i = 0; i < 32; i++) {
+ data->msg[i] = 1 + i;
+ }
+ for (i = 0; i < 64; i++) {
+ data->sig[i] = 65 + i;
+ }
+}
+
+int main(void) {
+ bench_recover_t data;
+
+ data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+
+ run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000);
+
+ secp256k1_context_destroy(data.ctx);
+ return 0;
+}
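[review note] A sketch, not part of this diff, of the round trip this benchmark exercises: sign recoverably, ship the 64-byte compact signature plus recovery id, and reconstruct the signer's public key on the receiving end. Key material is illustrative only:

    #include <string.h>
    #include "include/secp256k1.h"
    #include "include/secp256k1_recovery.h"

    static int recover_roundtrip(void) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char msg[32], key[32], sig64[64], ser_a[33], ser_b[33];
        size_t len_a = 33, len_b = 33;
        secp256k1_ecdsa_recoverable_signature sig;
        secp256k1_pubkey expected, recovered;
        int recid, ok, i;
        for (i = 0; i < 32; i++) { msg[i] = i + 1; key[i] = i + 65; }
        ok = secp256k1_ecdsa_sign_recoverable(ctx, &sig, msg, key, NULL, NULL)
          && secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &sig)
          /* receiving side: 64 bytes plus recid are enough to get the key back */
          && secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &sig, sig64, recid)
          && secp256k1_ecdsa_recover(ctx, &recovered, &sig, msg)
          && secp256k1_ec_pubkey_create(ctx, &expected, key)
          && secp256k1_ec_pubkey_serialize(ctx, ser_a, &len_a, &recovered, SECP256K1_EC_COMPRESSED)
          && secp256k1_ec_pubkey_serialize(ctx, ser_b, &len_b, &expected, SECP256K1_EC_COMPRESSED)
          && memcmp(ser_a, ser_b, 33) == 0; /* recovered key matches the signer's */
        secp256k1_context_destroy(ctx);
        return ok;
    }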
diff --git a/crypto/secp256k1/libsecp256k1/src/bench_schnorr_verify.c b/crypto/secp256k1/libsecp256k1/src/bench_schnorr_verify.c
new file mode 100644
index 000000000..5f137dda2
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench_schnorr_verify.c
@@ -0,0 +1,73 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+
+#include "include/secp256k1.h"
+#include "include/secp256k1_schnorr.h"
+#include "util.h"
+#include "bench.h"
+
+typedef struct {
+ unsigned char key[32];
+ unsigned char sig[64];
+ unsigned char pubkey[33];
+ size_t pubkeylen;
+} benchmark_schnorr_sig_t;
+
+typedef struct {
+ secp256k1_context *ctx;
+ unsigned char msg[32];
+ benchmark_schnorr_sig_t sigs[64];
+ int numsigs;
+} benchmark_schnorr_verify_t;
+
+static void benchmark_schnorr_init(void* arg) {
+ int i, k;
+ benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg;
+
+ for (i = 0; i < 32; i++) {
+ data->msg[i] = 1 + i;
+ }
+ for (k = 0; k < data->numsigs; k++) {
+ secp256k1_pubkey pubkey;
+ for (i = 0; i < 32; i++) {
+ data->sigs[k].key[i] = 33 + i + k;
+ }
+ secp256k1_schnorr_sign(data->ctx, data->sigs[k].sig, data->msg, data->sigs[k].key, NULL, NULL);
+ data->sigs[k].pubkeylen = 33;
+ CHECK(secp256k1_ec_pubkey_create(data->ctx, &pubkey, data->sigs[k].key));
+ CHECK(secp256k1_ec_pubkey_serialize(data->ctx, data->sigs[k].pubkey, &data->sigs[k].pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
+ }
+}
+
+static void benchmark_schnorr_verify(void* arg) {
+ int i;
+ benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg;
+
+ for (i = 0; i < 20000 / data->numsigs; i++) {
+ secp256k1_pubkey pubkey;
+ data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF);
+ CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->sigs[0].pubkey, data->sigs[0].pubkeylen));
+ CHECK(secp256k1_schnorr_verify(data->ctx, data->sigs[0].sig, data->msg, &pubkey) == ((i & 0xFF) == 0));
+ data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF);
+ }
+}
+
+
+
+int main(void) {
+ benchmark_schnorr_verify_t data;
+
+ data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ data.numsigs = 1;
+ run_benchmark("schnorr_verify", benchmark_schnorr_verify, benchmark_schnorr_init, NULL, &data, 10, 20000);
+
+ secp256k1_context_destroy(data.ctx);
+ return 0;
+}
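[review note] The verify loop corrupts one signature byte per iteration, so only iterations where (i & 0xFF) == 0 see the untouched signature; the benchmark therefore mostly measures rejection. A sketch, not part of this diff, of the plain positive path; key material is illustrative only:

    #include "include/secp256k1.h"
    #include "include/secp256k1_schnorr.h"

    static int schnorr_roundtrip(void) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char msg[32], key[32], sig64[64];
        secp256k1_pubkey pub;
        int i, ok;
        for (i = 0; i < 32; i++) { msg[i] = i + 1; key[i] = i + 33; }
        ok = secp256k1_schnorr_sign(ctx, sig64, msg, key, NULL, NULL)  /* default nonce function */
          && secp256k1_ec_pubkey_create(ctx, &pub, key)
          && secp256k1_schnorr_verify(ctx, sig64, msg, &pub);
        secp256k1_context_destroy(ctx);
        return ok;
    }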
diff --git a/crypto/secp256k1/libsecp256k1/src/bench_sign.c b/crypto/secp256k1/libsecp256k1/src/bench_sign.c
new file mode 100644
index 000000000..ed7224d75
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench_sign.c
@@ -0,0 +1,56 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include "include/secp256k1.h"
+#include "util.h"
+#include "bench.h"
+
+typedef struct {
+ secp256k1_context* ctx;
+ unsigned char msg[32];
+ unsigned char key[32];
+} bench_sign_t;
+
+static void bench_sign_setup(void* arg) {
+ int i;
+ bench_sign_t *data = (bench_sign_t*)arg;
+
+ for (i = 0; i < 32; i++) {
+ data->msg[i] = i + 1;
+ }
+ for (i = 0; i < 32; i++) {
+ data->key[i] = i + 65;
+ }
+}
+
+static void bench_sign(void* arg) {
+ int i;
+ bench_sign_t *data = (bench_sign_t*)arg;
+
+ unsigned char sig[74];
+ for (i = 0; i < 20000; i++) {
+ size_t siglen = 74;
+ int j;
+ secp256k1_ecdsa_signature signature;
+ CHECK(secp256k1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL));
+ CHECK(secp256k1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature));
+ for (j = 0; j < 32; j++) {
+ data->msg[j] = sig[j];
+ data->key[j] = sig[j + 32];
+ }
+ }
+}
+
+int main(void) {
+ bench_sign_t data;
+
+ data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+
+ run_benchmark("ecdsa_sign", bench_sign, bench_sign_setup, NULL, &data, 10, 20000);
+
+ secp256k1_context_destroy(data.ctx);
+ return 0;
+}
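[review note] Each iteration feeds the previous DER signature back in as the next message and key, so signing always sees fresh inputs and none of the work can be hoisted out of the loop; bench_recover.c above uses the same feedback trick and documents it inline.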
diff --git a/crypto/secp256k1/libsecp256k1/src/bench_verify.c b/crypto/secp256k1/libsecp256k1/src/bench_verify.c
new file mode 100644
index 000000000..0cafbdc4e
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/bench_verify.c
@@ -0,0 +1,67 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+
+#include "include/secp256k1.h"
+#include "util.h"
+#include "bench.h"
+
+typedef struct {
+ secp256k1_context *ctx;
+ unsigned char msg[32];
+ unsigned char key[32];
+ unsigned char sig[72];
+ size_t siglen;
+ unsigned char pubkey[33];
+ size_t pubkeylen;
+} benchmark_verify_t;
+
+static void benchmark_verify(void* arg) {
+ int i;
+ benchmark_verify_t* data = (benchmark_verify_t*)arg;
+
+ for (i = 0; i < 20000; i++) {
+ secp256k1_pubkey pubkey;
+ secp256k1_ecdsa_signature sig;
+ data->sig[data->siglen - 1] ^= (i & 0xFF);
+ data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
+ data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
+ CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1);
+ CHECK(secp256k1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0));
+ data->sig[data->siglen - 1] ^= (i & 0xFF);
+ data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF);
+ data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF);
+ }
+}
+
+int main(void) {
+ int i;
+ secp256k1_pubkey pubkey;
+ secp256k1_ecdsa_signature sig;
+ benchmark_verify_t data;
+
+ data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ for (i = 0; i < 32; i++) {
+ data.msg[i] = 1 + i;
+ }
+ for (i = 0; i < 32; i++) {
+ data.key[i] = 33 + i;
+ }
+ data.siglen = 72;
+ CHECK(secp256k1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL));
+ CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig));
+ CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key));
+ CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
+
+ run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000);
+
+ secp256k1_context_destroy(data.ctx);
+ return 0;
+}
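[review note] The three XOR lines perturb the last three signature bytes with the bits of i, so the signature is intact only when i == 0; the CHECK encodes exactly that expectation. This benchmark therefore measures mostly the cost of rejecting invalid signatures, plus pubkey and signature parsing on every pass.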
diff --git a/crypto/secp256k1/libsecp256k1/src/ecdsa.h b/crypto/secp256k1/libsecp256k1/src/ecdsa.h
new file mode 100644
index 000000000..4c0a4a89e
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecdsa.h
@@ -0,0 +1,22 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECDSA_
+#define _SECP256K1_ECDSA_
+
+#include <stddef.h>
+
+#include "scalar.h"
+#include "group.h"
+#include "ecmult.h"
+
+static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *r, secp256k1_scalar *s, const unsigned char *sig, size_t size);
+static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar *r, const secp256k1_scalar *s);
+static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar* r, const secp256k1_scalar* s, const secp256k1_ge *pubkey, const secp256k1_scalar *message);
+static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid);
+static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar* r, const secp256k1_scalar* s, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/ecdsa_impl.h b/crypto/secp256k1/libsecp256k1/src/ecdsa_impl.h
new file mode 100644
index 000000000..4a172b3c5
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecdsa_impl.h
@@ -0,0 +1,264 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+
+#ifndef _SECP256K1_ECDSA_IMPL_H_
+#define _SECP256K1_ECDSA_IMPL_H_
+
+#include "scalar.h"
+#include "field.h"
+#include "group.h"
+#include "ecmult.h"
+#include "ecmult_gen.h"
+#include "ecdsa.h"
+
+/** Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1
+ * sage: for t in xrange(1023, -1, -1):
+ * .. p = 2**256 - 2**32 - t
+ * .. if p.is_prime():
+ * .. print '%x'%p
+ * .. break
+ * 'fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f'
+ * sage: a = 0
+ * sage: b = 7
+ * sage: F = FiniteField (p)
+ * sage: '%x' % (EllipticCurve ([F (a), F (b)]).order())
+ * 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141'
+ */
+static const secp256k1_fe secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
+ 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL
+);
+
+/** Difference between field and order, i.e. 'p' - 'n', with 'p' and 'n' as defined in
+ * "Standards for Efficient Cryptography" (SEC2) 2.7.1.
+ * sage: p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
+ * sage: a = 0
+ * sage: b = 7
+ * sage: F = FiniteField (p)
+ * sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order())
+ * '14551231950b75fc4402da1722fc9baee'
+ */
+static const secp256k1_fe secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST(
+ 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL
+);
+
+static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, const unsigned char *sig, size_t size) {
+ unsigned char ra[32] = {0}, sa[32] = {0};
+ const unsigned char *rp;
+ const unsigned char *sp;
+ size_t lenr;
+ size_t lens;
+ int overflow;
+ if (sig[0] != 0x30) {
+ return 0;
+ }
+ lenr = sig[3];
+ if (5+lenr >= size) {
+ return 0;
+ }
+ lens = sig[lenr+5];
+ if (sig[1] != lenr+lens+4) {
+ return 0;
+ }
+ if (lenr+lens+6 > size) {
+ return 0;
+ }
+ if (sig[2] != 0x02) {
+ return 0;
+ }
+ if (lenr == 0) {
+ return 0;
+ }
+ if (sig[lenr+4] != 0x02) {
+ return 0;
+ }
+ if (lens == 0) {
+ return 0;
+ }
+ sp = sig + 6 + lenr;
+ while (lens > 0 && sp[0] == 0) {
+ lens--;
+ sp++;
+ }
+ if (lens > 32) {
+ return 0;
+ }
+ rp = sig + 4;
+ while (lenr > 0 && rp[0] == 0) {
+ lenr--;
+ rp++;
+ }
+ if (lenr > 32) {
+ return 0;
+ }
+ memcpy(ra + 32 - lenr, rp, lenr);
+ memcpy(sa + 32 - lens, sp, lens);
+ overflow = 0;
+ secp256k1_scalar_set_b32(rr, ra, &overflow);
+ if (overflow) {
+ return 0;
+ }
+ secp256k1_scalar_set_b32(rs, sa, &overflow);
+ if (overflow) {
+ return 0;
+ }
+ return 1;
+}
+
+static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar* ar, const secp256k1_scalar* as) {
+ unsigned char r[33] = {0}, s[33] = {0};
+ unsigned char *rp = r, *sp = s;
+ size_t lenR = 33, lenS = 33;
+ secp256k1_scalar_get_b32(&r[1], ar);
+ secp256k1_scalar_get_b32(&s[1], as);
+ while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
+ while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
+ if (*size < 6+lenS+lenR) {
+ *size = 6 + lenS + lenR;
+ return 0;
+ }
+ *size = 6 + lenS + lenR;
+ sig[0] = 0x30;
+ sig[1] = 4 + lenS + lenR;
+ sig[2] = 0x02;
+ sig[3] = lenR;
+ memcpy(sig+4, rp, lenR);
+ sig[4+lenR] = 0x02;
+ sig[5+lenR] = lenS;
+ memcpy(sig+lenR+6, sp, lenS);
+ return 1;
+}
+
+static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) {
+ unsigned char c[32];
+ secp256k1_scalar sn, u1, u2;
+ secp256k1_fe xr;
+ secp256k1_gej pubkeyj;
+ secp256k1_gej pr;
+
+ if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
+ return 0;
+ }
+
+ secp256k1_scalar_inverse_var(&sn, sigs);
+ secp256k1_scalar_mul(&u1, &sn, message);
+ secp256k1_scalar_mul(&u2, &sn, sigr);
+ secp256k1_gej_set_ge(&pubkeyj, pubkey);
+ secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
+ if (secp256k1_gej_is_infinity(&pr)) {
+ return 0;
+ }
+ secp256k1_scalar_get_b32(c, sigr);
+ secp256k1_fe_set_b32(&xr, c);
+
+ /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n)
+ * in xr. Naively, we would extract the x coordinate from pr (requiring an inversion modulo p),
+ * compute the remainder modulo n, and compare it to xr. However:
+ *
+ * xr == X(pr) mod n
+ * <=> exists h. (xr + h * n < p && xr + h * n == X(pr))
+ * [Since 2 * n > p, h can only be 0 or 1]
+ * <=> (xr == X(pr)) || (xr + n < p && xr + n == X(pr))
+ * [In Jacobian coordinates, X(pr) is pr.x / pr.z^2 mod p]
+ * <=> (xr == pr.x / pr.z^2 mod p) || (xr + n < p && xr + n == pr.x / pr.z^2 mod p)
+ * [Multiplying both sides of the equations by pr.z^2 mod p]
+ * <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x)
+ *
+ * Thus, we can avoid the inversion, but we have to check both cases separately.
+ * secp256k1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test.
+ */
+ if (secp256k1_gej_eq_x_var(&xr, &pr)) {
+        /* pr.x == xr * pr.z^2 mod p, so the signature is valid. */
+ return 1;
+ }
+ if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
+        /* xr + n >= p, so we can skip testing the second case. */
+ return 0;
+ }
+ secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe);
+ if (secp256k1_gej_eq_x_var(&xr, &pr)) {
+ /* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */
+ return 1;
+ }
+ return 0;
+}
+
+static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) {
+ unsigned char brx[32];
+ secp256k1_fe fx;
+ secp256k1_ge x;
+ secp256k1_gej xj;
+ secp256k1_scalar rn, u1, u2;
+ secp256k1_gej qj;
+
+ if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
+ return 0;
+ }
+
+ secp256k1_scalar_get_b32(brx, sigr);
+ VERIFY_CHECK(secp256k1_fe_set_b32(&fx, brx)); /* brx comes from a scalar, so is less than the order; certainly less than p */
+ if (recid & 2) {
+ if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
+ return 0;
+ }
+ secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
+ }
+ if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
+ return 0;
+ }
+ secp256k1_gej_set_ge(&xj, &x);
+ secp256k1_scalar_inverse_var(&rn, sigr);
+ secp256k1_scalar_mul(&u1, &rn, message);
+ secp256k1_scalar_negate(&u1, &u1);
+ secp256k1_scalar_mul(&u2, &rn, sigs);
+ secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1);
+ secp256k1_ge_set_gej_var(pubkey, &qj);
+ return !secp256k1_gej_is_infinity(&qj);
+}
+
+static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) {
+ unsigned char b[32];
+ secp256k1_gej rp;
+ secp256k1_ge r;
+ secp256k1_scalar n;
+ int overflow = 0;
+
+ secp256k1_ecmult_gen(ctx, &rp, nonce);
+ secp256k1_ge_set_gej(&r, &rp);
+ secp256k1_fe_normalize(&r.x);
+ secp256k1_fe_normalize(&r.y);
+ secp256k1_fe_get_b32(b, &r.x);
+ secp256k1_scalar_set_b32(sigr, b, &overflow);
+ if (secp256k1_scalar_is_zero(sigr)) {
+ /* P.x = order is on the curve, so technically sig->r could end up zero, which would be an invalid signature. */
+ secp256k1_gej_clear(&rp);
+ secp256k1_ge_clear(&r);
+ return 0;
+ }
+ if (recid) {
+ *recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0);
+ }
+ secp256k1_scalar_mul(&n, sigr, seckey);
+ secp256k1_scalar_add(&n, &n, message);
+ secp256k1_scalar_inverse(sigs, nonce);
+ secp256k1_scalar_mul(sigs, sigs, &n);
+ secp256k1_scalar_clear(&n);
+ secp256k1_gej_clear(&rp);
+ secp256k1_ge_clear(&r);
+ if (secp256k1_scalar_is_zero(sigs)) {
+ return 0;
+ }
+ if (secp256k1_scalar_is_high(sigs)) {
+ secp256k1_scalar_negate(sigs, sigs);
+ if (recid) {
+ *recid ^= 1;
+ }
+ }
+ return 1;
+}
+
+#endif
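[review note] On the secp256k1_scalar_is_high branch at the end of secp256k1_ecdsa_sig_sign: if (r, s) is a valid signature then so is (r, n - s), so the signer canonicalizes to the lower of the two s values, removing the trivial s-malleability from freshly produced signatures. Negating s is equivalent to negating the nonce k, which mirrors the R point's y coordinate, hence the compensating *recid ^= 1 on the recovery id's parity bit.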
diff --git a/crypto/secp256k1/libsecp256k1/src/eckey.h b/crypto/secp256k1/libsecp256k1/src/eckey.h
new file mode 100644
index 000000000..71c4096df
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/eckey.h
@@ -0,0 +1,28 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECKEY_
+#define _SECP256K1_ECKEY_
+
+#include <stddef.h>
+
+#include "group.h"
+#include "scalar.h"
+#include "ecmult.h"
+#include "ecmult_gen.h"
+
+static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size);
+static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, unsigned int flags);
+
+static int secp256k1_eckey_privkey_parse(secp256k1_scalar *key, const unsigned char *privkey, size_t privkeylen);
+static int secp256k1_eckey_privkey_serialize(const secp256k1_ecmult_gen_context *ctx, unsigned char *privkey, size_t *privkeylen, const secp256k1_scalar *key, unsigned int flags);
+
+static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak);
+static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak);
+static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak);
+static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/eckey_impl.h b/crypto/secp256k1/libsecp256k1/src/eckey_impl.h
new file mode 100644
index 000000000..ae4424015
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/eckey_impl.h
@@ -0,0 +1,202 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECKEY_IMPL_H_
+#define _SECP256K1_ECKEY_IMPL_H_
+
+#include "eckey.h"
+
+#include "scalar.h"
+#include "field.h"
+#include "group.h"
+#include "ecmult_gen.h"
+
+static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size) {
+ if (size == 33 && (pub[0] == 0x02 || pub[0] == 0x03)) {
+ secp256k1_fe x;
+ return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo_var(elem, &x, pub[0] == 0x03);
+ } else if (size == 65 && (pub[0] == 0x04 || pub[0] == 0x06 || pub[0] == 0x07)) {
+ secp256k1_fe x, y;
+ if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) {
+ return 0;
+ }
+ secp256k1_ge_set_xy(elem, &x, &y);
+ if ((pub[0] == 0x06 || pub[0] == 0x07) && secp256k1_fe_is_odd(&y) != (pub[0] == 0x07)) {
+ return 0;
+ }
+ return secp256k1_ge_is_valid_var(elem);
+ } else {
+ return 0;
+ }
+}
+
+static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, unsigned int flags) {
+ if (secp256k1_ge_is_infinity(elem)) {
+ return 0;
+ }
+ secp256k1_fe_normalize_var(&elem->x);
+ secp256k1_fe_normalize_var(&elem->y);
+ secp256k1_fe_get_b32(&pub[1], &elem->x);
+ if (flags & SECP256K1_EC_COMPRESSED) {
+ *size = 33;
+ pub[0] = 0x02 | (secp256k1_fe_is_odd(&elem->y) ? 0x01 : 0x00);
+ } else {
+ *size = 65;
+ pub[0] = 0x04;
+ secp256k1_fe_get_b32(&pub[33], &elem->y);
+ }
+ return 1;
+}
+
+static int secp256k1_eckey_privkey_parse(secp256k1_scalar *key, const unsigned char *privkey, size_t privkeylen) {
+ unsigned char c[32] = {0};
+ const unsigned char *end = privkey + privkeylen;
+ int lenb = 0;
+ int len = 0;
+ int overflow = 0;
+ /* sequence header */
+ if (end < privkey+1 || *privkey != 0x30) {
+ return 0;
+ }
+ privkey++;
+ /* sequence length constructor */
+ if (end < privkey+1 || !(*privkey & 0x80)) {
+ return 0;
+ }
+ lenb = *privkey & ~0x80; privkey++;
+ if (lenb < 1 || lenb > 2) {
+ return 0;
+ }
+ if (end < privkey+lenb) {
+ return 0;
+ }
+ /* sequence length */
+ len = privkey[lenb-1] | (lenb > 1 ? privkey[lenb-2] << 8 : 0);
+ privkey += lenb;
+ if (end < privkey+len) {
+ return 0;
+ }
+ /* sequence element 0: version number (=1) */
+ if (end < privkey+3 || privkey[0] != 0x02 || privkey[1] != 0x01 || privkey[2] != 0x01) {
+ return 0;
+ }
+ privkey += 3;
+ /* sequence element 1: octet string, up to 32 bytes */
+ if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) {
+ return 0;
+ }
+ memcpy(c + 32 - privkey[1], privkey + 2, privkey[1]);
+ secp256k1_scalar_set_b32(key, c, &overflow);
+ memset(c, 0, 32);
+ return !overflow;
+}
+
+static int secp256k1_eckey_privkey_serialize(const secp256k1_ecmult_gen_context *ctx, unsigned char *privkey, size_t *privkeylen, const secp256k1_scalar *key, unsigned int flags) {
+ secp256k1_gej rp;
+ secp256k1_ge r;
+ size_t pubkeylen = 0;
+ secp256k1_ecmult_gen(ctx, &rp, key);
+ secp256k1_ge_set_gej(&r, &rp);
+ if (flags & SECP256K1_EC_COMPRESSED) {
+ static const unsigned char begin[] = {
+ 0x30,0x81,0xD3,0x02,0x01,0x01,0x04,0x20
+ };
+ static const unsigned char middle[] = {
+ 0xA0,0x81,0x85,0x30,0x81,0x82,0x02,0x01,0x01,0x30,0x2C,0x06,0x07,0x2A,0x86,0x48,
+ 0xCE,0x3D,0x01,0x01,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F,0x30,0x06,0x04,0x01,0x00,0x04,0x01,0x07,0x04,
+ 0x21,0x02,0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,0x55,0xA0,0x62,0x95,0xCE,0x87,
+ 0x0B,0x07,0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,0x59,0xF2,0x81,0x5B,0x16,0xF8,
+ 0x17,0x98,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFE,0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,0xBF,0xD2,0x5E,
+ 0x8C,0xD0,0x36,0x41,0x41,0x02,0x01,0x01,0xA1,0x24,0x03,0x22,0x00
+ };
+ unsigned char *ptr = privkey;
+ memcpy(ptr, begin, sizeof(begin)); ptr += sizeof(begin);
+ secp256k1_scalar_get_b32(ptr, key); ptr += 32;
+ memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
+ if (!secp256k1_eckey_pubkey_serialize(&r, ptr, &pubkeylen, 1)) {
+ return 0;
+ }
+ ptr += pubkeylen;
+ *privkeylen = ptr - privkey;
+ } else {
+ static const unsigned char begin[] = {
+ 0x30,0x82,0x01,0x13,0x02,0x01,0x01,0x04,0x20
+ };
+ static const unsigned char middle[] = {
+ 0xA0,0x81,0xA5,0x30,0x81,0xA2,0x02,0x01,0x01,0x30,0x2C,0x06,0x07,0x2A,0x86,0x48,
+ 0xCE,0x3D,0x01,0x01,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F,0x30,0x06,0x04,0x01,0x00,0x04,0x01,0x07,0x04,
+ 0x41,0x04,0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,0x55,0xA0,0x62,0x95,0xCE,0x87,
+ 0x0B,0x07,0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,0x59,0xF2,0x81,0x5B,0x16,0xF8,
+ 0x17,0x98,0x48,0x3A,0xDA,0x77,0x26,0xA3,0xC4,0x65,0x5D,0xA4,0xFB,0xFC,0x0E,0x11,
+ 0x08,0xA8,0xFD,0x17,0xB4,0x48,0xA6,0x85,0x54,0x19,0x9C,0x47,0xD0,0x8F,0xFB,0x10,
+ 0xD4,0xB8,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFE,0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,0xBF,0xD2,0x5E,
+ 0x8C,0xD0,0x36,0x41,0x41,0x02,0x01,0x01,0xA1,0x44,0x03,0x42,0x00
+ };
+ unsigned char *ptr = privkey;
+ memcpy(ptr, begin, sizeof(begin)); ptr += sizeof(begin);
+ secp256k1_scalar_get_b32(ptr, key); ptr += 32;
+ memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle);
+ if (!secp256k1_eckey_pubkey_serialize(&r, ptr, &pubkeylen, 0)) {
+ return 0;
+ }
+ ptr += pubkeylen;
+ *privkeylen = ptr - privkey;
+ }
+ return 1;
+}
+
+static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak) {
+ secp256k1_scalar_add(key, key, tweak);
+ if (secp256k1_scalar_is_zero(key)) {
+ return 0;
+ }
+ return 1;
+}
+
+static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) {
+ secp256k1_gej pt;
+ secp256k1_scalar one;
+ secp256k1_gej_set_ge(&pt, key);
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_ecmult(ctx, &pt, &pt, &one, tweak);
+
+ if (secp256k1_gej_is_infinity(&pt)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej(key, &pt);
+ return 1;
+}
+
+static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak) {
+ if (secp256k1_scalar_is_zero(tweak)) {
+ return 0;
+ }
+
+ secp256k1_scalar_mul(key, key, tweak);
+ return 1;
+}
+
+static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) {
+ secp256k1_scalar zero;
+ secp256k1_gej pt;
+ if (secp256k1_scalar_is_zero(tweak)) {
+ return 0;
+ }
+
+ secp256k1_scalar_set_int(&zero, 0);
+ secp256k1_gej_set_ge(&pt, key);
+ secp256k1_ecmult(ctx, &pt, &pt, tweak, &zero);
+ secp256k1_ge_set_gej(key, &pt);
+ return 1;
+}
+
+#endif
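[review note] These helpers back the public secp256k1_ec_privkey_tweak_add / secp256k1_ec_pubkey_tweak_add entry points in secp256k1.c. A sketch, not part of this diff, of the homomorphism that makes them useful for BIP32-style key derivation; key material is illustrative only:

    #include <string.h>
    #include "include/secp256k1.h"

    static int tweak_consistency(void) {
        secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        unsigned char key[32], tweak[32], ser_a[33], ser_b[33];
        size_t len_a = 33, len_b = 33;
        secp256k1_pubkey pub, pub_from_tweaked_key;
        int i, ok;
        for (i = 0; i < 32; i++) { key[i] = i + 1; tweak[i] = i + 65; }
        ok = secp256k1_ec_pubkey_create(ctx, &pub, key)
          && secp256k1_ec_pubkey_tweak_add(ctx, &pub, tweak)   /* pub += tweak*G */
          && secp256k1_ec_privkey_tweak_add(ctx, key, tweak)   /* key = (key + tweak) mod n */
          && secp256k1_ec_pubkey_create(ctx, &pub_from_tweaked_key, key)
          && secp256k1_ec_pubkey_serialize(ctx, ser_a, &len_a, &pub, SECP256K1_EC_COMPRESSED)
          && secp256k1_ec_pubkey_serialize(ctx, ser_b, &len_b, &pub_from_tweaked_key, SECP256K1_EC_COMPRESSED)
          && memcmp(ser_a, ser_b, 33) == 0; /* (key + t)*G == key*G + t*G */
        secp256k1_context_destroy(ctx);
        return ok;
    }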
diff --git a/crypto/secp256k1/libsecp256k1/src/ecmult.h b/crypto/secp256k1/libsecp256k1/src/ecmult.h
new file mode 100644
index 000000000..20484134f
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecmult.h
@@ -0,0 +1,31 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_
+#define _SECP256K1_ECMULT_
+
+#include "num.h"
+#include "group.h"
+
+typedef struct {
+ /* For accelerating the computation of a*P + b*G: */
+ secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
+#ifdef USE_ENDOMORPHISM
+ secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
+#endif
+} secp256k1_ecmult_context;
+
+static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx);
+static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb);
+static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
+ const secp256k1_ecmult_context *src, const secp256k1_callback *cb);
+static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx);
+static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx);
+
+/** Double multiply: R = na*A + ng*G */
+static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng);
+
+#endif
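[review note] This is the variable-time double multiply used on the verification path: secp256k1_ecdsa_sig_verify earlier in this diff computes R = u2*Q + u1*G through exactly this entry point. Constant-time multiplication for signing is handled separately, by ecmult_gen.h for generator multiples and ecmult_const.h for arbitrary-point multiples.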
diff --git a/crypto/secp256k1/libsecp256k1/src/ecmult_const.h b/crypto/secp256k1/libsecp256k1/src/ecmult_const.h
new file mode 100644
index 000000000..2b0097655
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecmult_const.h
@@ -0,0 +1,15 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_CONST_
+#define _SECP256K1_ECMULT_CONST_
+
+#include "scalar.h"
+#include "group.h"
+
+static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *q);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/ecmult_const_impl.h b/crypto/secp256k1/libsecp256k1/src/ecmult_const_impl.h
new file mode 100644
index 000000000..90ac94770
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecmult_const_impl.h
@@ -0,0 +1,260 @@
+/**********************************************************************
+ * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_CONST_IMPL_
+#define _SECP256K1_ECMULT_CONST_IMPL_
+
+#include "scalar.h"
+#include "group.h"
+#include "ecmult_const.h"
+#include "ecmult_impl.h"
+
+#ifdef USE_ENDOMORPHISM
+ #define WNAF_BITS 128
+#else
+ #define WNAF_BITS 256
+#endif
+#define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w))
+
+/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
+#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
+ int m; \
+ int abs_n = (n) * (((n) > 0) * 2 - 1); \
+ int idx_n = abs_n / 2; \
+ secp256k1_fe neg_y; \
+ VERIFY_CHECK(((n) & 1) == 1); \
+ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
+ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
+ VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
+ VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
+ for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \
+ /* This loop is used to avoid secret data in array indices. See
+ * the comment in ecmult_gen_impl.h for rationale. */ \
+ secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
+ secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
+ } \
+ (r)->infinity = 0; \
+ secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
+ secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
+} while(0)
+
+
+/** Convert a number to WNAF notation. The number becomes represented by
+ * sum(2^{w*i} * wnaf[i], i=0..WNAF_SIZE(w)), with the following guarantees:
+ * - each wnaf[i] is an odd integer between -(1 << w) and (1 << w)
+ * - each wnaf[i] is nonzero
+ * - WNAF_SIZE(w) + 1 words are set; the return value is the skew applied to the scalar
+ *
+ * Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
+ * Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.),
+ * CT-RSA 2003, LNCS 2612, pp. 328-343, 2003. Springer-Verlag Berlin Heidelberg 2003.
+ *
+ * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335.
+ */
+static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
+ int global_sign;
+ int skew = 0;
+ int word = 0;
+ /* 1 2 3 */
+ int u_last;
+ int u;
+
+#ifdef USE_ENDOMORPHISM
+ int flip;
+ int bit;
+ secp256k1_scalar neg_s;
+ int not_neg_one;
+ /* If we are using the endomorphism, we cannot handle even numbers by negating
+ * them, since we are working with 128-bit numbers whose negations would be 256
+ * bits, eliminating the performance advantage. Instead we use a technique from
+     * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
+ * or 2 (for odd) to the number we are encoding, then compensating after the
+ * multiplication. */
+ /* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */
+ flip = secp256k1_scalar_is_high(&s);
+ /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
+ bit = flip ^ (s.d[0] & 1);
+ /* We check for negative one, since adding 2 to it will cause an overflow */
+ secp256k1_scalar_negate(&neg_s, &s);
+ not_neg_one = !secp256k1_scalar_is_one(&neg_s);
+ secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
+ /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
+ * that we added two to it and flipped it. In fact for -1 these operations are
+ * identical. We only flipped, but since skewing is required (in the sense that
+ * the skew must be 1 or 2, never zero) and flipping is not, we need to change
+ * our flags to claim that we only skewed. */
+ global_sign = secp256k1_scalar_cond_negate(&s, flip);
+ global_sign *= not_neg_one * 2 - 1;
+ skew = 1 << bit;
+#else
+ /* Otherwise, we just negate to force oddness */
+ int is_even = secp256k1_scalar_is_even(&s);
+ global_sign = secp256k1_scalar_cond_negate(&s, is_even);
+#endif
+
+ /* 4 */
+ u_last = secp256k1_scalar_shr_int(&s, w);
+ while (word * w < WNAF_BITS) {
+ int sign;
+ int even;
+
+ /* 4.1 4.4 */
+ u = secp256k1_scalar_shr_int(&s, w);
+ /* 4.2 */
+ even = ((u & 1) == 0);
+ sign = 2 * (u_last > 0) - 1;
+ u += sign * even;
+ u_last -= sign * even * (1 << w);
+
+ /* 4.3, adapted for global sign change */
+ wnaf[word++] = u_last * global_sign;
+
+ u_last = u;
+ }
+ wnaf[word] = u * global_sign;
+
+ VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
+ VERIFY_CHECK(word == WNAF_SIZE(w));
+ return skew;
+}
+
+
+static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar) {
+ secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
+ secp256k1_ge tmpa;
+ secp256k1_fe Z;
+
+#ifdef USE_ENDOMORPHISM
+ secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
+ int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
+ int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
+ int skew_1;
+ int skew_lam;
+ secp256k1_scalar q_1, q_lam;
+#else
+ int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)];
+#endif
+
+ int i;
+ secp256k1_scalar sc = *scalar;
+
+ /* build wnaf representation for q. */
+#ifdef USE_ENDOMORPHISM
+ /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
+ secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
+ /* no need for zero correction when using endomorphism since even
+ * numbers have one added to them anyway */
+ skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1);
+ skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1);
+#else
+ int is_zero = secp256k1_scalar_is_zero(scalar);
+ /* the wNAF ladder cannot handle zero, so bump it to one; we will
+ * correct the result after the fact */
+ sc.d[0] += is_zero;
+ VERIFY_CHECK(!secp256k1_scalar_is_zero(&sc));
+
+ secp256k1_wnaf_const(wnaf, sc, WINDOW_A - 1);
+#endif
+
+ /* Calculate odd multiples of a.
+ * All multiples are brought to the same Z 'denominator', which is stored
+ * in Z. Due to secp256k1's isomorphism we can do all operations pretending
+ * that the Z coordinate was 1, use affine addition formulae, and correct
+ * the Z coordinate of the result once at the end.
+ */
+ secp256k1_gej_set_ge(r, a);
+ secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
+ for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
+ secp256k1_fe_normalize_weak(&pre_a[i].y);
+ }
+#ifdef USE_ENDOMORPHISM
+ for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
+ secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
+ }
+#endif
+
+ /* first loop iteration (separated out so we can directly set r, rather
+ * than having it start at infinity, get doubled several times, then have
+ * its new value added to it) */
+#ifdef USE_ENDOMORPHISM
+ i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)];
+ VERIFY_CHECK(i != 0);
+ ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
+ secp256k1_gej_set_ge(r, &tmpa);
+
+ i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)];
+ VERIFY_CHECK(i != 0);
+ ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
+ secp256k1_gej_add_ge(r, r, &tmpa);
+#else
+ i = wnaf[WNAF_SIZE(WINDOW_A - 1)];
+ VERIFY_CHECK(i != 0);
+ ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
+ secp256k1_gej_set_ge(r, &tmpa);
+#endif
+ /* remaining loop iterations */
+ for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) {
+ int n;
+ int j;
+ for (j = 0; j < WINDOW_A - 1; ++j) {
+ secp256k1_gej_double_nonzero(r, r, NULL);
+ }
+#ifdef USE_ENDOMORPHISM
+ n = wnaf_1[i];
+ ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+ VERIFY_CHECK(n != 0);
+ secp256k1_gej_add_ge(r, r, &tmpa);
+
+ n = wnaf_lam[i];
+ ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
+ VERIFY_CHECK(n != 0);
+ secp256k1_gej_add_ge(r, r, &tmpa);
+#else
+ n = wnaf[i];
+ VERIFY_CHECK(n != 0);
+ ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+ secp256k1_gej_add_ge(r, r, &tmpa);
+#endif
+ }
+
+ secp256k1_fe_mul(&r->z, &r->z, &Z);
+
+#ifdef USE_ENDOMORPHISM
+ {
+ /* Correct for wNAF skew */
+ secp256k1_ge correction = *a;
+ secp256k1_ge_storage correction_1_stor;
+ secp256k1_ge_storage correction_lam_stor;
+ secp256k1_ge_storage a2_stor;
+ secp256k1_gej tmpj;
+ secp256k1_gej_set_ge(&tmpj, &correction);
+ secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
+ secp256k1_ge_set_gej(&correction, &tmpj);
+ secp256k1_ge_to_storage(&correction_1_stor, a);
+ secp256k1_ge_to_storage(&correction_lam_stor, a);
+ secp256k1_ge_to_storage(&a2_stor, &correction);
+
+ /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
+ secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
+ secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
+
+ /* Apply the correction */
+ secp256k1_ge_from_storage(&correction, &correction_1_stor);
+ secp256k1_ge_neg(&correction, &correction);
+ secp256k1_gej_add_ge(r, r, &correction);
+
+ secp256k1_ge_from_storage(&correction, &correction_lam_stor);
+ secp256k1_ge_neg(&correction, &correction);
+ secp256k1_ge_mul_lambda(&correction, &correction);
+ secp256k1_gej_add_ge(r, r, &correction);
+ }
+#else
+ /* correct for zero */
+ r->infinity |= is_zero;
+#endif
+}
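+
+/* The skew correction in equation form (illustrative note, endomorphism
+ * build): the ladder above computed (q_1 + skew_1)*A +
+ * (q_lam + skew_lam)*lambda(A) with each skew in {1, 2}, so subtracting
+ * the cmov-selected correction points (A or 2A, and their lambda images)
+ * recovers q_1*A + q_lam*lambda(A) = q*A without any secret-dependent
+ * branch or memory access. */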
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/ecmult_gen.h b/crypto/secp256k1/libsecp256k1/src/ecmult_gen.h
new file mode 100644
index 000000000..eb2cc9ead
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecmult_gen.h
@@ -0,0 +1,43 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_GEN_
+#define _SECP256K1_ECMULT_GEN_
+
+#include "scalar.h"
+#include "group.h"
+
+typedef struct {
+ /* For accelerating the computation of a*G:
+ * To harden against timing attacks, use the following mechanism:
+ * * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63.
+ * * Compute sum(n_i * 16^i * G + U_i, i=0..63), where:
+ * * U_i = U * 2^i (for i=0..62)
+ * * U_i = U * (1-2^63) (for i=63)
+ * where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0.
+ * For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is
+ * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63).
+ * None of the resulting prec group elements have a known scalar, and neither do any of
+ * the intermediate sums while computing a*G.
+ */
+ secp256k1_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */
+ secp256k1_scalar blind;
+ secp256k1_gej initial;
+} secp256k1_ecmult_gen_context;
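+
+/* Numeric illustration of the scheme above (illustrative note, not part of
+ * the API): writing the scalar a in base 16 as a = sum(n_i * 16^i, i=0..63),
+ * a*G = sum(prec[i][n_i], i=0..63), because the U_i terms hidden in the
+ * entries cancel (sum(U_i) = 0). E.g. for a = 0x21: n_0 = 1, n_1 = 2, so
+ * a*G = prec[0][1] + prec[1][2] + sum(prec[i][0], i=2..63); every row
+ * contributes exactly one entry, whatever the scalar. */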
+
+static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx);
+static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, const secp256k1_callback* cb);
+static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
+ const secp256k1_ecmult_gen_context* src, const secp256k1_callback* cb);
+static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx);
+static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx);
+
+/** Multiply with the generator: R = a*G */
+static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context* ctx, secp256k1_gej *r, const secp256k1_scalar *a);
+
+static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/ecmult_gen_impl.h b/crypto/secp256k1/libsecp256k1/src/ecmult_gen_impl.h
new file mode 100644
index 000000000..2ee27377f
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecmult_gen_impl.h
@@ -0,0 +1,205 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_GEN_IMPL_H_
+#define _SECP256K1_ECMULT_GEN_IMPL_H_
+
+#include "scalar.h"
+#include "group.h"
+#include "ecmult_gen.h"
+#include "hash_impl.h"
+#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
+#include "ecmult_static_context.h"
+#endif
+static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
+ ctx->prec = NULL;
+}
+
+static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) {
+#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
+ secp256k1_ge prec[1024];
+ secp256k1_gej gj;
+ secp256k1_gej nums_gej;
+ int i, j;
+#endif
+
+ if (ctx->prec != NULL) {
+ return;
+ }
+#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
+ ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec));
+
+ /* get the generator */
+ secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
+
+ /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
+ {
+ static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
+ secp256k1_fe nums_x;
+ secp256k1_ge nums_ge;
+ VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32));
+ VERIFY_CHECK(secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0));
+ secp256k1_gej_set_ge(&nums_gej, &nums_ge);
+ /* Add G to make the bits in x uniformly distributed. */
+ secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
+ }
+
+ /* compute prec. */
+ {
+ secp256k1_gej precj[1024]; /* Jacobian versions of prec. */
+ secp256k1_gej gbase;
+ secp256k1_gej numsbase;
+ gbase = gj; /* 16^j * G */
+ numsbase = nums_gej; /* 2^j * nums. */
+ for (j = 0; j < 64; j++) {
+ /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
+ precj[j*16] = numsbase;
+ for (i = 1; i < 16; i++) {
+ secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
+ }
+ /* Multiply gbase by 16. */
+ for (i = 0; i < 4; i++) {
+ secp256k1_gej_double_var(&gbase, &gbase, NULL);
+ }
+ /* Multiply numsbase by 2. */
+ secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
+ if (j == 62) {
+ /* For the last iteration, set numsbase to (1 - 2^63) * nums instead, matching U_63 above. */
+ secp256k1_gej_neg(&numsbase, &numsbase);
+ secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
+ }
+ }
+ secp256k1_ge_set_all_gej_var(1024, prec, precj, cb);
+ }
+ for (j = 0; j < 64; j++) {
+ for (i = 0; i < 16; i++) {
+ secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
+ }
+ }
+#else
+ (void)cb;
+ ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
+#endif
+ secp256k1_ecmult_gen_blind(ctx, NULL);
+}
+
+static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) {
+ return ctx->prec != NULL;
+}
+
+static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
+ const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) {
+ if (src->prec == NULL) {
+ dst->prec = NULL;
+ } else {
+#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
+ dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec));
+ memcpy(dst->prec, src->prec, sizeof(*dst->prec));
+#else
+ (void)cb;
+ dst->prec = src->prec;
+#endif
+ dst->initial = src->initial;
+ dst->blind = src->blind;
+ }
+}
+
+static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
+#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
+ free(ctx->prec);
+#endif
+ secp256k1_scalar_clear(&ctx->blind);
+ secp256k1_gej_clear(&ctx->initial);
+ ctx->prec = NULL;
+}
+
+static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) {
+ secp256k1_ge add;
+ secp256k1_ge_storage adds;
+ secp256k1_scalar gnb;
+ int bits;
+ int i, j;
+ memset(&adds, 0, sizeof(adds));
+ *r = ctx->initial;
+ /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
+ secp256k1_scalar_add(&gnb, gn, &ctx->blind);
+ add.infinity = 0;
+ for (j = 0; j < 64; j++) {
+ bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
+ for (i = 0; i < 16; i++) {
+ /** This uses a conditional move to avoid any secret data in array indexes.
+ * _Any_ use of secret indexes has been demonstrated to result in timing
+ * sidechannels, even when the cache-line access patterns are uniform.
+ * See also:
+ * "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
+ * (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
+ * "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
+ * by Dag Arne Osvik, Adi Shamir, and Eran Tromer
+ * (http://www.tau.ac.il/~tromer/papers/cache.pdf)
+ */
+ secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
+ }
+ secp256k1_ge_from_storage(&add, &adds);
+ secp256k1_gej_add_ge(r, r, &add);
+ }
+ bits = 0;
+ secp256k1_ge_clear(&add);
+ secp256k1_scalar_clear(&gnb);
+}
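+
+/* Access-pattern sketch for the loop above (illustrative note): if nibble
+ * j of gnb is 0xB, the inner loop still reads all 16 entries of
+ * (*ctx->prec)[j] in order; only the cmov with i == 11 lands in adds, so
+ * the memory traffic is identical for every possible nibble value. */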
+
+/* Setup blinding values for secp256k1_ecmult_gen. */
+static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
+ secp256k1_scalar b;
+ secp256k1_gej gb;
+ secp256k1_fe s;
+ unsigned char nonce32[32];
+ secp256k1_rfc6979_hmac_sha256_t rng;
+ int retry;
+ unsigned char keydata[64] = {0};
+ if (seed32 == NULL) {
+ /* When seed is NULL, reset the initial point and blinding value. */
+ secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
+ secp256k1_gej_neg(&ctx->initial, &ctx->initial);
+ secp256k1_scalar_set_int(&ctx->blind, 1);
+ }
+ /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
+ secp256k1_scalar_get_b32(nonce32, &ctx->blind);
+ /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
+ * and guards against weak or adversarial seeds. This is a simpler and safer interface than
+ * asking the caller for blinding values directly and expecting them to retry on failure.
+ */
+ memcpy(keydata, nonce32, 32);
+ if (seed32 != NULL) {
+ memcpy(keydata + 32, seed32, 32);
+ }
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
+ memset(keydata, 0, sizeof(keydata));
+ /* Retry for out of range results to achieve uniformity. */
+ do {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+ retry = !secp256k1_fe_set_b32(&s, nonce32);
+ retry |= secp256k1_fe_is_zero(&s);
+ } while (retry);
+ /* Randomize the projection to defend against multiplier sidechannels. */
+ secp256k1_gej_rescale(&ctx->initial, &s);
+ secp256k1_fe_clear(&s);
+ do {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+ secp256k1_scalar_set_b32(&b, nonce32, &retry);
+ /* A blinding value of 0 works, but would undermine the projection hardening. */
+ retry |= secp256k1_scalar_is_zero(&b);
+ } while (retry);
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+ memset(nonce32, 0, 32);
+ secp256k1_ecmult_gen(ctx, &gb, &b);
+ secp256k1_scalar_negate(&b, &b);
+ ctx->blind = b;
+ ctx->initial = gb;
+ secp256k1_scalar_clear(&b);
+ secp256k1_gej_clear(&gb);
+}
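+
+/* The blinding identity, spelled out (illustrative note): this leaves
+ * ctx->blind = -b and ctx->initial = b*G (with a randomized projection).
+ * A subsequent secp256k1_ecmult_gen(n) computes (n - b)*G + b*G = n*G,
+ * so the table lookups are driven by n - b rather than by n itself. */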
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/ecmult_impl.h b/crypto/secp256k1/libsecp256k1/src/ecmult_impl.h
new file mode 100644
index 000000000..e6e5f4718
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/ecmult_impl.h
@@ -0,0 +1,389 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_ECMULT_IMPL_H_
+#define _SECP256K1_ECMULT_IMPL_H_
+
+#include "group.h"
+#include "scalar.h"
+#include "ecmult.h"
+
+/* optimal for 128-bit and 256-bit exponents. */
+#define WINDOW_A 5
+
+/** larger numbers may result in slightly better performance, at the cost of
+ exponentially larger precomputed tables. */
+#ifdef USE_ENDOMORPHISM
+/** Two tables for window size 15: 1.375 MiB. */
+#define WINDOW_G 15
+#else
+/** One table for window size 16: 1.375 MiB. */
+#define WINDOW_G 16
+#endif
+
+/** The number of entries a table with precomputed multiples needs to have. */
+#define ECMULT_TABLE_SIZE(w) (1 << ((w)-2))
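+
+/* For example (illustrative note): w = 5 gives 1 << 3 = 8 entries, holding
+ * the odd multiples [1*a, 3*a, ..., 15*a]; wnaf digits for window w are
+ * odd and bounded by 2^(w-1) in absolute value, so 2^(w-2) precomputed
+ * points suffice. */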
+
+/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
+ * the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will
+ * contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z.
+ * Prej's Z values are undefined, except for the last value.
+ */
+static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) {
+ secp256k1_gej d;
+ secp256k1_ge a_ge, d_ge;
+ int i;
+
+ VERIFY_CHECK(!a->infinity);
+
+ secp256k1_gej_double_var(&d, a, NULL);
+
+ /*
+ * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate
+ * of 'd', and scale the 1P starting value's x/y coordinates without changing its z.
+ */
+ d_ge.x = d.x;
+ d_ge.y = d.y;
+ d_ge.infinity = 0;
+
+ secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z);
+ prej[0].x = a_ge.x;
+ prej[0].y = a_ge.y;
+ prej[0].z = a->z;
+ prej[0].infinity = 0;
+
+ zr[0] = d.z;
+ for (i = 1; i < n; i++) {
+ secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]);
+ }
+
+ /*
+ * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only
+ * the final point's z coordinate is actually used though, so just update that.
+ */
+ secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z);
+}
+
+/** Fill a table 'pre' with precomputed odd multiples of a.
+ *
+ * There are two versions of this function:
+ * - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its
+ * resulting point set to a single constant Z denominator, stores the X and Y
+ * coordinates as secp256k1_ge points in pre, and stores the global Z in globalz.
+ * It only operates on tables sized for WINDOW_A wnaf multiples.
+ * - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its
+ * resulting point set to actually affine points, and stores those in pre.
+ * It operates on tables of any size, but uses heap-allocated temporaries.
+ *
+ * To compute a*P + b*G, we compute a table for P using the first function,
+ * and for G using the second (which requires an inverse, but it only needs to
+ * happen once).
+ */
+static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
+ secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
+ secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
+
+ /* Compute the odd multiples in Jacobian form. */
+ secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a);
+ /* Bring them to the same Z denominator. */
+ secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr);
+}
+
+static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge_storage *pre, const secp256k1_gej *a, const secp256k1_callback *cb) {
+ secp256k1_gej *prej = (secp256k1_gej*)checked_malloc(cb, sizeof(secp256k1_gej) * n);
+ secp256k1_ge *prea = (secp256k1_ge*)checked_malloc(cb, sizeof(secp256k1_ge) * n);
+ secp256k1_fe *zr = (secp256k1_fe*)checked_malloc(cb, sizeof(secp256k1_fe) * n);
+ int i;
+
+ /* Compute the odd multiples in Jacobian form. */
+ secp256k1_ecmult_odd_multiples_table(n, prej, zr, a);
+ /* Convert them in batch to affine coordinates. */
+ secp256k1_ge_set_table_gej_var(n, prea, prej, zr);
+ /* Convert them to compact storage form. */
+ for (i = 0; i < n; i++) {
+ secp256k1_ge_to_storage(&pre[i], &prea[i]);
+ }
+
+ free(prea);
+ free(prej);
+ free(zr);
+}
+
+/** The following two macros retrieve a particular odd multiple from a table
+ * of precomputed multiples. */
+#define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \
+ VERIFY_CHECK(((n) & 1) == 1); \
+ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
+ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
+ if ((n) > 0) { \
+ *(r) = (pre)[((n)-1)/2]; \
+ } else { \
+ secp256k1_ge_neg((r), &(pre)[(-(n)-1)/2]); \
+ } \
+} while(0)
+
+#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \
+ VERIFY_CHECK(((n) & 1) == 1); \
+ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
+ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
+ if ((n) > 0) { \
+ secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \
+ } else { \
+ secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \
+ secp256k1_ge_neg((r), (r)); \
+ } \
+} while(0)
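+
+/* Worked lookup (illustrative note): for n = 7 either macro reads entry
+ * (7-1)/2 = 3, which holds 7*a given the [1*a, 3*a, 5*a, ...] layout; for
+ * n = -7 it reads the same entry and negates it. Both macros branch on the
+ * sign of n, so they are reserved for the variable-time code paths. */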
+
+static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
+ ctx->pre_g = NULL;
+#ifdef USE_ENDOMORPHISM
+ ctx->pre_g_128 = NULL;
+#endif
+}
+
+static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) {
+ secp256k1_gej gj;
+
+ if (ctx->pre_g != NULL) {
+ return;
+ }
+
+ /* get the generator */
+ secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
+
+ ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
+
+ /* precompute the tables with odd multiples */
+ secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj, cb);
+
+#ifdef USE_ENDOMORPHISM
+ {
+ secp256k1_gej g_128j;
+ int i;
+
+ ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
+
+ /* calculate 2^128*generator */
+ g_128j = gj;
+ for (i = 0; i < 128; i++) {
+ secp256k1_gej_double_var(&g_128j, &g_128j, NULL);
+ }
+ secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j, cb);
+ }
+#endif
+}
+
+static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
+ const secp256k1_ecmult_context *src, const secp256k1_callback *cb) {
+ if (src->pre_g == NULL) {
+ dst->pre_g = NULL;
+ } else {
+ size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
+ dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
+ memcpy(dst->pre_g, src->pre_g, size);
+ }
+#ifdef USE_ENDOMORPHISM
+ if (src->pre_g_128 == NULL) {
+ dst->pre_g_128 = NULL;
+ } else {
+ size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
+ dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
+ memcpy(dst->pre_g_128, src->pre_g_128, size);
+ }
+#endif
+}
+
+static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
+ return ctx->pre_g != NULL;
+}
+
+static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
+ free(ctx->pre_g);
+#ifdef USE_ENDOMORPHISM
+ free(ctx->pre_g_128);
+#endif
+ secp256k1_ecmult_context_init(ctx);
+}
+
+/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
+ * with the following guarantees:
+ * - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1)
+ * - two non-zero entries in wnaf are separated by at least w-1 zeroes.
+ * - the position just past the most significant nonzero entry in wnaf is returned. This number
+ *   is at most 256, and at most one more than the number of bits in the absolute value of the input.
+ */
+static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
+ secp256k1_scalar s = *a;
+ int last_set_bit = -1;
+ int bit = 0;
+ int sign = 1;
+ int carry = 0;
+
+ VERIFY_CHECK(wnaf != NULL);
+ VERIFY_CHECK(0 <= len && len <= 256);
+ VERIFY_CHECK(a != NULL);
+ VERIFY_CHECK(2 <= w && w <= 31);
+
+ memset(wnaf, 0, len * sizeof(wnaf[0]));
+
+ if (secp256k1_scalar_get_bits(&s, 255, 1)) {
+ secp256k1_scalar_negate(&s, &s);
+ sign = -1;
+ }
+
+ while (bit < len) {
+ int now;
+ int word;
+ if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
+ bit++;
+ continue;
+ }
+
+ now = w;
+ if (now > len - bit) {
+ now = len - bit;
+ }
+
+ word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;
+
+ carry = (word >> (w-1)) & 1;
+ word -= carry << w;
+
+ wnaf[bit] = sign * word;
+ last_set_bit = bit;
+
+ bit += now;
+ }
+#ifdef VERIFY
+ CHECK(carry == 0);
+ while (bit < 256) {
+ CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);
+ }
+#endif
+ return last_set_bit + 1;
+}
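+
+/* Worked encoding (illustrative note): with w = 2 (plain NAF), the input
+ * 13 = 0b1101 yields wnaf = [1, 0, -1, 0, 1] and a return value of 5,
+ * since 1 - 1*4 + 1*16 = 13; each nonzero digit is odd, and consecutive
+ * nonzero digits are at least w-1 = 1 position apart, as promised above. */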
+
+static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
+ secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
+ secp256k1_ge tmpa;
+ secp256k1_fe Z;
+#ifdef USE_ENDOMORPHISM
+ secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
+ secp256k1_scalar na_1, na_lam;
+ /* Split G factors. */
+ secp256k1_scalar ng_1, ng_128;
+ int wnaf_na_1[130];
+ int wnaf_na_lam[130];
+ int bits_na_1;
+ int bits_na_lam;
+ int wnaf_ng_1[129];
+ int bits_ng_1;
+ int wnaf_ng_128[129];
+ int bits_ng_128;
+#else
+ int wnaf_na[256];
+ int bits_na;
+ int wnaf_ng[256];
+ int bits_ng;
+#endif
+ int i;
+ int bits;
+
+#ifdef USE_ENDOMORPHISM
+ /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
+ secp256k1_scalar_split_lambda(&na_1, &na_lam, na);
+
+ /* build wnaf representation for na_1 and na_lam. */
+ bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, 130, &na_1, WINDOW_A);
+ bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, 130, &na_lam, WINDOW_A);
+ VERIFY_CHECK(bits_na_1 <= 130);
+ VERIFY_CHECK(bits_na_lam <= 130);
+ bits = bits_na_1;
+ if (bits_na_lam > bits) {
+ bits = bits_na_lam;
+ }
+#else
+ /* build wnaf representation for na. */
+ bits_na = secp256k1_ecmult_wnaf(wnaf_na, 256, na, WINDOW_A);
+ bits = bits_na;
+#endif
+
+ /* Calculate odd multiples of a.
+ * All multiples are brought to the same Z 'denominator', which is stored
+ * in Z. Due to secp256k1's isomorphism we can do all operations pretending
+ * that the Z coordinate was 1, use affine addition formulae, and correct
+ * the Z coordinate of the result once at the end.
+ * The exception is the precomputed G table points, which are actually
+ * affine. Compared to the base used for other points, they have a Z ratio
+ * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
+ * isomorphism to efficiently add with a known Z inverse.
+ */
+ secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, a);
+
+#ifdef USE_ENDOMORPHISM
+ for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
+ secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
+ }
+
+ /* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */
+ secp256k1_scalar_split_128(&ng_1, &ng_128, ng);
+
+ /* Build wnaf representation for ng_1 and ng_128 */
+ bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
+ bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
+ if (bits_ng_1 > bits) {
+ bits = bits_ng_1;
+ }
+ if (bits_ng_128 > bits) {
+ bits = bits_ng_128;
+ }
+#else
+ bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
+ if (bits_ng > bits) {
+ bits = bits_ng;
+ }
+#endif
+
+ secp256k1_gej_set_infinity(r);
+
+ for (i = bits - 1; i >= 0; i--) {
+ int n;
+ secp256k1_gej_double_var(r, r, NULL);
+#ifdef USE_ENDOMORPHISM
+ if (i < bits_na_1 && (n = wnaf_na_1[i])) {
+ ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+ secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
+ }
+ if (i < bits_na_lam && (n = wnaf_na_lam[i])) {
+ ECMULT_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
+ secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
+ }
+ if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
+ ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
+ secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
+ }
+ if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
+ ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
+ secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
+ }
+#else
+ if (i < bits_na && (n = wnaf_na[i])) {
+ ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+ secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
+ }
+ if (i < bits_ng && (n = wnaf_ng[i])) {
+ ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
+ secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
+ }
+#endif
+ }
+
+ if (!r->infinity) {
+ secp256k1_fe_mul(&r->z, &r->z, &Z);
+ }
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field.h b/crypto/secp256k1/libsecp256k1/src/field.h
new file mode 100644
index 000000000..311329b92
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field.h
@@ -0,0 +1,119 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_
+#define _SECP256K1_FIELD_
+
+/** Field element module.
+ *
+ * Field elements can be represented in several ways, but code accessing
+ * them (and implementations) needs to take certain properties into account:
+ * - Each field element can be normalized or not.
+ * - Each field element has a magnitude, which represents how far its
+ * representation is from normalization. Normalized elements
+ * always have a magnitude of 1, but a magnitude of 1 doesn't imply
+ * that the element is normalized.
+ */
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(USE_FIELD_10X26)
+#include "field_10x26.h"
+#elif defined(USE_FIELD_5X52)
+#include "field_5x52.h"
+#else
+#error "Please select field implementation"
+#endif
+
+/** Normalize a field element. */
+static void secp256k1_fe_normalize(secp256k1_fe *r);
+
+/** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */
+static void secp256k1_fe_normalize_weak(secp256k1_fe *r);
+
+/** Normalize a field element, without constant-time guarantee. */
+static void secp256k1_fe_normalize_var(secp256k1_fe *r);
+
+/** Verify whether a field element represents zero, i.e. would normalize to a zero value. The field
+ * implementation may optionally normalize the input, but this should not be relied upon. */
+static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r);
+
+/** Verify whether a field element represents zero, i.e. would normalize to a zero value. The field
+ * implementation may optionally normalize the input, but this should not be relied upon. */
+static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r);
+
+/** Set a field element equal to a small integer. Resulting field element is normalized. */
+static void secp256k1_fe_set_int(secp256k1_fe *r, int a);
+
+/** Verify whether a field element is zero. Requires the input to be normalized. */
+static int secp256k1_fe_is_zero(const secp256k1_fe *a);
+
+/** Check the "oddness" of a field element. Requires the input to be normalized. */
+static int secp256k1_fe_is_odd(const secp256k1_fe *a);
+
+/** Compare two field elements. Requires magnitude-1 inputs. */
+static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b);
+
+/** Compare two field elements. Requires both inputs to be normalized. */
+static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
+
+/** Set a field element equal to a 32-byte big endian value. If successful, the resulting field element is normalized. */
+static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a);
+
+/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized. */
+static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a);
+
+/** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
+ * as an argument. The magnitude of the output is one higher. */
+static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m);
+
+/** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
+ * small integer. */
+static void secp256k1_fe_mul_int(secp256k1_fe *r, int a);
+
+/** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */
+static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a);
+
+/** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
+ * The output magnitude is 1 (but not guaranteed to be normalized). */
+static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
+
+/** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
+ * The output magnitude is 1 (but not guaranteed to be normalized). */
+static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a);
+
+/** Sets a field element to be the (modular) square root (if any exist) of another. Requires the
+ * input's magnitude to be at most 8. The output magnitude is 1 (but not guaranteed to be
+ * normalized). Return value indicates whether a square root was found. */
+static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a);
+
+/** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
+ * at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
+static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a);
+
+/** Potentially faster version of secp256k1_fe_inv, without constant-time guarantee. */
+static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a);
+
+/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
+ * at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and
+ * outputs must not overlap in memory. */
+static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a);
+
+/** Convert a field element to the storage type. */
+static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
+
+/** Convert a field element back from the storage type. */
+static void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a);
+
+/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
+static void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag);
+
+/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
+static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field_10x26.h b/crypto/secp256k1/libsecp256k1/src/field_10x26.h
new file mode 100644
index 000000000..61ee1e096
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_10x26.h
@@ -0,0 +1,47 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_REPR_
+#define _SECP256K1_FIELD_REPR_
+
+#include <stdint.h>
+
+typedef struct {
+ /* X = sum(i=0..9, n[i]*2^(i*26)) mod p */
+ uint32_t n[10];
+#ifdef VERIFY
+ int magnitude;
+ int normalized;
+#endif
+} secp256k1_fe;
+
+/* Unpacks a constant into an overlapping multi-limbed FE element. */
+#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
+ (d0) & 0x3FFFFFFUL, \
+ (((uint32_t)d0) >> 26) | (((uint32_t)(d1) & 0xFFFFFUL) << 6), \
+ (((uint32_t)d1) >> 20) | (((uint32_t)(d2) & 0x3FFFUL) << 12), \
+ (((uint32_t)d2) >> 14) | (((uint32_t)(d3) & 0xFFUL) << 18), \
+ (((uint32_t)d3) >> 8) | (((uint32_t)(d4) & 0x3UL) << 24), \
+ (((uint32_t)d4) >> 2) & 0x3FFFFFFUL, \
+ (((uint32_t)d4) >> 28) | (((uint32_t)(d5) & 0x3FFFFFUL) << 4), \
+ (((uint32_t)d5) >> 22) | (((uint32_t)(d6) & 0xFFFFUL) << 10), \
+ (((uint32_t)d6) >> 16) | (((uint32_t)(d7) & 0x3FFUL) << 16), \
+ (((uint32_t)d7) >> 10) \
+}
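+
+/* Limb layout example (illustrative note): the eight 32-bit words d7..d0
+ * are repacked into ten 26-bit limbs; limb 0 takes the low 26 bits of d0,
+ * limb 1 takes the top 6 bits of d0 plus the low 20 bits of d1, and so on.
+ * SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5) therefore yields
+ * n = {5, 0, 0, 0, 0, 0, 0, 0, 0, 0}. */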
+
+#ifdef VERIFY
+#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1}
+#else
+#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))}
+#endif
+
+typedef struct {
+ uint32_t n[8];
+} secp256k1_fe_storage;
+
+#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}
+#define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4], d.n[3], d.n[2], d.n[1], d.n[0]
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field_10x26_impl.h b/crypto/secp256k1/libsecp256k1/src/field_10x26_impl.h
new file mode 100644
index 000000000..212cc5396
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_10x26_impl.h
@@ -0,0 +1,1138 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
+#define _SECP256K1_FIELD_REPR_IMPL_H_
+
+#include <stdio.h>
+#include <string.h>
+#include "util.h"
+#include "num.h"
+#include "field.h"
+
+#ifdef VERIFY
+static void secp256k1_fe_verify(const secp256k1_fe *a) {
+ const uint32_t *d = a->n;
+ int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
+ r &= (d[0] <= 0x3FFFFFFUL * m);
+ r &= (d[1] <= 0x3FFFFFFUL * m);
+ r &= (d[2] <= 0x3FFFFFFUL * m);
+ r &= (d[3] <= 0x3FFFFFFUL * m);
+ r &= (d[4] <= 0x3FFFFFFUL * m);
+ r &= (d[5] <= 0x3FFFFFFUL * m);
+ r &= (d[6] <= 0x3FFFFFFUL * m);
+ r &= (d[7] <= 0x3FFFFFFUL * m);
+ r &= (d[8] <= 0x3FFFFFFUL * m);
+ r &= (d[9] <= 0x03FFFFFUL * m);
+ r &= (a->magnitude >= 0);
+ r &= (a->magnitude <= 32);
+ if (a->normalized) {
+ r &= (a->magnitude <= 1);
+ if (r && (d[9] == 0x03FFFFFUL)) {
+ uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
+ if (mid == 0x3FFFFFFUL) {
+ r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
+ }
+ }
+ }
+ VERIFY_CHECK(r == 1);
+}
+#else
+static void secp256k1_fe_verify(const secp256k1_fe *a) {
+ (void)a;
+}
+#endif
+
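+/* Reduction identity used by the normalization code below (illustrative
+ * note): the field prime is p = 2^256 - 0x1000003D1, so a carry x out of
+ * bit 256 folds back in as x * 0x1000003D1 = x * (2^32 + 0x3D1). In
+ * 26-bit limbs, 2^32 = 2^6 * 2^26, which is why the code adds x * 0x3D1
+ * to t0 and x << 6 to t1. */
+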
+static void secp256k1_fe_normalize(secp256k1_fe *r) {
+ uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
+ t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
+
+ /* Reduce t9 at the start so there will be at most a single carry from the first pass */
+ uint32_t m;
+ uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x3D1UL; t1 += (x << 6);
+ t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;
+
+ /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t9 >> 23 == 0);
+
+ /* At most a single final reduction is needed; check if the value is >= the field characteristic */
+ x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
+ & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
+
+ /* Apply the final reduction (for constant-time behaviour, we do it always) */
+ t0 += x * 0x3D1UL; t1 += (x << 6);
+ t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
+
+ /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */
+ VERIFY_CHECK(t9 >> 22 == x);
+
+ /* Mask off the possible multiple of 2^256 from the final reduction */
+ t9 &= 0x03FFFFFUL;
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+ r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
+ uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
+ t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
+
+ /* Reduce t9 at the start so there will be at most a single carry from the first pass */
+ uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x3D1UL; t1 += (x << 6);
+ t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
+
+ /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t9 >> 23 == 0);
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+ r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
+ uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
+ t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
+
+ /* Reduce t9 at the start so there will be at most a single carry from the first pass */
+ uint32_t m;
+ uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x3D1UL; t1 += (x << 6);
+ t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;
+
+ /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t9 >> 23 == 0);
+
+ /* At most a single final reduction is needed; check if the value is >= the field characteristic */
+ x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
+ & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
+
+ if (x) {
+ t0 += 0x3D1UL; t1 += (x << 6);
+ t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
+
+ /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */
+ VERIFY_CHECK(t9 >> 22 == x);
+
+ /* Mask off the possible multiple of 2^256 from the final reduction */
+ t9 &= 0x03FFFFFUL;
+ }
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+ r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
+ uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
+ t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
+
+ /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
+ uint32_t z0, z1;
+
+ /* Reduce t9 at the start so there will be at most a single carry from the first pass */
+ uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x3D1UL; t1 += (x << 6);
+ t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0 = t0; z1 = t0 ^ 0x3D0UL;
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
+ z0 |= t9; z1 &= t9 ^ 0x3C00000UL;
+
+ /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t9 >> 23 == 0);
+
+ return (z0 == 0) | (z1 == 0x3FFFFFFUL);
+}
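+
+/* Sanity check of the z1 masks above (illustrative note): the raw limbs of
+ * p are 0x3FFFC2F, 0x3FFFFBF, seven times 0x3FFFFFF, and 0x03FFFFF, and
+ * 0x3FFFC2F ^ 0x3D0 == 0x3FFFFBF ^ 0x40 == 0x03FFFFF ^ 0x3C00000 ==
+ * 0x3FFFFFF, so z1 remains 0x3FFFFFF exactly when the reduced limbs equal
+ * p, i.e. when the element normalizes to zero. */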
+
+static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
+ uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
+ uint32_t z0, z1;
+ uint32_t x;
+
+ t0 = r->n[0];
+ t9 = r->n[9];
+
+ /* Reduce t9 at the start so there will be at most a single carry from the first pass */
+ x = t9 >> 22;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x3D1UL;
+
+ /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
+ z0 = t0 & 0x3FFFFFFUL;
+ z1 = z0 ^ 0x3D0UL;
+
+ /* Fast return path should catch the majority of cases */
+ if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
+ return 0;
+ }
+
+ t1 = r->n[1];
+ t2 = r->n[2];
+ t3 = r->n[3];
+ t4 = r->n[4];
+ t5 = r->n[5];
+ t6 = r->n[6];
+ t7 = r->n[7];
+ t8 = r->n[8];
+
+ t9 &= 0x03FFFFFUL;
+ t1 += (x << 6);
+
+ t1 += (t0 >> 26);
+ t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
+ t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
+ t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
+ t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
+ t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
+ t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
+ t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
+ t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
+ z0 |= t9; z1 &= t9 ^ 0x3C00000UL;
+
+ /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t9 >> 23 == 0);
+
+ return (z0 == 0) | (z1 == 0x3FFFFFFUL);
+}
+
+SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
+ r->n[0] = a;
+ r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
+ const uint32_t *t = a->n;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
+}
+
+SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ return a->n[0] & 1;
+}
+
+SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
+ int i;
+#ifdef VERIFY
+ a->magnitude = 0;
+ a->normalized = 1;
+#endif
+ for (i=0; i<10; i++) {
+ a->n[i] = 0;
+ }
+}
+
+static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
+ int i;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ VERIFY_CHECK(b->normalized);
+ secp256k1_fe_verify(a);
+ secp256k1_fe_verify(b);
+#endif
+ for (i = 9; i >= 0; i--) {
+ if (a->n[i] > b->n[i]) {
+ return 1;
+ }
+ if (a->n[i] < b->n[i]) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
+ int i;
+ r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
+ r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
+ for (i=0; i<32; i++) {
+ int j;
+ for (j=0; j<4; j++) {
+ int limb = (8*i+2*j)/26;
+ int shift = (8*i+2*j)%26;
+ r->n[limb] |= (uint32_t)((a[31-i] >> (2*j)) & 0x3) << shift;
+ }
+ }
+ if (r->n[9] == 0x3FFFFFUL && (r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL && (r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL) {
+ return 0;
+ }
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+ return 1;
+}
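+
+/* Overflow example for the check above (illustrative note): the 32-byte
+ * encoding of p itself (0xFFFF...FFFE FFFFFC2F) makes every tested
+ * condition true, so the function returns 0; any encoding of a value
+ * below p is accepted and returns 1. */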
+
+/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized. */
+static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
+ int i;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ for (i=0; i<32; i++) {
+ int j;
+ int c = 0;
+ for (j=0; j<4; j++) {
+ int limb = (8*i+2*j)/26;
+ int shift = (8*i+2*j)%26;
+ c |= ((a->n[limb] >> shift) & 0x3) << (2 * j);
+ }
+ r[31-i] = c;
+ }
+}
+
+SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= m);
+ secp256k1_fe_verify(a);
+#endif
+ r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
+ r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
+ r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2];
+ r->n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[3];
+ r->n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[4];
+ r->n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[5];
+ r->n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[6];
+ r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7];
+ r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8];
+ r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9];
+#ifdef VERIFY
+ r->magnitude = m + 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
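+
+/* Why the constants above work (illustrative note): the ten constants are
+ * the limbs of p, so r = 2*(m+1)*p - a computed limbwise. This is
+ * congruent to -a mod p, each limb stays nonnegative whenever a's
+ * magnitude is at most m, and the result has magnitude m + 1 as
+ * documented in field.h. */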
+
+SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
+ r->n[0] *= a;
+ r->n[1] *= a;
+ r->n[2] *= a;
+ r->n[3] *= a;
+ r->n[4] *= a;
+ r->n[5] *= a;
+ r->n[6] *= a;
+ r->n[7] *= a;
+ r->n[8] *= a;
+ r->n[9] *= a;
+#ifdef VERIFY
+ r->magnitude *= a;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ secp256k1_fe_verify(a);
+#endif
+ r->n[0] += a->n[0];
+ r->n[1] += a->n[1];
+ r->n[2] += a->n[2];
+ r->n[3] += a->n[3];
+ r->n[4] += a->n[4];
+ r->n[5] += a->n[5];
+ r->n[6] += a->n[6];
+ r->n[7] += a->n[7];
+ r->n[8] += a->n[8];
+ r->n[9] += a->n[9];
+#ifdef VERIFY
+ r->magnitude += a->magnitude;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+#ifdef VERIFY
+#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
+#else
+#define VERIFY_BITS(x, n) do { } while(0)
+#endif
+
+SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
+ uint64_t c, d;
+ uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
+ uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
+ const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;
+
+ VERIFY_BITS(a[0], 30);
+ VERIFY_BITS(a[1], 30);
+ VERIFY_BITS(a[2], 30);
+ VERIFY_BITS(a[3], 30);
+ VERIFY_BITS(a[4], 30);
+ VERIFY_BITS(a[5], 30);
+ VERIFY_BITS(a[6], 30);
+ VERIFY_BITS(a[7], 30);
+ VERIFY_BITS(a[8], 30);
+ VERIFY_BITS(a[9], 26);
+ VERIFY_BITS(b[0], 30);
+ VERIFY_BITS(b[1], 30);
+ VERIFY_BITS(b[2], 30);
+ VERIFY_BITS(b[3], 30);
+ VERIFY_BITS(b[4], 30);
+ VERIFY_BITS(b[5], 30);
+ VERIFY_BITS(b[6], 30);
+ VERIFY_BITS(b[7], 30);
+ VERIFY_BITS(b[8], 30);
+ VERIFY_BITS(b[9], 26);
+
+ /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n.
+ * px is a shorthand for sum(a[i]*b[x-i], i=0..x).
+ * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0].
+ */
+
+ d = (uint64_t)a[0] * b[9]
+ + (uint64_t)a[1] * b[8]
+ + (uint64_t)a[2] * b[7]
+ + (uint64_t)a[3] * b[6]
+ + (uint64_t)a[4] * b[5]
+ + (uint64_t)a[5] * b[4]
+ + (uint64_t)a[6] * b[3]
+ + (uint64_t)a[7] * b[2]
+ + (uint64_t)a[8] * b[1]
+ + (uint64_t)a[9] * b[0];
+ /* VERIFY_BITS(d, 64); */
+ /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
+ t9 = d & M; d >>= 26;
+ VERIFY_BITS(t9, 26);
+ VERIFY_BITS(d, 38);
+ /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
+
+ c = (uint64_t)a[0] * b[0];
+ VERIFY_BITS(c, 60);
+ /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */
+ d += (uint64_t)a[1] * b[9]
+ + (uint64_t)a[2] * b[8]
+ + (uint64_t)a[3] * b[7]
+ + (uint64_t)a[4] * b[6]
+ + (uint64_t)a[5] * b[5]
+ + (uint64_t)a[6] * b[4]
+ + (uint64_t)a[7] * b[3]
+ + (uint64_t)a[8] * b[2]
+ + (uint64_t)a[9] * b[1];
+ VERIFY_BITS(d, 63);
+ /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+ u0 = d & M; d >>= 26; c += u0 * R0;
+ VERIFY_BITS(u0, 26);
+ VERIFY_BITS(d, 37);
+ VERIFY_BITS(c, 61);
+ /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+ t0 = c & M; c >>= 26; c += u0 * R1;
+ VERIFY_BITS(t0, 26);
+ VERIFY_BITS(c, 37);
+ /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+ /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+
+ c += (uint64_t)a[0] * b[1]
+ + (uint64_t)a[1] * b[0];
+ VERIFY_BITS(c, 62);
+ /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ d += (uint64_t)a[2] * b[9]
+ + (uint64_t)a[3] * b[8]
+ + (uint64_t)a[4] * b[7]
+ + (uint64_t)a[5] * b[6]
+ + (uint64_t)a[6] * b[5]
+ + (uint64_t)a[7] * b[4]
+ + (uint64_t)a[8] * b[3]
+ + (uint64_t)a[9] * b[2];
+ VERIFY_BITS(d, 63);
+ /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ u1 = d & M; d >>= 26; c += u1 * R0;
+ VERIFY_BITS(u1, 26);
+ VERIFY_BITS(d, 37);
+ VERIFY_BITS(c, 63);
+ /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ t1 = c & M; c >>= 26; c += u1 * R1;
+ VERIFY_BITS(t1, 26);
+ VERIFY_BITS(c, 38);
+ /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+
+ c += (uint64_t)a[0] * b[2]
+ + (uint64_t)a[1] * b[1]
+ + (uint64_t)a[2] * b[0];
+ VERIFY_BITS(c, 62);
+ /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ d += (uint64_t)a[3] * b[9]
+ + (uint64_t)a[4] * b[8]
+ + (uint64_t)a[5] * b[7]
+ + (uint64_t)a[6] * b[6]
+ + (uint64_t)a[7] * b[5]
+ + (uint64_t)a[8] * b[4]
+ + (uint64_t)a[9] * b[3];
+ VERIFY_BITS(d, 63);
+ /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ u2 = d & M; d >>= 26; c += u2 * R0;
+ VERIFY_BITS(u2, 26);
+ VERIFY_BITS(d, 37);
+ VERIFY_BITS(c, 63);
+ /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ t2 = c & M; c >>= 26; c += u2 * R1;
+ VERIFY_BITS(t2, 26);
+ VERIFY_BITS(c, 38);
+ /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+
+ c += (uint64_t)a[0] * b[3]
+ + (uint64_t)a[1] * b[2]
+ + (uint64_t)a[2] * b[1]
+ + (uint64_t)a[3] * b[0];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ d += (uint64_t)a[4] * b[9]
+ + (uint64_t)a[5] * b[8]
+ + (uint64_t)a[6] * b[7]
+ + (uint64_t)a[7] * b[6]
+ + (uint64_t)a[8] * b[5]
+ + (uint64_t)a[9] * b[4];
+ VERIFY_BITS(d, 63);
+ /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ u3 = d & M; d >>= 26; c += u3 * R0;
+ VERIFY_BITS(u3, 26);
+ VERIFY_BITS(d, 37);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ t3 = c & M; c >>= 26; c += u3 * R1;
+ VERIFY_BITS(t3, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+
+ c += (uint64_t)a[0] * b[4]
+ + (uint64_t)a[1] * b[3]
+ + (uint64_t)a[2] * b[2]
+ + (uint64_t)a[3] * b[1]
+ + (uint64_t)a[4] * b[0];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ d += (uint64_t)a[5] * b[9]
+ + (uint64_t)a[6] * b[8]
+ + (uint64_t)a[7] * b[7]
+ + (uint64_t)a[8] * b[6]
+ + (uint64_t)a[9] * b[5];
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ u4 = d & M; d >>= 26; c += u4 * R0;
+ VERIFY_BITS(u4, 26);
+ VERIFY_BITS(d, 36);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ t4 = c & M; c >>= 26; c += u4 * R1;
+ VERIFY_BITS(t4, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)a[0] * b[5]
+ + (uint64_t)a[1] * b[4]
+ + (uint64_t)a[2] * b[3]
+ + (uint64_t)a[3] * b[2]
+ + (uint64_t)a[4] * b[1]
+ + (uint64_t)a[5] * b[0];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)a[6] * b[9]
+ + (uint64_t)a[7] * b[8]
+ + (uint64_t)a[8] * b[7]
+ + (uint64_t)a[9] * b[6];
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ u5 = d & M; d >>= 26; c += u5 * R0;
+ VERIFY_BITS(u5, 26);
+ VERIFY_BITS(d, 36);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ t5 = c & M; c >>= 26; c += u5 * R1;
+ VERIFY_BITS(t5, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)a[0] * b[6]
+ + (uint64_t)a[1] * b[5]
+ + (uint64_t)a[2] * b[4]
+ + (uint64_t)a[3] * b[3]
+ + (uint64_t)a[4] * b[2]
+ + (uint64_t)a[5] * b[1]
+ + (uint64_t)a[6] * b[0];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)a[7] * b[9]
+ + (uint64_t)a[8] * b[8]
+ + (uint64_t)a[9] * b[7];
+ VERIFY_BITS(d, 61);
+ /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ u6 = d & M; d >>= 26; c += u6 * R0;
+ VERIFY_BITS(u6, 26);
+ VERIFY_BITS(d, 35);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ t6 = c & M; c >>= 26; c += u6 * R1;
+ VERIFY_BITS(t6, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)a[0] * b[7]
+ + (uint64_t)a[1] * b[6]
+ + (uint64_t)a[2] * b[5]
+ + (uint64_t)a[3] * b[4]
+ + (uint64_t)a[4] * b[3]
+ + (uint64_t)a[5] * b[2]
+ + (uint64_t)a[6] * b[1]
+ + (uint64_t)a[7] * b[0];
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x8000007C00000007ULL);
+ /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)a[8] * b[9]
+ + (uint64_t)a[9] * b[8];
+ VERIFY_BITS(d, 58);
+ /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ u7 = d & M; d >>= 26; c += u7 * R0;
+ VERIFY_BITS(u7, 26);
+ VERIFY_BITS(d, 32);
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL);
+ /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ t7 = c & M; c >>= 26; c += u7 * R1;
+ VERIFY_BITS(t7, 26);
+ VERIFY_BITS(c, 38);
+ /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)a[0] * b[8]
+ + (uint64_t)a[1] * b[7]
+ + (uint64_t)a[2] * b[6]
+ + (uint64_t)a[3] * b[5]
+ + (uint64_t)a[4] * b[4]
+ + (uint64_t)a[5] * b[3]
+ + (uint64_t)a[6] * b[2]
+ + (uint64_t)a[7] * b[1]
+ + (uint64_t)a[8] * b[0];
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x9000007B80000008ULL);
+ /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)a[9] * b[9];
+ VERIFY_BITS(d, 57);
+ /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ u8 = d & M; d >>= 26; c += u8 * R0;
+ VERIFY_BITS(u8, 26);
+ VERIFY_BITS(d, 31);
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ r[3] = t3;
+ VERIFY_BITS(r[3], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[4] = t4;
+ VERIFY_BITS(r[4], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[5] = t5;
+ VERIFY_BITS(r[5], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[6] = t6;
+ VERIFY_BITS(r[6], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[7] = t7;
+ VERIFY_BITS(r[7], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ r[8] = c & M; c >>= 26; c += u8 * R1;
+ VERIFY_BITS(r[8], 26);
+ VERIFY_BITS(c, 39);
+ /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += d * R0 + t9;
+ VERIFY_BITS(c, 45);
+ /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);
+ VERIFY_BITS(r[9], 22);
+ VERIFY_BITS(c, 46);
+ /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ d = c * (R0 >> 4) + t0;
+ VERIFY_BITS(d, 56);
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[0] = d & M; d >>= 26;
+ VERIFY_BITS(r[0], 26);
+ VERIFY_BITS(d, 30);
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += c * (R1 >> 4) + t1;
+ VERIFY_BITS(d, 53);
+ VERIFY_CHECK(d <= 0x10000003FFFFBFULL);
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[1] = d & M; d >>= 26;
+ VERIFY_BITS(r[1], 26);
+ VERIFY_BITS(d, 27);
+ VERIFY_CHECK(d <= 0x4000000ULL);
+ /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += t2;
+ VERIFY_BITS(d, 27);
+ /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[2] = d;
+ VERIFY_BITS(r[2], 27);
+ /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+}
+
+SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
+ uint64_t c, d;
+ uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
+ uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
+ const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;
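+ /* Note: R0 + (R1 << 26) = 0x3D10 + 2^36 = 0x1000003D10, which equals
+ * 16 * (2^256 - p), i.e. 2^260 mod p; this is the constant by which a
+ * carry out of the tenth 26-bit limb is folded back into the low limbs. */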
+
+ VERIFY_BITS(a[0], 30);
+ VERIFY_BITS(a[1], 30);
+ VERIFY_BITS(a[2], 30);
+ VERIFY_BITS(a[3], 30);
+ VERIFY_BITS(a[4], 30);
+ VERIFY_BITS(a[5], 30);
+ VERIFY_BITS(a[6], 30);
+ VERIFY_BITS(a[7], 30);
+ VERIFY_BITS(a[8], 30);
+ VERIFY_BITS(a[9], 26);
+
+ /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n.
+ * px is a shorthand for sum(a[i]*a[x-i], i=0..x).
+ * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0].
+ */
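+ /* For instance, p2 = a[0]*a[2] + a[1]*a[1] + a[2]*a[0] = 2*a[0]*a[2] +
+ * a[1]^2, which is why the cross terms below double one operand
+ * ((a[i]*2) * a[j]) while the square terms appear once. */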
+
+ d = (uint64_t)(a[0]*2) * a[9]
+ + (uint64_t)(a[1]*2) * a[8]
+ + (uint64_t)(a[2]*2) * a[7]
+ + (uint64_t)(a[3]*2) * a[6]
+ + (uint64_t)(a[4]*2) * a[5];
+ /* VERIFY_BITS(d, 64); */
+ /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
+ t9 = d & M; d >>= 26;
+ VERIFY_BITS(t9, 26);
+ VERIFY_BITS(d, 38);
+ /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
+
+ c = (uint64_t)a[0] * a[0];
+ VERIFY_BITS(c, 60);
+ /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */
+ d += (uint64_t)(a[1]*2) * a[9]
+ + (uint64_t)(a[2]*2) * a[8]
+ + (uint64_t)(a[3]*2) * a[7]
+ + (uint64_t)(a[4]*2) * a[6]
+ + (uint64_t)a[5] * a[5];
+ VERIFY_BITS(d, 63);
+ /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+ u0 = d & M; d >>= 26; c += u0 * R0;
+ VERIFY_BITS(u0, 26);
+ VERIFY_BITS(d, 37);
+ VERIFY_BITS(c, 61);
+ /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+ t0 = c & M; c >>= 26; c += u0 * R1;
+ VERIFY_BITS(t0, 26);
+ VERIFY_BITS(c, 37);
+ /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+ /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[1];
+ VERIFY_BITS(c, 62);
+ /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ d += (uint64_t)(a[2]*2) * a[9]
+ + (uint64_t)(a[3]*2) * a[8]
+ + (uint64_t)(a[4]*2) * a[7]
+ + (uint64_t)(a[5]*2) * a[6];
+ VERIFY_BITS(d, 63);
+ /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ u1 = d & M; d >>= 26; c += u1 * R0;
+ VERIFY_BITS(u1, 26);
+ VERIFY_BITS(d, 37);
+ VERIFY_BITS(c, 63);
+ /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ t1 = c & M; c >>= 26; c += u1 * R1;
+ VERIFY_BITS(t1, 26);
+ VERIFY_BITS(c, 38);
+ /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+ /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[2]
+ + (uint64_t)a[1] * a[1];
+ VERIFY_BITS(c, 62);
+ /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ d += (uint64_t)(a[3]*2) * a[9]
+ + (uint64_t)(a[4]*2) * a[8]
+ + (uint64_t)(a[5]*2) * a[7]
+ + (uint64_t)a[6] * a[6];
+ VERIFY_BITS(d, 63);
+ /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ u2 = d & M; d >>= 26; c += u2 * R0;
+ VERIFY_BITS(u2, 26);
+ VERIFY_BITS(d, 37);
+ VERIFY_BITS(c, 63);
+ /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ t2 = c & M; c >>= 26; c += u2 * R1;
+ VERIFY_BITS(t2, 26);
+ VERIFY_BITS(c, 38);
+ /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+ /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[3]
+ + (uint64_t)(a[1]*2) * a[2];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ d += (uint64_t)(a[4]*2) * a[9]
+ + (uint64_t)(a[5]*2) * a[8]
+ + (uint64_t)(a[6]*2) * a[7];
+ VERIFY_BITS(d, 63);
+ /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ u3 = d & M; d >>= 26; c += u3 * R0;
+ VERIFY_BITS(u3, 26);
+ VERIFY_BITS(d, 37);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ t3 = c & M; c >>= 26; c += u3 * R1;
+ VERIFY_BITS(t3, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[4]
+ + (uint64_t)(a[1]*2) * a[3]
+ + (uint64_t)a[2] * a[2];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ d += (uint64_t)(a[5]*2) * a[9]
+ + (uint64_t)(a[6]*2) * a[8]
+ + (uint64_t)a[7] * a[7];
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ u4 = d & M; d >>= 26; c += u4 * R0;
+ VERIFY_BITS(u4, 26);
+ VERIFY_BITS(d, 36);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ t4 = c & M; c >>= 26; c += u4 * R1;
+ VERIFY_BITS(t4, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[5]
+ + (uint64_t)(a[1]*2) * a[4]
+ + (uint64_t)(a[2]*2) * a[3];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)(a[6]*2) * a[9]
+ + (uint64_t)(a[7]*2) * a[8];
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ u5 = d & M; d >>= 26; c += u5 * R0;
+ VERIFY_BITS(u5, 26);
+ VERIFY_BITS(d, 36);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ t5 = c & M; c >>= 26; c += u5 * R1;
+ VERIFY_BITS(t5, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[6]
+ + (uint64_t)(a[1]*2) * a[5]
+ + (uint64_t)(a[2]*2) * a[4]
+ + (uint64_t)a[3] * a[3];
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)(a[7]*2) * a[9]
+ + (uint64_t)a[8] * a[8];
+ VERIFY_BITS(d, 61);
+ /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ u6 = d & M; d >>= 26; c += u6 * R0;
+ VERIFY_BITS(u6, 26);
+ VERIFY_BITS(d, 35);
+ /* VERIFY_BITS(c, 64); */
+ /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ t6 = c & M; c >>= 26; c += u6 * R1;
+ VERIFY_BITS(t6, 26);
+ VERIFY_BITS(c, 39);
+ /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[7]
+ + (uint64_t)(a[1]*2) * a[6]
+ + (uint64_t)(a[2]*2) * a[5]
+ + (uint64_t)(a[3]*2) * a[4];
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x8000007C00000007ULL);
+ /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)(a[8]*2) * a[9];
+ VERIFY_BITS(d, 58);
+ /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ u7 = d & M; d >>= 26; c += u7 * R0;
+ VERIFY_BITS(u7, 26);
+ VERIFY_BITS(d, 32);
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL);
+ /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ t7 = c & M; c >>= 26; c += u7 * R1;
+ VERIFY_BITS(t7, 26);
+ VERIFY_BITS(c, 38);
+ /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ c += (uint64_t)(a[0]*2) * a[8]
+ + (uint64_t)(a[1]*2) * a[7]
+ + (uint64_t)(a[2]*2) * a[6]
+ + (uint64_t)(a[3]*2) * a[5]
+ + (uint64_t)a[4] * a[4];
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x9000007B80000008ULL);
+ /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint64_t)a[9] * a[9];
+ VERIFY_BITS(d, 57);
+ /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ u8 = d & M; d >>= 26; c += u8 * R0;
+ VERIFY_BITS(u8, 26);
+ VERIFY_BITS(d, 31);
+ /* VERIFY_BITS(c, 64); */
+ VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ r[3] = t3;
+ VERIFY_BITS(r[3], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[4] = t4;
+ VERIFY_BITS(r[4], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[5] = t5;
+ VERIFY_BITS(r[5], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[6] = t6;
+ VERIFY_BITS(r[6], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[7] = t7;
+ VERIFY_BITS(r[7], 26);
+ /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ r[8] = c & M; c >>= 26; c += u8 * R1;
+ VERIFY_BITS(r[8], 26);
+ VERIFY_BITS(c, 39);
+ /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += d * R0 + t9;
+ VERIFY_BITS(c, 45);
+ /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);
+ VERIFY_BITS(r[9], 22);
+ VERIFY_BITS(c, 46);
+ /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+ d = c * (R0 >> 4) + t0;
+ VERIFY_BITS(d, 56);
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[0] = d & M; d >>= 26;
+ VERIFY_BITS(r[0], 26);
+ VERIFY_BITS(d, 30);
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += c * (R1 >> 4) + t1;
+ VERIFY_BITS(d, 53);
+ VERIFY_CHECK(d <= 0x10000003FFFFBFULL);
+ /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[1] = d & M; d >>= 26;
+ VERIFY_BITS(r[1], 26);
+ VERIFY_BITS(d, 27);
+ VERIFY_CHECK(d <= 0x4000000ULL);
+ /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ d += t2;
+ VERIFY_BITS(d, 27);
+ /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[2] = d;
+ VERIFY_BITS(r[2], 27);
+ /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+}
+
+
+static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= 8);
+ VERIFY_CHECK(b->magnitude <= 8);
+ secp256k1_fe_verify(a);
+ secp256k1_fe_verify(b);
+ VERIFY_CHECK(r != b);
+#endif
+ secp256k1_fe_mul_inner(r->n, a->n, b->n);
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= 8);
+ secp256k1_fe_verify(a);
+#endif
+ secp256k1_fe_sqr_inner(r->n, a->n);
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
+ uint32_t mask0, mask1;
+ mask0 = flag + ~((uint32_t)0);
+ mask1 = ~mask0;
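+ /* mask0 = flag - 1 (mod 2^32): all ones when flag == 0, all zeros when
+ * flag == 1, making the limb selection below branch-free. */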
+ r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+ r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+ r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+ r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
+ r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
+ r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
+ r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
+ r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
+ r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
+#ifdef VERIFY
+ if (a->magnitude > r->magnitude) {
+ r->magnitude = a->magnitude;
+ }
+ r->normalized &= a->normalized;
+#endif
+}
+
+static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
+ uint32_t mask0, mask1;
+ mask0 = flag + ~((uint32_t)0);
+ mask1 = ~mask0;
+ r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+ r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+ r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+ r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
+ r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
+ r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
+ r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
+}
+
+static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+#endif
+ r->n[0] = a->n[0] | a->n[1] << 26;
+ r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
+ r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
+ r->n[3] = a->n[3] >> 18 | a->n[4] << 8;
+ r->n[4] = a->n[4] >> 24 | a->n[5] << 2 | a->n[6] << 28;
+ r->n[5] = a->n[6] >> 4 | a->n[7] << 22;
+ r->n[6] = a->n[7] >> 10 | a->n[8] << 16;
+ r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
+}
+
+static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
+ r->n[0] = a->n[0] & 0x3FFFFFFUL;
+ r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
+ r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
+ r->n[3] = a->n[2] >> 14 | ((a->n[3] << 18) & 0x3FFFFFFUL);
+ r->n[4] = a->n[3] >> 8 | ((a->n[4] << 24) & 0x3FFFFFFUL);
+ r->n[5] = (a->n[4] >> 2) & 0x3FFFFFFUL;
+ r->n[6] = a->n[4] >> 28 | ((a->n[5] << 4) & 0x3FFFFFFUL);
+ r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
+ r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
+ r->n[9] = a->n[7] >> 10;
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+#endif
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field_5x52.h b/crypto/secp256k1/libsecp256k1/src/field_5x52.h
new file mode 100644
index 000000000..8e69a560d
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_5x52.h
@@ -0,0 +1,47 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_REPR_
+#define _SECP256K1_FIELD_REPR_
+
+#include <stdint.h>
+
+typedef struct {
+ /* X = sum(i=0..4, elem[i]*2^52) mod n */
+ uint64_t n[5];
+#ifdef VERIFY
+ int magnitude;
+ int normalized;
+#endif
+} secp256k1_fe;
+
+/* Unpacks a constant into an overlapping multi-limbed FE element. */
+#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
+ (d0) | (((uint64_t)(d1) & 0xFFFFFUL) << 32), \
+ ((uint64_t)(d1) >> 20) | (((uint64_t)(d2)) << 12) | (((uint64_t)(d3) & 0xFFUL) << 44), \
+ ((uint64_t)(d3) >> 8) | (((uint64_t)(d4) & 0xFFFFFFFUL) << 24), \
+ ((uint64_t)(d4) >> 28) | (((uint64_t)(d5)) << 4) | (((uint64_t)(d6) & 0xFFFFUL) << 36), \
+ ((uint64_t)(d6) >> 16) | (((uint64_t)(d7)) << 16) \
+}
+
+#ifdef VERIFY
+#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1}
+#else
+#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))}
+#endif
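+
+/* Illustrative example (not part of the original source): the field element
+ * 1 would be written SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), where
+ * d7..d0 are the big-endian 32-bit words of the 256-bit constant. */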
+
+typedef struct {
+ uint64_t n[4];
+} secp256k1_fe_storage;
+
+#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
+ (d0) | (((uint64_t)(d1)) << 32), \
+ (d2) | (((uint64_t)(d3)) << 32), \
+ (d4) | (((uint64_t)(d5)) << 32), \
+ (d6) | (((uint64_t)(d7)) << 32) \
+}}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h b/crypto/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h
new file mode 100644
index 000000000..98cc004bf
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h
@@ -0,0 +1,502 @@
+/**********************************************************************
+ * Copyright (c) 2013-2014 Diederik Huys, Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+/**
+ * Changelog:
+ * - March 2013, Diederik Huys: original version
+ * - November 2014, Pieter Wuille: updated to use Peter Dettman's parallel multiplication algorithm
+ * - December 2014, Pieter Wuille: converted from YASM to GCC inline assembly
+ */
+
+#ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_
+#define _SECP256K1_FIELD_INNER5X52_IMPL_H_
+
+SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
+/**
+ * Registers: rdx:rax = multiplication accumulator
+ * r9:r8 = c
+ * r15:rcx = d
+ * r10-r14 = a0-a4
+ * rbx = b
+ * rdi = r
+ * rsi = a / t?
+ */
+ uint64_t tmp1, tmp2, tmp3;
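+ /* tmp1..tmp3 back the %q1..%q3 memory operands ("=m" constraints below);
+ * they spill t3, t4 and tx across the multiplication chain. */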
+__asm__ __volatile__(
+ "movq 0(%%rsi),%%r10\n"
+ "movq 8(%%rsi),%%r11\n"
+ "movq 16(%%rsi),%%r12\n"
+ "movq 24(%%rsi),%%r13\n"
+ "movq 32(%%rsi),%%r14\n"
+
+ /* d += a3 * b0 */
+ "movq 0(%%rbx),%%rax\n"
+ "mulq %%r13\n"
+ "movq %%rax,%%rcx\n"
+ "movq %%rdx,%%r15\n"
+ /* d += a2 * b1 */
+ "movq 8(%%rbx),%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a1 * b2 */
+ "movq 16(%%rbx),%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d = a0 * b3 */
+ "movq 24(%%rbx),%%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* c = a4 * b4 */
+ "movq 32(%%rbx),%%rax\n"
+ "mulq %%r14\n"
+ "movq %%rax,%%r8\n"
+ "movq %%rdx,%%r9\n"
+ /* d += (c & M) * R */
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* c >>= 52 (%%r8 only) */
+ "shrdq $52,%%r9,%%r8\n"
+ /* t3 (tmp1) = d & M */
+ "movq %%rcx,%%rsi\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rsi\n"
+ "movq %%rsi,%q1\n"
+ /* d >>= 52 */
+ "shrdq $52,%%r15,%%rcx\n"
+ "xorq %%r15,%%r15\n"
+ /* d += a4 * b0 */
+ "movq 0(%%rbx),%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a3 * b1 */
+ "movq 8(%%rbx),%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a2 * b2 */
+ "movq 16(%%rbx),%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a1 * b3 */
+ "movq 24(%%rbx),%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a0 * b4 */
+ "movq 32(%%rbx),%%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += c * R */
+ "movq %%r8,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* t4 = d & M (%%rsi) */
+ "movq %%rcx,%%rsi\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rsi\n"
+ /* d >>= 52 */
+ "shrdq $52,%%r15,%%rcx\n"
+ "xorq %%r15,%%r15\n"
+ /* tx = t4 >> 48 (tmp3) */
+ "movq %%rsi,%%rax\n"
+ "shrq $48,%%rax\n"
+ "movq %%rax,%q3\n"
+ /* t4 &= (M >> 4) (tmp2) */
+ "movq $0xffffffffffff,%%rax\n"
+ "andq %%rax,%%rsi\n"
+ "movq %%rsi,%q2\n"
+ /* c = a0 * b0 */
+ "movq 0(%%rbx),%%rax\n"
+ "mulq %%r10\n"
+ "movq %%rax,%%r8\n"
+ "movq %%rdx,%%r9\n"
+ /* d += a4 * b1 */
+ "movq 8(%%rbx),%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a3 * b2 */
+ "movq 16(%%rbx),%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a2 * b3 */
+ "movq 24(%%rbx),%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a1 * b4 */
+ "movq 32(%%rbx),%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* u0 = d & M (%%rsi) */
+ "movq %%rcx,%%rsi\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rsi\n"
+ /* d >>= 52 */
+ "shrdq $52,%%r15,%%rcx\n"
+ "xorq %%r15,%%r15\n"
+ /* u0 = (u0 << 4) | tx (%%rsi) */
+ "shlq $4,%%rsi\n"
+ "movq %q3,%%rax\n"
+ "orq %%rax,%%rsi\n"
+ /* c += u0 * (R >> 4) */
+ "movq $0x1000003d1,%%rax\n"
+ "mulq %%rsi\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* r[0] = c & M */
+ "movq %%r8,%%rax\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq %%rax,0(%%rdi)\n"
+ /* c >>= 52 */
+ "shrdq $52,%%r9,%%r8\n"
+ "xorq %%r9,%%r9\n"
+ /* c += a1 * b0 */
+ "movq 0(%%rbx),%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* c += a0 * b1 */
+ "movq 8(%%rbx),%%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d += a4 * b2 */
+ "movq 16(%%rbx),%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a3 * b3 */
+ "movq 24(%%rbx),%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a2 * b4 */
+ "movq 32(%%rbx),%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* c += (d & M) * R */
+ "movq %%rcx,%%rax\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d >>= 52 */
+ "shrdq $52,%%r15,%%rcx\n"
+ "xorq %%r15,%%r15\n"
+ /* r[1] = c & M */
+ "movq %%r8,%%rax\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq %%rax,8(%%rdi)\n"
+ /* c >>= 52 */
+ "shrdq $52,%%r9,%%r8\n"
+ "xorq %%r9,%%r9\n"
+ /* c += a2 * b0 */
+ "movq 0(%%rbx),%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* c += a1 * b1 */
+ "movq 8(%%rbx),%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* c += a0 * b2 (last use of %%r10 = a0) */
+ "movq 16(%%rbx),%%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */
+ "movq %q2,%%rsi\n"
+ "movq %q1,%%r10\n"
+ /* d += a4 * b3 */
+ "movq 24(%%rbx),%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* d += a3 * b4 */
+ "movq 32(%%rbx),%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rcx\n"
+ "adcq %%rdx,%%r15\n"
+ /* c += (d & M) * R */
+ "movq %%rcx,%%rax\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d >>= 52 (%%rcx only) */
+ "shrdq $52,%%r15,%%rcx\n"
+ /* r[2] = c & M */
+ "movq %%r8,%%rax\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq %%rax,16(%%rdi)\n"
+ /* c >>= 52 */
+ "shrdq $52,%%r9,%%r8\n"
+ "xorq %%r9,%%r9\n"
+ /* c += t3 */
+ "addq %%r10,%%r8\n"
+ /* c += d * R */
+ "movq %%rcx,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* r[3] = c & M */
+ "movq %%r8,%%rax\n"
+ "movq $0xfffffffffffff,%%rdx\n"
+ "andq %%rdx,%%rax\n"
+ "movq %%rax,24(%%rdi)\n"
+ /* c >>= 52 (%%r8 only) */
+ "shrdq $52,%%r9,%%r8\n"
+ /* c += t4 (%%r8 only) */
+ "addq %%rsi,%%r8\n"
+ /* r[4] = c */
+ "movq %%r8,32(%%rdi)\n"
+: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3)
+: "b"(b), "D"(r)
+: "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"
+);
+}
+
+SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
+/**
+ * Registers: rdx:rax = multiplication accumulator
+ * r9:r8 = c
+ * rcx:rbx = d
+ * r10-r14 = a0-a4
+ * r15 = M (0xfffffffffffff)
+ * rdi = r
+ * rsi = a / t?
+ */
+ uint64_t tmp1, tmp2, tmp3;
+__asm__ __volatile__(
+ "movq 0(%%rsi),%%r10\n"
+ "movq 8(%%rsi),%%r11\n"
+ "movq 16(%%rsi),%%r12\n"
+ "movq 24(%%rsi),%%r13\n"
+ "movq 32(%%rsi),%%r14\n"
+ "movq $0xfffffffffffff,%%r15\n"
+
+ /* d = (a0*2) * a3 */
+ "leaq (%%r10,%%r10,1),%%rax\n"
+ "mulq %%r13\n"
+ "movq %%rax,%%rbx\n"
+ "movq %%rdx,%%rcx\n"
+ /* d += (a1*2) * a2 */
+ "leaq (%%r11,%%r11,1),%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* c = a4 * a4 */
+ "movq %%r14,%%rax\n"
+ "mulq %%r14\n"
+ "movq %%rax,%%r8\n"
+ "movq %%rdx,%%r9\n"
+ /* d += (c & M) * R */
+ "andq %%r15,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* c >>= 52 (%%r8 only) */
+ "shrdq $52,%%r9,%%r8\n"
+ /* t3 (tmp1) = d & M */
+ "movq %%rbx,%%rsi\n"
+ "andq %%r15,%%rsi\n"
+ "movq %%rsi,%q1\n"
+ /* d >>= 52 */
+ "shrdq $52,%%rcx,%%rbx\n"
+ "xorq %%rcx,%%rcx\n"
+ /* a4 *= 2 */
+ "addq %%r14,%%r14\n"
+ /* d += a0 * a4 */
+ "movq %%r10,%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* d += (a1*2) * a3 */
+ "leaq (%%r11,%%r11,1),%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* d += a2 * a2 */
+ "movq %%r12,%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* d += c * R */
+ "movq %%r8,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* t4 = d & M (%%rsi) */
+ "movq %%rbx,%%rsi\n"
+ "andq %%r15,%%rsi\n"
+ /* d >>= 52 */
+ "shrdq $52,%%rcx,%%rbx\n"
+ "xorq %%rcx,%%rcx\n"
+ /* tx = t4 >> 48 (tmp3) */
+ "movq %%rsi,%%rax\n"
+ "shrq $48,%%rax\n"
+ "movq %%rax,%q3\n"
+ /* t4 &= (M >> 4) (tmp2) */
+ "movq $0xffffffffffff,%%rax\n"
+ "andq %%rax,%%rsi\n"
+ "movq %%rsi,%q2\n"
+ /* c = a0 * a0 */
+ "movq %%r10,%%rax\n"
+ "mulq %%r10\n"
+ "movq %%rax,%%r8\n"
+ "movq %%rdx,%%r9\n"
+ /* d += a1 * a4 */
+ "movq %%r11,%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* d += (a2*2) * a3 */
+ "leaq (%%r12,%%r12,1),%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* u0 = d & M (%%rsi) */
+ "movq %%rbx,%%rsi\n"
+ "andq %%r15,%%rsi\n"
+ /* d >>= 52 */
+ "shrdq $52,%%rcx,%%rbx\n"
+ "xorq %%rcx,%%rcx\n"
+ /* u0 = (u0 << 4) | tx (%%rsi) */
+ "shlq $4,%%rsi\n"
+ "movq %q3,%%rax\n"
+ "orq %%rax,%%rsi\n"
+ /* c += u0 * (R >> 4) */
+ "movq $0x1000003d1,%%rax\n"
+ "mulq %%rsi\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* r[0] = c & M */
+ "movq %%r8,%%rax\n"
+ "andq %%r15,%%rax\n"
+ "movq %%rax,0(%%rdi)\n"
+ /* c >>= 52 */
+ "shrdq $52,%%r9,%%r8\n"
+ "xorq %%r9,%%r9\n"
+ /* a0 *= 2 */
+ "addq %%r10,%%r10\n"
+ /* c += a0 * a1 */
+ "movq %%r10,%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d += a2 * a4 */
+ "movq %%r12,%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* d += a3 * a3 */
+ "movq %%r13,%%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* c += (d & M) * R */
+ "movq %%rbx,%%rax\n"
+ "andq %%r15,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d >>= 52 */
+ "shrdq $52,%%rcx,%%rbx\n"
+ "xorq %%rcx,%%rcx\n"
+ /* r[1] = c & M */
+ "movq %%r8,%%rax\n"
+ "andq %%r15,%%rax\n"
+ "movq %%rax,8(%%rdi)\n"
+ /* c >>= 52 */
+ "shrdq $52,%%r9,%%r8\n"
+ "xorq %%r9,%%r9\n"
+ /* c += a0 * a2 (last use of %%r10) */
+ "movq %%r10,%%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */
+ "movq %q2,%%rsi\n"
+ "movq %q1,%%r10\n"
+ /* c += a1 * a1 */
+ "movq %%r11,%%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d += a3 * a4 */
+ "movq %%r13,%%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax,%%rbx\n"
+ "adcq %%rdx,%%rcx\n"
+ /* c += (d & M) * R */
+ "movq %%rbx,%%rax\n"
+ "andq %%r15,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* d >>= 52 (%%rbx only) */
+ "shrdq $52,%%rcx,%%rbx\n"
+ /* r[2] = c & M */
+ "movq %%r8,%%rax\n"
+ "andq %%r15,%%rax\n"
+ "movq %%rax,16(%%rdi)\n"
+ /* c >>= 52 */
+ "shrdq $52,%%r9,%%r8\n"
+ "xorq %%r9,%%r9\n"
+ /* c += t3 */
+ "addq %%r10,%%r8\n"
+ /* c += d * R */
+ "movq %%rbx,%%rax\n"
+ "movq $0x1000003d10,%%rdx\n"
+ "mulq %%rdx\n"
+ "addq %%rax,%%r8\n"
+ "adcq %%rdx,%%r9\n"
+ /* r[3] = c & M */
+ "movq %%r8,%%rax\n"
+ "andq %%r15,%%rax\n"
+ "movq %%rax,24(%%rdi)\n"
+ /* c >>= 52 (%%r8 only) */
+ "shrdq $52,%%r9,%%r8\n"
+ /* c += t4 (%%r8 only) */
+ "addq %%rsi,%%r8\n"
+ /* r[4] = c */
+ "movq %%r8,32(%%rdi)\n"
+: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3)
+: "D"(r)
+: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"
+);
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h b/crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h
new file mode 100644
index 000000000..b31e24ab8
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h
@@ -0,0 +1,456 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
+#define _SECP256K1_FIELD_REPR_IMPL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <string.h>
+#include "util.h"
+#include "num.h"
+#include "field.h"
+
+#if defined(USE_ASM_X86_64)
+#include "field_5x52_asm_impl.h"
+#else
+#include "field_5x52_int128_impl.h"
+#endif
+
+/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,
+ * represented as 5 uint64_t's in base 2^52. The values are allowed to contain >52 bits each. In particular,
+ * each FieldElem has a 'magnitude' associated with it. Internally, a magnitude M means each element
+ * is at most M*(2^53-1), except the most significant one, which is limited to M*(2^49-1). All operations
+ * accept any input with magnitude at most M, and have different rules for propagating magnitude to their
+ * output.
+ */
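+
+/* For example, the magnitude-8 bound enforced on the inputs of
+ * secp256k1_fe_mul/sqr below follows from 8*(2^53-1) < 2^56 (and
+ * 8*(2^49-1) < 2^52 for the top limb), matching the per-limb bit bounds
+ * the inner multiplication routines assume. */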
+
+#ifdef VERIFY
+static void secp256k1_fe_verify(const secp256k1_fe *a) {
+ const uint64_t *d = a->n;
+ int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
+ /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
+ r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m);
+ r &= (a->magnitude >= 0);
+ r &= (a->magnitude <= 2048);
+ if (a->normalized) {
+ r &= (a->magnitude <= 1);
+ if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
+ r &= (d[0] < 0xFFFFEFFFFFC2FULL);
+ }
+ }
+ VERIFY_CHECK(r == 1);
+}
+#else
+static void secp256k1_fe_verify(const secp256k1_fe *a) {
+ (void)a;
+}
+#endif
+
+static void secp256k1_fe_normalize(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t m;
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ /* At most a single final reduction is needed; check if the value is >= the field characteristic */
+ x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
+ & (t0 >= 0xFFFFEFFFFFC2FULL));
+
+ /* Apply the final reduction (for constant-time behaviour, we do it always) */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+ /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
+ VERIFY_CHECK(t4 >> 48 == x);
+
+ /* Mask off the possible multiple of 2^256 from the final reduction */
+ t4 &= 0x0FFFFFFFFFFFFULL;
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
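+
+/* Unlike secp256k1_fe_normalize above, this weak variant only reduces the
+ * magnitude to 1; the result may still be a non-canonical representative
+ * (possibly >= p), which is why the normalized flag is not set. */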
+
+static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t m;
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ /* At most a single final reduction is needed; check if the value is >= the field characteristic */
+ x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
+ & (t0 >= 0xFFFFEFFFFFC2FULL));
+
+ if (x) {
+ t0 += 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+ /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
+ VERIFY_CHECK(t4 >> 48 == x);
+
+ /* Mask off the possible multiple of 2^256 from the final reduction */
+ t4 &= 0x0FFFFFFFFFFFFULL;
+ }
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
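+
+/* The _var suffix marks variable-time code: the final reduction above is
+ * applied only when needed (branching on x), unlike the constant-time
+ * secp256k1_fe_normalize, which performs it unconditionally. */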
+
+static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
+ uint64_t z0, z1;
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0 = t0; z1 = t0 ^ 0x1000003D0ULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
+ z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
+}
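+
+/* Note: 0x1000003D0 is the 52-bit complement of p's lowest limb
+ * (0xFFFFEFFFFFC2F), and 0xF000000000000 that of its 48-bit top limb, so
+ * z1 accumulates to all ones exactly when the raw value equals P. */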
+
+static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
+ uint64_t t0, t1, t2, t3, t4;
+ uint64_t z0, z1;
+ uint64_t x;
+
+ t0 = r->n[0];
+ t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ x = t4 >> 48;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+
+ /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
+ z0 = t0 & 0xFFFFFFFFFFFFFULL;
+ z1 = z0 ^ 0x1000003D0ULL;
+
+ /* Fast return path should catch the majority of cases */
+ if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
+ return 0;
+ }
+
+ t1 = r->n[1];
+ t2 = r->n[2];
+ t3 = r->n[3];
+
+ t4 &= 0x0FFFFFFFFFFFFULL;
+
+ t1 += (t0 >> 52);
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
+ z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
+}
+
+SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
+ r->n[0] = a;
+ r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
+ const uint64_t *t = a->n;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
+}
+
+SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ return a->n[0] & 1;
+}
+
+SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
+ int i;
+#ifdef VERIFY
+ a->magnitude = 0;
+ a->normalized = 1;
+#endif
+ for (i=0; i<5; i++) {
+ a->n[i] = 0;
+ }
+}
+
+static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
+ int i;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ VERIFY_CHECK(b->normalized);
+ secp256k1_fe_verify(a);
+ secp256k1_fe_verify(b);
+#endif
+ for (i = 4; i >= 0; i--) {
+ if (a->n[i] > b->n[i]) {
+ return 1;
+ }
+ if (a->n[i] < b->n[i]) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
+ int i;
+ r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
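+ /* Unpack the big-endian bytes four bits at a time; since 52 is a
+ * multiple of 4, a nibble never straddles a limb boundary. */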
+ for (i=0; i<32; i++) {
+ int j;
+ for (j=0; j<2; j++) {
+ int limb = (8*i+4*j)/52;
+ int shift = (8*i+4*j)%52;
+ r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift;
+ }
+ }
+ if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) {
+ return 0;
+ }
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+ return 1;
+}
+
+/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
+static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
+ int i;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ for (i=0; i<32; i++) {
+ int j;
+ int c = 0;
+ for (j=0; j<2; j++) {
+ int limb = (8*i+4*j)/52;
+ int shift = (8*i+4*j)%52;
+ c |= ((a->n[limb] >> shift) & 0xF) << (4 * j);
+ }
+ r[31-i] = c;
+ }
+}
+
+SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= m);
+ secp256k1_fe_verify(a);
+#endif
+ r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
+ r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
+ r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
+ r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
+ r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
+#ifdef VERIFY
+ r->magnitude = m + 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
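+
+/* The constants above are the limbs of 2*p; scaling them by (m + 1) makes
+ * each minuend exceed the corresponding limb of any input of magnitude m,
+ * so no limb underflows and the result is congruent to -a with magnitude
+ * m + 1. */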
+
+SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
+ r->n[0] *= a;
+ r->n[1] *= a;
+ r->n[2] *= a;
+ r->n[3] *= a;
+ r->n[4] *= a;
+#ifdef VERIFY
+ r->magnitude *= a;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ secp256k1_fe_verify(a);
+#endif
+ r->n[0] += a->n[0];
+ r->n[1] += a->n[1];
+ r->n[2] += a->n[2];
+ r->n[3] += a->n[3];
+ r->n[4] += a->n[4];
+#ifdef VERIFY
+ r->magnitude += a->magnitude;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= 8);
+ VERIFY_CHECK(b->magnitude <= 8);
+ secp256k1_fe_verify(a);
+ secp256k1_fe_verify(b);
+ VERIFY_CHECK(r != b);
+#endif
+ secp256k1_fe_mul_inner(r->n, a->n, b->n);
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= 8);
+ secp256k1_fe_verify(a);
+#endif
+ secp256k1_fe_sqr_inner(r->n, a->n);
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
+ uint64_t mask0, mask1;
+ mask0 = flag + ~((uint64_t)0);
+ mask1 = ~mask0;
+ r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+ r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+ r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+ r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
+#ifdef VERIFY
+ if (a->magnitude > r->magnitude) {
+ r->magnitude = a->magnitude;
+ }
+ r->normalized &= a->normalized;
+#endif
+}
+
+static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
+ uint64_t mask0, mask1;
+ mask0 = flag + ~((uint64_t)0);
+ mask1 = ~mask0;
+ r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+ r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+ r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+}
+
+static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+#endif
+ r->n[0] = a->n[0] | a->n[1] << 52;
+ r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
+ r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
+ r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
+}
+
+static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
+ r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
+ r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
+ r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
+ r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
+ r->n[4] = a->n[3] >> 16;
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+#endif
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h b/crypto/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h
new file mode 100644
index 000000000..9280bb5ea
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h
@@ -0,0 +1,277 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_
+#define _SECP256K1_FIELD_INNER5X52_IMPL_H_
+
+#include <stdint.h>
+
+#ifdef VERIFY
+#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
+#else
+#define VERIFY_BITS(x, n) do { } while(0)
+#endif
+
+SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
+ uint128_t c, d;
+ uint64_t t3, t4, tx, u0;
+ uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
+ const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
+
+ VERIFY_BITS(a[0], 56);
+ VERIFY_BITS(a[1], 56);
+ VERIFY_BITS(a[2], 56);
+ VERIFY_BITS(a[3], 56);
+ VERIFY_BITS(a[4], 52);
+ VERIFY_BITS(b[0], 56);
+ VERIFY_BITS(b[1], 56);
+ VERIFY_BITS(b[2], 56);
+ VERIFY_BITS(b[3], 56);
+ VERIFY_BITS(b[4], 52);
+ VERIFY_CHECK(r != b);
+
+ /* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
+ * px is a shorthand for sum(a[i]*b[x-i], i=0..x).
+ * Note that [x 0 0 0 0 0] = [x*R].
+ */
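+ /* Here R = 0x1000003D10 = 16 * (2^256 - p) = 2^260 mod p, so a term of
+ * weight 2^260 (one position past the top limb) can be folded back in by
+ * multiplying it by R, as in the (c & M) * R steps below. */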
+
+ d = (uint128_t)a0 * b[3]
+ + (uint128_t)a1 * b[2]
+ + (uint128_t)a2 * b[1]
+ + (uint128_t)a3 * b[0];
+ VERIFY_BITS(d, 114);
+ /* [d 0 0 0] = [p3 0 0 0] */
+ c = (uint128_t)a4 * b[4];
+ VERIFY_BITS(c, 112);
+ /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
+ d += (c & M) * R; c >>= 52;
+ VERIFY_BITS(d, 115);
+ VERIFY_BITS(c, 60);
+ /* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
+ t3 = d & M; d >>= 52;
+ VERIFY_BITS(t3, 52);
+ VERIFY_BITS(d, 63);
+ /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
+
+ d += (uint128_t)a0 * b[4]
+ + (uint128_t)a1 * b[3]
+ + (uint128_t)a2 * b[2]
+ + (uint128_t)a3 * b[1]
+ + (uint128_t)a4 * b[0];
+ VERIFY_BITS(d, 115);
+ /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+ d += c * R;
+ VERIFY_BITS(d, 116);
+ /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+ t4 = d & M; d >>= 52;
+ VERIFY_BITS(t4, 52);
+ VERIFY_BITS(d, 64);
+ /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+ tx = (t4 >> 48); t4 &= (M >> 4);
+ VERIFY_BITS(tx, 4);
+ VERIFY_BITS(t4, 48);
+ /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+
+ c = (uint128_t)a0 * b[0];
+ VERIFY_BITS(c, 112);
+ /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
+ d += (uint128_t)a1 * b[4]
+ + (uint128_t)a2 * b[3]
+ + (uint128_t)a3 * b[2]
+ + (uint128_t)a4 * b[1];
+ VERIFY_BITS(d, 115);
+ /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ u0 = d & M; d >>= 52;
+ VERIFY_BITS(u0, 52);
+ VERIFY_BITS(d, 63);
+ /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ u0 = (u0 << 4) | tx;
+ VERIFY_BITS(u0, 56);
+ /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ c += (uint128_t)u0 * (R >> 4);
+ VERIFY_BITS(c, 115);
+ /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ r[0] = c & M; c >>= 52;
+ VERIFY_BITS(r[0], 52);
+ VERIFY_BITS(c, 61);
+ /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
+
+ c += (uint128_t)a0 * b[1]
+ + (uint128_t)a1 * b[0];
+ VERIFY_BITS(c, 114);
+ /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
+ d += (uint128_t)a2 * b[4]
+ + (uint128_t)a3 * b[3]
+ + (uint128_t)a4 * b[2];
+ VERIFY_BITS(d, 114);
+ /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+ c += (d & M) * R; d >>= 52;
+ VERIFY_BITS(c, 115);
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+ r[1] = c & M; c >>= 52;
+ VERIFY_BITS(r[1], 52);
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+
+ c += (uint128_t)a0 * b[2]
+ + (uint128_t)a1 * b[1]
+ + (uint128_t)a2 * b[0];
+ VERIFY_BITS(c, 114);
+ /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint128_t)a3 * b[4]
+ + (uint128_t)a4 * b[3];
+ VERIFY_BITS(d, 114);
+ /* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += (d & M) * R; d >>= 52;
+ VERIFY_BITS(c, 115);
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[2] = c & M; c >>= 52;
+ VERIFY_BITS(r[2], 52);
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += d * R + t3;
+ VERIFY_BITS(c, 100);
+ /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[3] = c & M; c >>= 52;
+ VERIFY_BITS(r[3], 52);
+ VERIFY_BITS(c, 48);
+ /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += t4;
+ VERIFY_BITS(c, 49);
+ /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[4] = c;
+ VERIFY_BITS(r[4], 49);
+ /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+}
+
+SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
+ uint128_t c, d;
+ uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
+ uint64_t t3, t4, tx, u0;
+ const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
+
+ VERIFY_BITS(a[0], 56);
+ VERIFY_BITS(a[1], 56);
+ VERIFY_BITS(a[2], 56);
+ VERIFY_BITS(a[3], 56);
+ VERIFY_BITS(a[4], 52);
+
+ /** [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
+ * px is a shorthand for sum(a[i]*a[x-i], i=0..x).
+ * Note that [x 0 0 0 0 0] = [x*R].
+ */
+
+ d = (uint128_t)(a0*2) * a3
+ + (uint128_t)(a1*2) * a2;
+ VERIFY_BITS(d, 114);
+ /* [d 0 0 0] = [p3 0 0 0] */
+ c = (uint128_t)a4 * a4;
+ VERIFY_BITS(c, 112);
+ /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
+ d += (c & M) * R; c >>= 52;
+ VERIFY_BITS(d, 115);
+ VERIFY_BITS(c, 60);
+ /* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
+ t3 = d & M; d >>= 52;
+ VERIFY_BITS(t3, 52);
+ VERIFY_BITS(d, 63);
+ /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
+
+ a4 *= 2;
+ d += (uint128_t)a0 * a4
+ + (uint128_t)(a1*2) * a3
+ + (uint128_t)a2 * a2;
+ VERIFY_BITS(d, 115);
+ /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+ d += c * R;
+ VERIFY_BITS(d, 116);
+ /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+ t4 = d & M; d >>= 52;
+ VERIFY_BITS(t4, 52);
+ VERIFY_BITS(d, 64);
+ /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+ tx = (t4 >> 48); t4 &= (M >> 4);
+ VERIFY_BITS(tx, 4);
+ VERIFY_BITS(t4, 48);
+ /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
+
+ c = (uint128_t)a0 * a0;
+ VERIFY_BITS(c, 112);
+ /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
+ d += (uint128_t)a1 * a4
+ + (uint128_t)(a2*2) * a3;
+ VERIFY_BITS(d, 114);
+ /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ u0 = d & M; d >>= 52;
+ VERIFY_BITS(u0, 52);
+ VERIFY_BITS(d, 62);
+ /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ u0 = (u0 << 4) | tx;
+ VERIFY_BITS(u0, 56);
+ /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ c += (uint128_t)u0 * (R >> 4);
+ VERIFY_BITS(c, 113);
+ /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
+ r[0] = c & M; c >>= 52;
+ VERIFY_BITS(r[0], 52);
+ VERIFY_BITS(c, 61);
+ /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
+
+ a0 *= 2;
+ c += (uint128_t)a0 * a1;
+ VERIFY_BITS(c, 114);
+ /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
+ d += (uint128_t)a2 * a4
+ + (uint128_t)a3 * a3;
+ VERIFY_BITS(d, 114);
+ /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+ c += (d & M) * R; d >>= 52;
+ VERIFY_BITS(c, 115);
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+ r[1] = c & M; c >>= 52;
+ VERIFY_BITS(r[1], 52);
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+
+ c += (uint128_t)a0 * a2
+ + (uint128_t)a1 * a1;
+ VERIFY_BITS(c, 114);
+ /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
+ d += (uint128_t)a3 * a4;
+ VERIFY_BITS(d, 114);
+ /* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += (d & M) * R; d >>= 52;
+ VERIFY_BITS(c, 115);
+ VERIFY_BITS(d, 62);
+ /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[2] = c & M; c >>= 52;
+ VERIFY_BITS(r[2], 52);
+ VERIFY_BITS(c, 63);
+ /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+
+    c   += d * R + t3;
+ VERIFY_BITS(c, 100);
+ /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[3] = c & M; c >>= 52;
+ VERIFY_BITS(r[3], 52);
+ VERIFY_BITS(c, 48);
+ /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ c += t4;
+ VERIFY_BITS(c, 49);
+ /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+ r[4] = c;
+ VERIFY_BITS(r[4], 49);
+ /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+}
+
+#endif
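The reduction in both functions above leans on the identity noted in the comments, [x 0 0 0 0 0] = [x*R], i.e. 2^260 == R (mod p) with R = 0x1000003D10. A minimal GMP sketch to cross-check that constant (editorial, not part of the diff; assumes libgmp, build with gcc check_r.c -lgmp):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t p, t, r;
    mpz_inits(p, t, r, NULL);
    /* p = 2^256 - 2^32 - 977, the secp256k1 field prime. */
    mpz_ui_pow_ui(p, 2, 256);
    mpz_ui_pow_ui(t, 2, 32);
    mpz_sub(p, p, t);
    mpz_sub_ui(p, p, 977);
    /* r = 2^260 mod p; should print 0x1000003d10, the R used above. */
    mpz_ui_pow_ui(r, 2, 260);
    mpz_mod(r, r, p);
    gmp_printf("2^260 mod p = 0x%Zx\n", r);
    mpz_clears(p, t, r, NULL);
    return 0;
}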
diff --git a/crypto/secp256k1/libsecp256k1/src/field_impl.h b/crypto/secp256k1/libsecp256k1/src/field_impl.h
new file mode 100644
index 000000000..551a6243e
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_impl.h
@@ -0,0 +1,271 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_IMPL_H_
+#define _SECP256K1_FIELD_IMPL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include "util.h"
+
+#if defined(USE_FIELD_10X26)
+#include "field_10x26_impl.h"
+#elif defined(USE_FIELD_5X52)
+#include "field_5x52_impl.h"
+#else
+#error "Please select field implementation"
+#endif
+
+SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b) {
+ secp256k1_fe na;
+ secp256k1_fe_negate(&na, a, 1);
+ secp256k1_fe_add(&na, b);
+ return secp256k1_fe_normalizes_to_zero_var(&na);
+}
+
+static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a) {
+ secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
+ int j;
+
+ /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
+ * { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
+ * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
+ */
+
+ secp256k1_fe_sqr(&x2, a);
+ secp256k1_fe_mul(&x2, &x2, a);
+
+ secp256k1_fe_sqr(&x3, &x2);
+ secp256k1_fe_mul(&x3, &x3, a);
+
+ x6 = x3;
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&x6, &x6);
+ }
+ secp256k1_fe_mul(&x6, &x6, &x3);
+
+ x9 = x6;
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&x9, &x9);
+ }
+ secp256k1_fe_mul(&x9, &x9, &x3);
+
+ x11 = x9;
+ for (j=0; j<2; j++) {
+ secp256k1_fe_sqr(&x11, &x11);
+ }
+ secp256k1_fe_mul(&x11, &x11, &x2);
+
+ x22 = x11;
+ for (j=0; j<11; j++) {
+ secp256k1_fe_sqr(&x22, &x22);
+ }
+ secp256k1_fe_mul(&x22, &x22, &x11);
+
+ x44 = x22;
+ for (j=0; j<22; j++) {
+ secp256k1_fe_sqr(&x44, &x44);
+ }
+ secp256k1_fe_mul(&x44, &x44, &x22);
+
+ x88 = x44;
+ for (j=0; j<44; j++) {
+ secp256k1_fe_sqr(&x88, &x88);
+ }
+ secp256k1_fe_mul(&x88, &x88, &x44);
+
+ x176 = x88;
+ for (j=0; j<88; j++) {
+ secp256k1_fe_sqr(&x176, &x176);
+ }
+ secp256k1_fe_mul(&x176, &x176, &x88);
+
+ x220 = x176;
+ for (j=0; j<44; j++) {
+ secp256k1_fe_sqr(&x220, &x220);
+ }
+ secp256k1_fe_mul(&x220, &x220, &x44);
+
+ x223 = x220;
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&x223, &x223);
+ }
+ secp256k1_fe_mul(&x223, &x223, &x3);
+
+ /* The final result is then assembled using a sliding window over the blocks. */
+
+ t1 = x223;
+ for (j=0; j<23; j++) {
+ secp256k1_fe_sqr(&t1, &t1);
+ }
+ secp256k1_fe_mul(&t1, &t1, &x22);
+ for (j=0; j<6; j++) {
+ secp256k1_fe_sqr(&t1, &t1);
+ }
+ secp256k1_fe_mul(&t1, &t1, &x2);
+ secp256k1_fe_sqr(&t1, &t1);
+ secp256k1_fe_sqr(r, &t1);
+
+ /* Check that a square root was actually calculated */
+
+ secp256k1_fe_sqr(&t1, r);
+ return secp256k1_fe_equal_var(&t1, a);
+}
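Since p == 3 (mod 4), the square root computed above is simply a^((p+1)/4); the addition chain only evaluates that fixed exponent cheaply. A GMP sketch of the same computation on an arbitrary guaranteed square (editorial, not part of the diff; 123456789 is an arbitrary test value):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t p, t, e, a, r, other;
    mpz_inits(p, t, e, a, r, other, NULL);
    mpz_ui_pow_ui(p, 2, 256);
    mpz_ui_pow_ui(t, 2, 32);
    mpz_sub(p, p, t);
    mpz_sub_ui(p, p, 977);            /* p = 2^256 - 2^32 - 977 */
    mpz_add_ui(e, p, 1);
    mpz_fdiv_q_ui(e, e, 4);           /* e = (p+1)/4; exact since p == 3 (mod 4) */
    /* Per the comment above: blocks of 2 + 22 + 223 ones -> expect 247. */
    printf("popcount((p+1)/4) = %lu\n", (unsigned long)mpz_popcount(e));
    mpz_set_ui(t, 123456789);
    mpz_powm_ui(a, t, 2, p);          /* a = t^2 is certainly a square */
    mpz_powm(r, a, e, p);             /* r = a^((p+1)/4) */
    mpz_sub(other, p, t);             /* the other square root of a */
    printf("recovered a root: %s\n",
           (mpz_cmp(r, t) == 0 || mpz_cmp(r, other) == 0) ? "yes" : "no");
    mpz_clears(p, t, e, a, r, other, NULL);
    return 0;
}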
+
+static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) {
+ secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
+ int j;
+
+    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths drawn
+     *  from { 1, 2, 22, 223 } (the length-1 block occurs twice). Use an addition
+     *  chain to calculate 2^n - 1 for each distinct block length:
+ * [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
+ */
+
+ secp256k1_fe_sqr(&x2, a);
+ secp256k1_fe_mul(&x2, &x2, a);
+
+ secp256k1_fe_sqr(&x3, &x2);
+ secp256k1_fe_mul(&x3, &x3, a);
+
+ x6 = x3;
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&x6, &x6);
+ }
+ secp256k1_fe_mul(&x6, &x6, &x3);
+
+ x9 = x6;
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&x9, &x9);
+ }
+ secp256k1_fe_mul(&x9, &x9, &x3);
+
+ x11 = x9;
+ for (j=0; j<2; j++) {
+ secp256k1_fe_sqr(&x11, &x11);
+ }
+ secp256k1_fe_mul(&x11, &x11, &x2);
+
+ x22 = x11;
+ for (j=0; j<11; j++) {
+ secp256k1_fe_sqr(&x22, &x22);
+ }
+ secp256k1_fe_mul(&x22, &x22, &x11);
+
+ x44 = x22;
+ for (j=0; j<22; j++) {
+ secp256k1_fe_sqr(&x44, &x44);
+ }
+ secp256k1_fe_mul(&x44, &x44, &x22);
+
+ x88 = x44;
+ for (j=0; j<44; j++) {
+ secp256k1_fe_sqr(&x88, &x88);
+ }
+ secp256k1_fe_mul(&x88, &x88, &x44);
+
+ x176 = x88;
+ for (j=0; j<88; j++) {
+ secp256k1_fe_sqr(&x176, &x176);
+ }
+ secp256k1_fe_mul(&x176, &x176, &x88);
+
+ x220 = x176;
+ for (j=0; j<44; j++) {
+ secp256k1_fe_sqr(&x220, &x220);
+ }
+ secp256k1_fe_mul(&x220, &x220, &x44);
+
+ x223 = x220;
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&x223, &x223);
+ }
+ secp256k1_fe_mul(&x223, &x223, &x3);
+
+ /* The final result is then assembled using a sliding window over the blocks. */
+
+ t1 = x223;
+ for (j=0; j<23; j++) {
+ secp256k1_fe_sqr(&t1, &t1);
+ }
+ secp256k1_fe_mul(&t1, &t1, &x22);
+ for (j=0; j<5; j++) {
+ secp256k1_fe_sqr(&t1, &t1);
+ }
+ secp256k1_fe_mul(&t1, &t1, a);
+ for (j=0; j<3; j++) {
+ secp256k1_fe_sqr(&t1, &t1);
+ }
+ secp256k1_fe_mul(&t1, &t1, &x2);
+ for (j=0; j<2; j++) {
+ secp256k1_fe_sqr(&t1, &t1);
+ }
+ secp256k1_fe_mul(r, a, &t1);
+}
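secp256k1_fe_inv likewise raises to the fixed exponent p - 2, which is the inverse by Fermat's little theorem. A GMP cross-check against the extended-Euclid inverse (editorial, not part of the diff; 987654321 is an arbitrary test value):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t p, t, e, a, r1, r2;
    mpz_inits(p, t, e, a, r1, r2, NULL);
    mpz_ui_pow_ui(p, 2, 256);
    mpz_ui_pow_ui(t, 2, 32);
    mpz_sub(p, p, t);
    mpz_sub_ui(p, p, 977);        /* p = 2^256 - 2^32 - 977 */
    mpz_sub_ui(e, p, 2);          /* e = p - 2 */
    mpz_set_ui(a, 987654321);
    mpz_powm(r1, a, e, p);        /* Fermat: a^(p-2) mod p */
    mpz_invert(r2, a, p);         /* extended Euclid */
    printf("inverses agree: %s\n", mpz_cmp(r1, r2) == 0 ? "yes" : "no");
    mpz_clears(p, t, e, a, r1, r2, NULL);
    return 0;
}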
+
+static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
+#if defined(USE_FIELD_INV_BUILTIN)
+ secp256k1_fe_inv(r, a);
+#elif defined(USE_FIELD_INV_NUM)
+ secp256k1_num n, m;
+ static const secp256k1_fe negone = SECP256K1_FE_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
+ );
+ /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
+ static const unsigned char prime[32] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
+ };
+ unsigned char b[32];
+ secp256k1_fe c = *a;
+ secp256k1_fe_normalize_var(&c);
+ secp256k1_fe_get_b32(b, &c);
+ secp256k1_num_set_bin(&n, b, 32);
+ secp256k1_num_set_bin(&m, prime, 32);
+ secp256k1_num_mod_inverse(&n, &n, &m);
+ secp256k1_num_get_bin(b, 32, &n);
+ VERIFY_CHECK(secp256k1_fe_set_b32(r, b));
+ /* Verify the result is the (unique) valid inverse using non-GMP code. */
+ secp256k1_fe_mul(&c, &c, r);
+ secp256k1_fe_add(&c, &negone);
+ CHECK(secp256k1_fe_normalizes_to_zero_var(&c));
+#else
+#error "Please select field inverse implementation"
+#endif
+}
+
+static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a) {
+ secp256k1_fe u;
+ size_t i;
+ if (len < 1) {
+ return;
+ }
+
+ VERIFY_CHECK((r + len <= a) || (a + len <= r));
+
+ r[0] = a[0];
+
+ i = 0;
+ while (++i < len) {
+ secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]);
+ }
+
+ secp256k1_fe_inv_var(&u, &r[--i]);
+
+ while (i > 0) {
+ size_t j = i--;
+ secp256k1_fe_mul(&r[j], &r[i], &u);
+ secp256k1_fe_mul(&u, &u, &a[j]);
+ }
+
+ r[0] = u;
+}
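The loop structure above is Montgomery's batch-inversion trick: build prefix products, invert the running total once, then peel individual inverses off backwards. One modular inversion replaces n of them, at the cost of roughly 3(n-1) multiplications. A self-contained illustration over a small prime (editorial, not part of the diff; the prime 1000003 and the inputs are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define P 1000003ULL  /* small prime, for illustration only */

static uint64_t mulmod(uint64_t a, uint64_t b) { return a * b % P; }

static uint64_t invmod(uint64_t a) {            /* a^(P-2) mod P */
    uint64_t r = 1, e = P - 2;
    while (e) {
        if (e & 1) r = mulmod(r, a);
        a = mulmod(a, a);
        e >>= 1;
    }
    return r;
}

int main(void) {
    uint64_t a[4] = {2, 3, 5, 7}, r[4], u;
    size_t i;
    r[0] = a[0];
    for (i = 1; i < 4; i++) r[i] = mulmod(r[i - 1], a[i]);  /* prefix products */
    u = invmod(r[3]);                       /* the single inversion */
    for (i = 3; i > 0; i--) {
        r[i] = mulmod(r[i - 1], u);         /* r[i] = 1/a[i] */
        u = mulmod(u, a[i]);                /* strip a[i] out of u */
    }
    r[0] = u;
    for (i = 0; i < 4; i++)
        printf("1/%llu ok: %d\n", (unsigned long long)a[i],
               mulmod(a[i], r[i]) == 1);
    return 0;
}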
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/gen_context.c b/crypto/secp256k1/libsecp256k1/src/gen_context.c
new file mode 100644
index 000000000..1835fd491
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/gen_context.c
@@ -0,0 +1,74 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014, 2015 Thomas Daede, Cory Fields *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#define USE_BASIC_CONFIG 1
+
+#include "basic-config.h"
+#include "include/secp256k1.h"
+#include "field_impl.h"
+#include "scalar_impl.h"
+#include "group_impl.h"
+#include "ecmult_gen_impl.h"
+
+static void default_error_callback_fn(const char* str, void* data) {
+ (void)data;
+ fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
+ abort();
+}
+
+static const secp256k1_callback default_error_callback = {
+ default_error_callback_fn,
+ NULL
+};
+
+int main(int argc, char **argv) {
+ secp256k1_ecmult_gen_context ctx;
+ int inner;
+ int outer;
+ FILE* fp;
+
+ (void)argc;
+ (void)argv;
+
+ fp = fopen("src/ecmult_static_context.h","w");
+ if (fp == NULL) {
+ fprintf(stderr, "Could not open src/ecmult_static_context.h for writing!\n");
+ return -1;
+ }
+
+ fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
+ fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
+ fprintf(fp, "#include \"group.h\"\n");
+ fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n");
+ fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n");
+
+ secp256k1_ecmult_gen_context_init(&ctx);
+ secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback);
+ for(outer = 0; outer != 64; outer++) {
+ fprintf(fp,"{\n");
+ for(inner = 0; inner != 16; inner++) {
+ fprintf(fp," SC(%uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu)", SECP256K1_GE_STORAGE_CONST_GET((*ctx.prec)[outer][inner]));
+ if (inner != 15) {
+ fprintf(fp,",\n");
+ } else {
+ fprintf(fp,"\n");
+ }
+ }
+ if (outer != 63) {
+ fprintf(fp,"},\n");
+ } else {
+ fprintf(fp,"}\n");
+ }
+ }
+ fprintf(fp,"};\n");
+ secp256k1_ecmult_gen_context_clear(&ctx);
+
+ fprintf(fp, "#undef SC\n");
+ fprintf(fp, "#endif\n");
+ fclose(fp);
+
+ return 0;
+}
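For orientation, the file this program writes, src/ecmult_static_context.h, has roughly the following shape (an editorial illustration with the 16 words of each precomputed point elided; not actual generated output):

#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_
#define _SECP256K1_ECMULT_STATIC_CONTEXT_
#include "group.h"
#define SC SECP256K1_GE_STORAGE_CONST
static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {
{
 SC(/* 16 32-bit words of a precomputed point */),
 /* ... 15 more entries ... */
},
/* ... 63 more rows ... */
};
#undef SC
#endif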
diff --git a/crypto/secp256k1/libsecp256k1/src/group.h b/crypto/secp256k1/libsecp256k1/src/group.h
new file mode 100644
index 000000000..89b079d5c
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/group.h
@@ -0,0 +1,141 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_GROUP_
+#define _SECP256K1_GROUP_
+
+#include "num.h"
+#include "field.h"
+
+/** A group element of the secp256k1 curve, in affine coordinates. */
+typedef struct {
+ secp256k1_fe x;
+ secp256k1_fe y;
+ int infinity; /* whether this represents the point at infinity */
+} secp256k1_ge;
+
+#define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0}
+#define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
+
+/** A group element of the secp256k1 curve, in jacobian coordinates. */
+typedef struct {
+ secp256k1_fe x; /* actual X: x/z^2 */
+ secp256k1_fe y; /* actual Y: y/z^3 */
+ secp256k1_fe z;
+ int infinity; /* whether this represents the point at infinity */
+} secp256k1_gej;
+
+#define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0}
+#define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
+
+typedef struct {
+ secp256k1_fe_storage x;
+ secp256k1_fe_storage y;
+} secp256k1_ge_storage;
+
+#define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))}
+
+#define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y)
+
+/** Set a group element equal to the point at infinity */
+static void secp256k1_ge_set_infinity(secp256k1_ge *r);
+
+/** Set a group element equal to the point with given X and Y coordinates */
+static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y);
+
+/** Set a group element (affine) equal to the point with the given X coordinate, and given oddness
+ * for Y. Return value indicates whether the result is valid. */
+static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd);
+
+/** Check whether a group element is the point at infinity. */
+static int secp256k1_ge_is_infinity(const secp256k1_ge *a);
+
+/** Check whether a group element is valid (i.e., on the curve). */
+static int secp256k1_ge_is_valid_var(const secp256k1_ge *a);
+
+static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
+
+/** Set a group element equal to another which is given in jacobian coordinates */
+static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a);
+
+/** Set a batch of group elements equal to the inputs given in jacobian coordinates */
+static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb);
+
+/** Set a batch of group elements equal to the inputs given in jacobian
+ * coordinates (with known z-ratios). zr must contain the known z-ratios such
+ * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. */
+static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr);
+
+/** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to
+ * the same global z "denominator". zr must contain the known z-ratios such
+ * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y
+ * coordinates of the result are stored in r, the common z coordinate is
+ * stored in globalz. */
+static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr);
+
+/** Set a group element (jacobian) equal to the point at infinity. */
+static void secp256k1_gej_set_infinity(secp256k1_gej *r);
+
+/** Set a group element (jacobian) equal to the point with given X and Y coordinates. */
+static void secp256k1_gej_set_xy(secp256k1_gej *r, const secp256k1_fe *x, const secp256k1_fe *y);
+
+/** Set a group element (jacobian) equal to another which is given in affine coordinates. */
+static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a);
+
+/** Compare the X coordinate of a group element (jacobian). */
+static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a);
+
+/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
+static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a);
+
+/** Check whether a group element is the point at infinity. */
+static int secp256k1_gej_is_infinity(const secp256k1_gej *a);
+
+/** Set r equal to the double of a. If rzr is non-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0).
+ *  a may not be infinity. Constant time. */
+static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
+
+/** Set r equal to the double of a. If rzr is non-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */
+static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
+
+/** Set r equal to the sum of a and b. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */
+static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr);
+
+/** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */
+static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b);
+
+/** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient
+ than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time
+ guarantee, and b is allowed to be infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */
+static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr);
+
+/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
+static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
+
+#ifdef USE_ENDOMORPHISM
+/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
+static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
+#endif
+
+/** Clear a secp256k1_gej to prevent leaking sensitive information. */
+static void secp256k1_gej_clear(secp256k1_gej *r);
+
+/** Clear a secp256k1_ge to prevent leaking sensitive information. */
+static void secp256k1_ge_clear(secp256k1_ge *r);
+
+/** Convert a group element to the storage type. */
+static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a);
+
+/** Convert a group element back from the storage type. */
+static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a);
+
+/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
+static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag);
+
+/** Rescale a jacobian point by b which must be non-zero. Constant-time. */
+static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/group_impl.h b/crypto/secp256k1/libsecp256k1/src/group_impl.h
new file mode 100644
index 000000000..fe0a35929
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/group_impl.h
@@ -0,0 +1,632 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_GROUP_IMPL_H_
+#define _SECP256K1_GROUP_IMPL_H_
+
+#include <string.h>
+
+#include "num.h"
+#include "field.h"
+#include "group.h"
+
+/** Generator for secp256k1, value 'g' defined in
+ * "Standards for Efficient Cryptography" (SEC2) 2.7.1.
+ */
+static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
+ 0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
+ 0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
+ 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
+ 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
+);
+
+static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
+ secp256k1_fe zi2;
+ secp256k1_fe zi3;
+ secp256k1_fe_sqr(&zi2, zi);
+ secp256k1_fe_mul(&zi3, &zi2, zi);
+ secp256k1_fe_mul(&r->x, &a->x, &zi2);
+ secp256k1_fe_mul(&r->y, &a->y, &zi3);
+ r->infinity = a->infinity;
+}
+
+static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
+ r->infinity = 1;
+}
+
+static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
+ r->infinity = 0;
+ r->x = *x;
+ r->y = *y;
+}
+
+static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
+ return a->infinity;
+}
+
+static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
+ *r = *a;
+ secp256k1_fe_normalize_weak(&r->y);
+ secp256k1_fe_negate(&r->y, &r->y, 1);
+}
+
+static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
+ secp256k1_fe z2, z3;
+ r->infinity = a->infinity;
+ secp256k1_fe_inv(&a->z, &a->z);
+ secp256k1_fe_sqr(&z2, &a->z);
+ secp256k1_fe_mul(&z3, &a->z, &z2);
+ secp256k1_fe_mul(&a->x, &a->x, &z2);
+ secp256k1_fe_mul(&a->y, &a->y, &z3);
+ secp256k1_fe_set_int(&a->z, 1);
+ r->x = a->x;
+ r->y = a->y;
+}
+
+static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
+ secp256k1_fe z2, z3;
+ r->infinity = a->infinity;
+ if (a->infinity) {
+ return;
+ }
+ secp256k1_fe_inv_var(&a->z, &a->z);
+ secp256k1_fe_sqr(&z2, &a->z);
+ secp256k1_fe_mul(&z3, &a->z, &z2);
+ secp256k1_fe_mul(&a->x, &a->x, &z2);
+ secp256k1_fe_mul(&a->y, &a->y, &z3);
+ secp256k1_fe_set_int(&a->z, 1);
+ r->x = a->x;
+ r->y = a->y;
+}
+
+static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb) {
+ secp256k1_fe *az;
+ secp256k1_fe *azi;
+ size_t i;
+ size_t count = 0;
+ az = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * len);
+ for (i = 0; i < len; i++) {
+ if (!a[i].infinity) {
+ az[count++] = a[i].z;
+ }
+ }
+
+ azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count);
+ secp256k1_fe_inv_all_var(count, azi, az);
+ free(az);
+
+ count = 0;
+ for (i = 0; i < len; i++) {
+ r[i].infinity = a[i].infinity;
+ if (!a[i].infinity) {
+ secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]);
+ }
+ }
+ free(azi);
+}
+
+static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr) {
+ size_t i = len - 1;
+ secp256k1_fe zi;
+
+ if (len > 0) {
+ /* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */
+ secp256k1_fe_inv(&zi, &a[i].z);
+ secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);
+
+        /* Work our way backwards, using the z-ratios to scale the x/y values. */
+ while (i > 0) {
+ secp256k1_fe_mul(&zi, &zi, &zr[i]);
+ i--;
+ secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);
+ }
+ }
+}
+
+static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) {
+ size_t i = len - 1;
+ secp256k1_fe zs;
+
+ if (len > 0) {
+ /* The z of the final point gives us the "global Z" for the table. */
+ r[i].x = a[i].x;
+ r[i].y = a[i].y;
+ *globalz = a[i].z;
+ r[i].infinity = 0;
+ zs = zr[i];
+
+ /* Work our way backwards, using the z-ratios to scale the x/y values. */
+ while (i > 0) {
+ if (i != len - 1) {
+ secp256k1_fe_mul(&zs, &zs, &zr[i]);
+ }
+ i--;
+ secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs);
+ }
+ }
+}
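The rescaling this relies on: a Jacobian point (X, Y, z) represents the same affine point as (c^2 X, c^3 Y, cz) for any nonzero c. Taking c = Z/z_i, with Z the z coordinate of the last table entry, gives every output the common denominator Z. As an editorial note, in LaTeX:

    (X_i : Y_i : z_i) \sim \Bigl( X_i \bigl(\tfrac{Z}{z_i}\bigr)^2 : Y_i \bigl(\tfrac{Z}{z_i}\bigr)^3 : Z \Bigr)

where the ratios Z/z_i are exactly what zs accumulates backwards from the zr array, so no field inversion is needed at all.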
+
+static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
+ r->infinity = 1;
+ secp256k1_fe_set_int(&r->x, 0);
+ secp256k1_fe_set_int(&r->y, 0);
+ secp256k1_fe_set_int(&r->z, 0);
+}
+
+static void secp256k1_gej_set_xy(secp256k1_gej *r, const secp256k1_fe *x, const secp256k1_fe *y) {
+ r->infinity = 0;
+ r->x = *x;
+ r->y = *y;
+ secp256k1_fe_set_int(&r->z, 1);
+}
+
+static void secp256k1_gej_clear(secp256k1_gej *r) {
+ r->infinity = 0;
+ secp256k1_fe_clear(&r->x);
+ secp256k1_fe_clear(&r->y);
+ secp256k1_fe_clear(&r->z);
+}
+
+static void secp256k1_ge_clear(secp256k1_ge *r) {
+ r->infinity = 0;
+ secp256k1_fe_clear(&r->x);
+ secp256k1_fe_clear(&r->y);
+}
+
+static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
+ secp256k1_fe x2, x3, c;
+ r->x = *x;
+ secp256k1_fe_sqr(&x2, x);
+ secp256k1_fe_mul(&x3, x, &x2);
+ r->infinity = 0;
+ secp256k1_fe_set_int(&c, 7);
+ secp256k1_fe_add(&c, &x3);
+ if (!secp256k1_fe_sqrt_var(&r->y, &c)) {
+ return 0;
+ }
+ secp256k1_fe_normalize_var(&r->y);
+ if (secp256k1_fe_is_odd(&r->y) != odd) {
+ secp256k1_fe_negate(&r->y, &r->y, 1);
+ }
+ return 1;
+}
+
+static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
+ r->infinity = a->infinity;
+ r->x = a->x;
+ r->y = a->y;
+ secp256k1_fe_set_int(&r->z, 1);
+}
+
+static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) {
+ secp256k1_fe r, r2;
+ VERIFY_CHECK(!a->infinity);
+ secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
+ r2 = a->x; secp256k1_fe_normalize_weak(&r2);
+ return secp256k1_fe_equal_var(&r, &r2);
+}
+
+static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
+ r->infinity = a->infinity;
+ r->x = a->x;
+ r->y = a->y;
+ r->z = a->z;
+ secp256k1_fe_normalize_weak(&r->y);
+ secp256k1_fe_negate(&r->y, &r->y, 1);
+}
+
+static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
+ return a->infinity;
+}
+
+static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
+ secp256k1_fe y2, x3, z2, z6;
+ if (a->infinity) {
+ return 0;
+ }
+ /** y^2 = x^3 + 7
+ * (Y/Z^3)^2 = (X/Z^2)^3 + 7
+ * Y^2 / Z^6 = X^3 / Z^6 + 7
+ * Y^2 = X^3 + 7*Z^6
+ */
+ secp256k1_fe_sqr(&y2, &a->y);
+ secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
+ secp256k1_fe_sqr(&z2, &a->z);
+ secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
+ secp256k1_fe_mul_int(&z6, 7);
+ secp256k1_fe_add(&x3, &z6);
+ secp256k1_fe_normalize_weak(&x3);
+ return secp256k1_fe_equal_var(&y2, &x3);
+}
+
+static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
+ secp256k1_fe y2, x3, c;
+ if (a->infinity) {
+ return 0;
+ }
+ /* y^2 = x^3 + 7 */
+ secp256k1_fe_sqr(&y2, &a->y);
+ secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
+ secp256k1_fe_set_int(&c, 7);
+ secp256k1_fe_add(&x3, &c);
+ secp256k1_fe_normalize_weak(&x3);
+ return secp256k1_fe_equal_var(&y2, &x3);
+}
+
+static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
+ /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate */
+ secp256k1_fe t1,t2,t3,t4;
+ /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
+     *  Q must equal -Q, i.e. Q.y == -(Q.y), i.e. Q.y is 0. For a point on y^2 = x^3 + 7 to have
+ * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
+ */
+ r->infinity = a->infinity;
+ if (r->infinity) {
+ if (rzr != NULL) {
+ secp256k1_fe_set_int(rzr, 1);
+ }
+ return;
+ }
+
+ if (rzr != NULL) {
+ *rzr = a->y;
+ secp256k1_fe_normalize_weak(rzr);
+ secp256k1_fe_mul_int(rzr, 2);
+ }
+
+ secp256k1_fe_mul(&r->z, &a->z, &a->y);
+ secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */
+ secp256k1_fe_sqr(&t1, &a->x);
+ secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */
+ secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */
+ secp256k1_fe_sqr(&t3, &a->y);
+ secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */
+ secp256k1_fe_sqr(&t4, &t3);
+ secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */
+ secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */
+ r->x = t3;
+ secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */
+ secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
+ secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */
+ secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */
+ secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */
+ secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */
+ secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
+ secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */
+ secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
+}
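The comment above claims -7 has no cube root mod p. Since p == 1 (mod 3), a cube root of a exists iff a^((p-1)/3) == 1 (mod p), which a GMP one-off can test (editorial, not part of the diff; "no" is the expected output):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t p, t, e, a, r;
    mpz_inits(p, t, e, a, r, NULL);
    mpz_ui_pow_ui(p, 2, 256);
    mpz_ui_pow_ui(t, 2, 32);
    mpz_sub(p, p, t);
    mpz_sub_ui(p, p, 977);        /* p = 2^256 - 2^32 - 977, and p == 1 (mod 3) */
    mpz_sub_ui(e, p, 1);
    mpz_fdiv_q_ui(e, e, 3);       /* e = (p-1)/3 */
    mpz_sub_ui(a, p, 7);          /* a = -7 mod p */
    mpz_powm(r, a, e, p);
    /* A cube root exists iff r == 1. */
    printf("-7 has a cube root mod p: %s\n", mpz_cmp_ui(r, 1) == 0 ? "yes" : "no");
    mpz_clears(p, t, e, a, r, NULL);
    return 0;
}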
+
+static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
+ VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
+ secp256k1_gej_double_var(r, a, rzr);
+}
+
+static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
+ /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
+ secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
+
+ if (a->infinity) {
+ VERIFY_CHECK(rzr == NULL);
+ *r = *b;
+ return;
+ }
+
+ if (b->infinity) {
+ if (rzr != NULL) {
+ secp256k1_fe_set_int(rzr, 1);
+ }
+ *r = *a;
+ return;
+ }
+
+ r->infinity = 0;
+ secp256k1_fe_sqr(&z22, &b->z);
+ secp256k1_fe_sqr(&z12, &a->z);
+ secp256k1_fe_mul(&u1, &a->x, &z22);
+ secp256k1_fe_mul(&u2, &b->x, &z12);
+ secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z);
+ secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
+ secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+ secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
+ if (secp256k1_fe_normalizes_to_zero_var(&h)) {
+ if (secp256k1_fe_normalizes_to_zero_var(&i)) {
+ secp256k1_gej_double_var(r, a, rzr);
+ } else {
+ if (rzr != NULL) {
+ secp256k1_fe_set_int(rzr, 0);
+ }
+ r->infinity = 1;
+ }
+ return;
+ }
+ secp256k1_fe_sqr(&i2, &i);
+ secp256k1_fe_sqr(&h2, &h);
+ secp256k1_fe_mul(&h3, &h, &h2);
+ secp256k1_fe_mul(&h, &h, &b->z);
+ if (rzr != NULL) {
+ *rzr = h;
+ }
+ secp256k1_fe_mul(&r->z, &a->z, &h);
+ secp256k1_fe_mul(&t, &u1, &h2);
+ r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
+ secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
+ secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
+ secp256k1_fe_add(&r->y, &h3);
+}
+
+static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
+ /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
+ secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
+ if (a->infinity) {
+ VERIFY_CHECK(rzr == NULL);
+ secp256k1_gej_set_ge(r, b);
+ return;
+ }
+ if (b->infinity) {
+ if (rzr != NULL) {
+ secp256k1_fe_set_int(rzr, 1);
+ }
+ *r = *a;
+ return;
+ }
+ r->infinity = 0;
+
+ secp256k1_fe_sqr(&z12, &a->z);
+ u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+ secp256k1_fe_mul(&u2, &b->x, &z12);
+ s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+ secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
+ secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+ secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
+ if (secp256k1_fe_normalizes_to_zero_var(&h)) {
+ if (secp256k1_fe_normalizes_to_zero_var(&i)) {
+ secp256k1_gej_double_var(r, a, rzr);
+ } else {
+ if (rzr != NULL) {
+ secp256k1_fe_set_int(rzr, 0);
+ }
+ r->infinity = 1;
+ }
+ return;
+ }
+ secp256k1_fe_sqr(&i2, &i);
+ secp256k1_fe_sqr(&h2, &h);
+ secp256k1_fe_mul(&h3, &h, &h2);
+ if (rzr != NULL) {
+ *rzr = h;
+ }
+ secp256k1_fe_mul(&r->z, &a->z, &h);
+ secp256k1_fe_mul(&t, &u1, &h2);
+ r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
+ secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
+ secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
+ secp256k1_fe_add(&r->y, &h3);
+}
+
+static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
+ /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
+ secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
+
+ if (b->infinity) {
+ *r = *a;
+ return;
+ }
+ if (a->infinity) {
+ secp256k1_fe bzinv2, bzinv3;
+ r->infinity = b->infinity;
+ secp256k1_fe_sqr(&bzinv2, bzinv);
+ secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv);
+ secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
+ secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
+ secp256k1_fe_set_int(&r->z, 1);
+ return;
+ }
+ r->infinity = 0;
+
+ /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
+ * secp256k1's isomorphism we can multiply the Z coordinates on both sides
+ * by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
+ * This means that (rx,ry,rz) can be calculated as
+ * (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
+ * The variable az below holds the modified Z coordinate for a, which is used
+ * for the computation of rx and ry, but not for rz.
+ */
+ secp256k1_fe_mul(&az, &a->z, bzinv);
+
+ secp256k1_fe_sqr(&z12, &az);
+ u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+ secp256k1_fe_mul(&u2, &b->x, &z12);
+ s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+ secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
+ secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+ secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
+ if (secp256k1_fe_normalizes_to_zero_var(&h)) {
+ if (secp256k1_fe_normalizes_to_zero_var(&i)) {
+ secp256k1_gej_double_var(r, a, NULL);
+ } else {
+ r->infinity = 1;
+ }
+ return;
+ }
+ secp256k1_fe_sqr(&i2, &i);
+ secp256k1_fe_sqr(&h2, &h);
+ secp256k1_fe_mul(&h3, &h, &h2);
+ r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h);
+ secp256k1_fe_mul(&t, &u1, &h2);
+ r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
+ secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
+ secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
+ secp256k1_fe_add(&r->y, &h3);
+}
+
+
+static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
+ /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
+ static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
+ secp256k1_fe m_alt, rr_alt;
+ int infinity, degenerate;
+ VERIFY_CHECK(!b->infinity);
+ VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
+
+ /** In:
+ * Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
+ * In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
+ * we find as solution for a unified addition/doubling formula:
+ * lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
+ * x3 = lambda^2 - (x1 + x2)
+ * 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
+ *
+ * Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
+ * U1 = X1*Z2^2, U2 = X2*Z1^2
+ * S1 = Y1*Z2^3, S2 = Y2*Z1^3
+ * Z = Z1*Z2
+ * T = U1+U2
+ * M = S1+S2
+ * Q = T*M^2
+ * R = T^2-U1*U2
+ * X3 = 4*(R^2-Q)
+ * Y3 = 4*(R*(3*Q-2*R^2)-M^4)
+ * Z3 = 2*M*Z
+ * (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
+ *
+ * This formula has the benefit of being the same for both addition
+ * of distinct points and doubling. However, it breaks down in the
+ * case that either point is infinity, or that y1 = -y2. We handle
+ * these cases in the following ways:
+ *
+ * - If b is infinity we simply bail by means of a VERIFY_CHECK.
+ *
+ * - If a is infinity, we detect this, and at the end of the
+ * computation replace the result (which will be meaningless,
+ * but we compute to be constant-time) with b.x : b.y : 1.
+ *
+ * - If a = -b, we have y1 = -y2, which is a degenerate case.
+ * But here the answer is infinity, so we simply set the
+ * infinity flag of the result, overriding the computed values
+ * without even needing to cmov.
+ *
+ * - If y1 = -y2 but x1 != x2, which does occur thanks to certain
+ * properties of our curve (specifically, 1 has nontrivial cube
+ * roots in our field, and the curve equation has no x coefficient)
+ * then the answer is not infinity but also not given by the above
+ * equation. In this case, we cmov in place an alternate expression
+ * for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these
+ * expressions for lambda are defined, they are equal, and can be
+ * obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
+ * then substitution of x^3 + 7 for y^2 (using the curve equation).
+ * For all pairs of nonzero points (a, b) at least one is defined,
+ * so this covers everything.
+ */
+
+    secp256k1_fe_sqr(&zz, &a->z);                /* zz = Z1^2 */
+ u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */
+ secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */
+ s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */
+ secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */
+ secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */
+ t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */
+ m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */
+ secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */
+ secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */
+ secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */
+ secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */
+ /** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
+ * case that Z = z1z2 = 0, and this is special-cased later on). */
+ degenerate = secp256k1_fe_normalizes_to_zero(&m) &
+ secp256k1_fe_normalizes_to_zero(&rr);
+ /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
+ * This means either x1 == beta*x2 or beta*x1 == x2, where beta is
+ * a nontrivial cube root of one. In either case, an alternate
+ * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
+ * so we set R/M equal to this. */
+ rr_alt = s1;
+    secp256k1_fe_mul_int(&rr_alt, 2);       /* rr_alt = 2*Y1*Z2^3 = Y1*Z2^3 - Y2*Z1^3, since y1 = -y2 here (2) */
+ secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */
+
+ secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
+ secp256k1_fe_cmov(&m_alt, &m, !degenerate);
+ /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
+ * From here on out Ralt and Malt represent the numerator
+ * and denominator of lambda; R and M represent the explicit
+ * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
+ secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */
+ secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */
+ /* These two lines use the observation that either M == Malt or M == 0,
+ * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
+ * zero (which is "computed" by cmov). So the cost is one squaring
+ * versus two multiplications. */
+ secp256k1_fe_sqr(&n, &n);
+ secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */
+ secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */
+ secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */
+ infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
+ secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */
+ secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */
+ secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */
+ secp256k1_fe_normalize_weak(&t);
+ r->x = t; /* r->x = Ralt^2-Q (1) */
+ secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */
+ secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */
+ secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */
+ secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
+ secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
+ secp256k1_fe_normalize_weak(&r->y);
+ secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */
+ secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
+
+ /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
+ secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
+ secp256k1_fe_cmov(&r->y, &b->y, a->infinity);
+ secp256k1_fe_cmov(&r->z, &fe_1, a->infinity);
+ r->infinity = infinity;
+}
+
+static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
+ /* Operations: 4 mul, 1 sqr */
+ secp256k1_fe zz;
+ VERIFY_CHECK(!secp256k1_fe_is_zero(s));
+ secp256k1_fe_sqr(&zz, s);
+ secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
+ secp256k1_fe_mul(&r->y, &r->y, &zz);
+ secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
+ secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */
+}
+
+static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
+ secp256k1_fe x, y;
+ VERIFY_CHECK(!a->infinity);
+ x = a->x;
+ secp256k1_fe_normalize(&x);
+ y = a->y;
+ secp256k1_fe_normalize(&y);
+ secp256k1_fe_to_storage(&r->x, &x);
+ secp256k1_fe_to_storage(&r->y, &y);
+}
+
+static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) {
+ secp256k1_fe_from_storage(&r->x, &a->x);
+ secp256k1_fe_from_storage(&r->y, &a->y);
+ r->infinity = 0;
+}
+
+static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
+ secp256k1_fe_storage_cmov(&r->x, &a->x, flag);
+ secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
+}
+
+#ifdef USE_ENDOMORPHISM
+static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
+ static const secp256k1_fe beta = SECP256K1_FE_CONST(
+ 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
+ 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
+ );
+ *r = *a;
+ secp256k1_fe_mul(&r->x, &r->x, &beta);
+}
+#endif
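beta here is a nontrivial cube root of unity mod p, which is why (x, y) -> (beta*x, y) maps curve points to curve points. That property can be checked directly with GMP (editorial, not part of the diff):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t p, t, beta, r;
    mpz_inits(p, t, beta, r, NULL);
    mpz_ui_pow_ui(p, 2, 256);
    mpz_ui_pow_ui(t, 2, 32);
    mpz_sub(p, p, t);
    mpz_sub_ui(p, p, 977);        /* p = 2^256 - 2^32 - 977 */
    /* The beta constant from secp256k1_ge_mul_lambda above. */
    mpz_set_str(beta, "7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee", 16);
    mpz_powm_ui(r, beta, 3, p);
    printf("beta^3 == 1 mod p: %s\n", mpz_cmp_ui(r, 1) == 0 ? "yes" : "no");
    mpz_clears(p, t, beta, r, NULL);
    return 0;
}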
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/hash.h b/crypto/secp256k1/libsecp256k1/src/hash.h
new file mode 100644
index 000000000..0ff01e63f
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/hash.h
@@ -0,0 +1,41 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_HASH_
+#define _SECP256K1_HASH_
+
+#include <stdlib.h>
+#include <stdint.h>
+
+typedef struct {
+    uint32_t s[8]; /* SHA-256 state: eight 32-bit words */
+ uint32_t buf[16]; /* In big endian */
+ size_t bytes;
+} secp256k1_sha256_t;
+
+static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash);
+static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t size);
+static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32);
+
+typedef struct {
+ secp256k1_sha256_t inner, outer;
+} secp256k1_hmac_sha256_t;
+
+static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t size);
+static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size);
+static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32);
+
+typedef struct {
+ unsigned char v[32];
+ unsigned char k[32];
+ int retry;
+} secp256k1_rfc6979_hmac_sha256_t;
+
+static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen);
+static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen);
+static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/hash_impl.h b/crypto/secp256k1/libsecp256k1/src/hash_impl.h
new file mode 100644
index 000000000..ae55df6d8
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/hash_impl.h
@@ -0,0 +1,283 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_HASH_IMPL_H_
+#define _SECP256K1_HASH_IMPL_H_
+
+#include "hash.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
+#define Maj(x,y,z) (((x) & (y)) | ((z) & ((x) | (y))))
+#define Sigma0(x) (((x) >> 2 | (x) << 30) ^ ((x) >> 13 | (x) << 19) ^ ((x) >> 22 | (x) << 10))
+#define Sigma1(x) (((x) >> 6 | (x) << 26) ^ ((x) >> 11 | (x) << 21) ^ ((x) >> 25 | (x) << 7))
+#define sigma0(x) (((x) >> 7 | (x) << 25) ^ ((x) >> 18 | (x) << 14) ^ ((x) >> 3))
+#define sigma1(x) (((x) >> 17 | (x) << 15) ^ ((x) >> 19 | (x) << 13) ^ ((x) >> 10))
+
+#define Round(a,b,c,d,e,f,g,h,k,w) do { \
+ uint32_t t1 = (h) + Sigma1(e) + Ch((e), (f), (g)) + (k) + (w); \
+ uint32_t t2 = Sigma0(a) + Maj((a), (b), (c)); \
+ (d) += t1; \
+ (h) = t1 + t2; \
+} while(0)
+
+#ifdef WORDS_BIGENDIAN
+#define BE32(x) (x)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#endif
+
+static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash) {
+ hash->s[0] = 0x6a09e667ul;
+ hash->s[1] = 0xbb67ae85ul;
+ hash->s[2] = 0x3c6ef372ul;
+ hash->s[3] = 0xa54ff53aul;
+ hash->s[4] = 0x510e527ful;
+ hash->s[5] = 0x9b05688cul;
+ hash->s[6] = 0x1f83d9abul;
+ hash->s[7] = 0x5be0cd19ul;
+ hash->bytes = 0;
+}
+
+/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */
+static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) {
+ uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
+ uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
+
+ Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = BE32(chunk[0]));
+ Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = BE32(chunk[1]));
+ Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = BE32(chunk[2]));
+ Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = BE32(chunk[3]));
+ Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = BE32(chunk[4]));
+ Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = BE32(chunk[5]));
+ Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = BE32(chunk[6]));
+ Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = BE32(chunk[7]));
+ Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = BE32(chunk[8]));
+ Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = BE32(chunk[9]));
+ Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = BE32(chunk[10]));
+ Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = BE32(chunk[11]));
+ Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = BE32(chunk[12]));
+ Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = BE32(chunk[13]));
+ Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = BE32(chunk[14]));
+ Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = BE32(chunk[15]));
+
+ Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1));
+ Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2));
+ Round(g, h, a, b, c, d, e, f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3));
+ Round(f, g, h, a, b, c, d, e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4));
+ Round(e, f, g, h, a, b, c, d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5));
+ Round(d, e, f, g, h, a, b, c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6));
+ Round(c, d, e, f, g, h, a, b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7));
+ Round(b, c, d, e, f, g, h, a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8));
+ Round(a, b, c, d, e, f, g, h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9));
+ Round(h, a, b, c, d, e, f, g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10));
+ Round(g, h, a, b, c, d, e, f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11));
+ Round(f, g, h, a, b, c, d, e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12));
+ Round(e, f, g, h, a, b, c, d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13));
+ Round(d, e, f, g, h, a, b, c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14));
+ Round(c, d, e, f, g, h, a, b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15));
+ Round(b, c, d, e, f, g, h, a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0));
+
+ Round(a, b, c, d, e, f, g, h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1));
+ Round(h, a, b, c, d, e, f, g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2));
+ Round(g, h, a, b, c, d, e, f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3));
+ Round(f, g, h, a, b, c, d, e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4));
+ Round(e, f, g, h, a, b, c, d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5));
+ Round(d, e, f, g, h, a, b, c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6));
+ Round(c, d, e, f, g, h, a, b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7));
+ Round(b, c, d, e, f, g, h, a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8));
+ Round(a, b, c, d, e, f, g, h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9));
+ Round(h, a, b, c, d, e, f, g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10));
+ Round(g, h, a, b, c, d, e, f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11));
+ Round(f, g, h, a, b, c, d, e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12));
+ Round(e, f, g, h, a, b, c, d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13));
+ Round(d, e, f, g, h, a, b, c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14));
+ Round(c, d, e, f, g, h, a, b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15));
+ Round(b, c, d, e, f, g, h, a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0));
+
+ Round(a, b, c, d, e, f, g, h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1));
+ Round(h, a, b, c, d, e, f, g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2));
+ Round(g, h, a, b, c, d, e, f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3));
+ Round(f, g, h, a, b, c, d, e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4));
+ Round(e, f, g, h, a, b, c, d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5));
+ Round(d, e, f, g, h, a, b, c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6));
+ Round(c, d, e, f, g, h, a, b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7));
+ Round(b, c, d, e, f, g, h, a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8));
+ Round(a, b, c, d, e, f, g, h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9));
+ Round(h, a, b, c, d, e, f, g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10));
+ Round(g, h, a, b, c, d, e, f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11));
+ Round(f, g, h, a, b, c, d, e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12));
+ Round(e, f, g, h, a, b, c, d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13));
+ Round(d, e, f, g, h, a, b, c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14));
+    Round(c, d, e, f, g, h, a, b, 0xbef9a3f7, w14 += sigma1(w12) + w7 + sigma0(w15));
+    Round(b, c, d, e, f, g, h, a, 0xc67178f2, w15 += sigma1(w13) + w8 + sigma0(w0));
+
+ s[0] += a;
+ s[1] += b;
+ s[2] += c;
+ s[3] += d;
+ s[4] += e;
+ s[5] += f;
+ s[6] += g;
+ s[7] += h;
+}
+
+static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t len) {
+ size_t bufsize = hash->bytes & 0x3F;
+ hash->bytes += len;
+ while (bufsize + len >= 64) {
+ /* Fill the buffer, and process it. */
+ memcpy(((unsigned char*)hash->buf) + bufsize, data, 64 - bufsize);
+ data += 64 - bufsize;
+ len -= 64 - bufsize;
+ secp256k1_sha256_transform(hash->s, hash->buf);
+ bufsize = 0;
+ }
+ if (len) {
+ /* Fill the buffer with what remains. */
+ memcpy(((unsigned char*)hash->buf) + bufsize, data, len);
+ }
+}
+
+static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32) {
+ static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t sizedesc[2];
+ uint32_t out[8];
+ int i = 0;
+ sizedesc[0] = BE32(hash->bytes >> 29);
+ sizedesc[1] = BE32(hash->bytes << 3);
+ secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
+ secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8);
+ for (i = 0; i < 8; i++) {
+ out[i] = BE32(hash->s[i]);
+ hash->s[i] = 0;
+ }
+ memcpy(out32, (const unsigned char*)out, 32);
+}
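The padding length used in finalize, 1 + ((119 - (bytes % 64)) % 64), is exactly what lands message + padding + the 8-byte length field on a 64-byte block boundary. A standalone check of the arithmetic (editorial, not part of the diff; the step of 13 is arbitrary):

#include <stdio.h>

int main(void) {
    unsigned long bytes;
    for (bytes = 0; bytes < 130; bytes += 13) {
        unsigned long pad = 1 + ((119 - (bytes % 64)) % 64);
        /* (bytes + pad + 8) % 64 should always be 0. */
        printf("bytes=%3lu pad=%2lu total%%64=%lu\n",
               bytes, pad, (bytes + pad + 8) % 64);
    }
    return 0;
}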
+
+static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t keylen) {
+ int n;
+ unsigned char rkey[64];
+ if (keylen <= 64) {
+ memcpy(rkey, key, keylen);
+ memset(rkey + keylen, 0, 64 - keylen);
+ } else {
+ secp256k1_sha256_t sha256;
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, key, keylen);
+ secp256k1_sha256_finalize(&sha256, rkey);
+ memset(rkey + 32, 0, 32);
+ }
+
+ secp256k1_sha256_initialize(&hash->outer);
+ for (n = 0; n < 64; n++) {
+ rkey[n] ^= 0x5c;
+ }
+ secp256k1_sha256_write(&hash->outer, rkey, 64);
+
+ secp256k1_sha256_initialize(&hash->inner);
+ for (n = 0; n < 64; n++) {
+ rkey[n] ^= 0x5c ^ 0x36;
+ }
+ secp256k1_sha256_write(&hash->inner, rkey, 64);
+ memset(rkey, 0, 64);
+}
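The second XOR pass above reuses rkey in place: since (k ^ 0x5c) ^ (0x5c ^ 0x36) == k ^ 0x36, the opad-masked key becomes the ipad-masked key without a second copy of the key material. A one-off verification of the byte identity (editorial, not part of the diff):

#include <stdio.h>

int main(void) {
    int k, ok = 1;
    for (k = 0; k < 256; k++)
        ok &= (((k ^ 0x5c) ^ (0x5c ^ 0x36)) == (k ^ 0x36));
    printf("identity holds for all bytes: %s\n", ok ? "yes" : "no");
    return 0;
}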
+
+static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size) {
+ secp256k1_sha256_write(&hash->inner, data, size);
+}
+
+static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32) {
+ unsigned char temp[32];
+ secp256k1_sha256_finalize(&hash->inner, temp);
+ secp256k1_sha256_write(&hash->outer, temp, 32);
+ memset(temp, 0, 32);
+ secp256k1_sha256_finalize(&hash->outer, out32);
+}
+
+
+static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen) {
+ secp256k1_hmac_sha256_t hmac;
+ static const unsigned char zero[1] = {0x00};
+ static const unsigned char one[1] = {0x01};
+
+ memset(rng->v, 0x01, 32); /* RFC6979 3.2.b. */
+ memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */
+
+ /* RFC6979 3.2.d. */
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, zero, 1);
+ secp256k1_hmac_sha256_write(&hmac, key, keylen);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+
+ /* RFC6979 3.2.f. */
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, one, 1);
+ secp256k1_hmac_sha256_write(&hmac, key, keylen);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ rng->retry = 0;
+}
+
+static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen) {
+ /* RFC6979 3.2.h. */
+ static const unsigned char zero[1] = {0x00};
+ if (rng->retry) {
+ secp256k1_hmac_sha256_t hmac;
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, zero, 1);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ }
+
+ while (outlen > 0) {
+ secp256k1_hmac_sha256_t hmac;
+        size_t now = outlen;
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ if (now > 32) {
+ now = 32;
+ }
+ memcpy(out, rng->v, now);
+ out += now;
+ outlen -= now;
+ }
+
+ rng->retry = 1;
+}
+
+static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng) {
+ memset(rng->k, 0, 32);
+ memset(rng->v, 0, 32);
+ rng->retry = 0;
+}
+
+
+#undef Round
+#undef sigma0
+#undef sigma1
+#undef Sigma0
+#undef Sigma1
+#undef Ch
+#undef Maj
+#undef BE32
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java
new file mode 100644
index 000000000..90a498eaa
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java
@@ -0,0 +1,60 @@
+package org.bitcoin;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.google.common.base.Preconditions;
+
+
+/**
+ * This class holds native methods to handle ECDSA verification.
+ * You can find an example library that can be used for this at
+ * https://github.com/sipa/secp256k1
+ */
+public class NativeSecp256k1 {
+ public static final boolean enabled;
+ static {
+ boolean isEnabled = true;
+ try {
+ System.loadLibrary("javasecp256k1");
+ } catch (UnsatisfiedLinkError e) {
+ isEnabled = false;
+ }
+ enabled = isEnabled;
+ }
+
+ private static ThreadLocal<ByteBuffer> nativeECDSABuffer = new ThreadLocal<ByteBuffer>();
+ /**
+ * Verifies the given secp256k1 signature in native code.
+     * Calling this when enabled == false is undefined behaviour (the native library is probably not loaded).
+ *
+ * @param data The data which was signed, must be exactly 32 bytes
+ * @param signature The signature
+ * @param pub The public key which did the signing
+ */
+ public static boolean verify(byte[] data, byte[] signature, byte[] pub) {
+ Preconditions.checkArgument(data.length == 32 && signature.length <= 520 && pub.length <= 520);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null) {
+ byteBuff = ByteBuffer.allocateDirect(32 + 8 + 520 + 520);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(data);
+ byteBuff.putInt(signature.length);
+ byteBuff.putInt(pub.length);
+ byteBuff.put(signature);
+ byteBuff.put(pub);
+ return secp256k1_ecdsa_verify(byteBuff) == 1;
+ }
+
+ /**
+ * @param byteBuff signature format is byte[32] data,
+ * native-endian int signatureLength, native-endian int pubkeyLength,
+ * byte[signatureLength] signature, byte[pubkeyLength] pub
+     * @return 1 for a valid signature, anything else for invalid
+ */
+ private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff);
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c
new file mode 100644
index 000000000..bb4cd7072
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c
@@ -0,0 +1,23 @@
+#include "org_bitcoin_NativeSecp256k1.h"
+#include "include/secp256k1.h"
+
+JNIEXPORT jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject)
+{
+ unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ int sigLen = *((int*)(data + 32));
+ int pubLen = *((int*)(data + 32 + 4));
+
+ return secp256k1_ecdsa_verify(data, 32, data+32+8, sigLen, data+32+8+sigLen, pubLen);
+}
+
+static void __javasecp256k1_attach(void) __attribute__((constructor));
+static void __javasecp256k1_detach(void) __attribute__((destructor));
+
+static void __javasecp256k1_attach(void) {
+ secp256k1_start(SECP256K1_START_VERIFY);
+}
+
+static void __javasecp256k1_detach(void) {
+ secp256k1_stop();
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h
new file mode 100644
index 000000000..d7fb004fa
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h
@@ -0,0 +1,21 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_bitcoin_NativeSecp256k1 */
+
+#ifndef _Included_org_bitcoin_NativeSecp256k1
+#define _Included_org_bitcoin_NativeSecp256k1
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdsa_verify
+ * Signature: (Ljava/nio/ByteBuffer;)I
+ */
+JNIEXPORT jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
+ (JNIEnv *, jclass, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include
new file mode 100644
index 000000000..8ef3aff92
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include
@@ -0,0 +1,9 @@
+include_HEADERS += include/secp256k1_ecdh.h
+noinst_HEADERS += src/modules/ecdh/main_impl.h
+noinst_HEADERS += src/modules/ecdh/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_ecdh
+bench_ecdh_SOURCES = src/bench_ecdh.c
+bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_ecdh_LDFLAGS = -static
+endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h
new file mode 100644
index 000000000..c23e4f82f
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h
@@ -0,0 +1,54 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_ECDH_MAIN_
+#define _SECP256K1_MODULE_ECDH_MAIN_
+
+#include "include/secp256k1_ecdh.h"
+#include "ecmult_const_impl.h"
+
+int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *result, const secp256k1_pubkey *point, const unsigned char *scalar) {
+ int ret = 0;
+ int overflow = 0;
+ secp256k1_gej res;
+ secp256k1_ge pt;
+ secp256k1_scalar s;
+ ARG_CHECK(result != NULL);
+ ARG_CHECK(point != NULL);
+ ARG_CHECK(scalar != NULL);
+ (void)ctx;
+
+ secp256k1_pubkey_load(ctx, &pt, point);
+ secp256k1_scalar_set_b32(&s, scalar, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&s)) {
+ ret = 0;
+ } else {
+ unsigned char x[32];
+ unsigned char y[1];
+ secp256k1_sha256_t sha;
+
+ secp256k1_ecmult_const(&res, &pt, &s);
+ secp256k1_ge_set_gej(&pt, &res);
+ /* Compute a hash of the point in compressed form
+ * Note we cannot use secp256k1_eckey_pubkey_serialize here since it does not
+ * expect its output to be secret and has a timing sidechannel. */
+ secp256k1_fe_normalize(&pt.x);
+ secp256k1_fe_normalize(&pt.y);
+ secp256k1_fe_get_b32(x, &pt.x);
+ y[0] = 0x02 | secp256k1_fe_is_odd(&pt.y);
+
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, y, sizeof(y));
+ secp256k1_sha256_write(&sha, x, sizeof(x));
+ secp256k1_sha256_finalize(&sha, result);
+ ret = 1;
+ }
+
+ secp256k1_scalar_clear(&s);
+ return ret;
+}
+
+#endif
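
For callers, the module is the single function above. A hedged usage sketch
against the public API (not part of the patch): the context flags and the
secp256k1_context_*/secp256k1_ec_pubkey_create helpers are assumed from the
library's public include/secp256k1.h, which is not shown in this hunk:

    #include <stdio.h>
    #include <string.h>
    #include "include/secp256k1.h"
    #include "include/secp256k1_ecdh.h"

    int main(void) {
        unsigned char seckey_a[32] = {0}, seckey_b[32] = {0};
        unsigned char secret_ab[32], secret_ba[32];
        secp256k1_pubkey pub_a, pub_b;
        secp256k1_context *ctx = secp256k1_context_create(
            SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);

        seckey_a[31] = 1;   /* toy scalars, for illustration only */
        seckey_b[31] = 2;
        if (!secp256k1_ec_pubkey_create(ctx, &pub_a, seckey_a) ||
            !secp256k1_ec_pubkey_create(ctx, &pub_b, seckey_b)) {
            return 1;
        }
        /* Each side multiplies the other's point by its own scalar; both end
         * up hashing the same compressed point, so the secrets agree. */
        if (!secp256k1_ecdh(ctx, secret_ab, &pub_b, seckey_a) ||
            !secp256k1_ecdh(ctx, secret_ba, &pub_a, seckey_b)) {
            return 1;
        }
        printf("%s\n", memcmp(secret_ab, secret_ba, 32) == 0 ? "shared" : "mismatch");
        secp256k1_context_destroy(ctx);
        return 0;
    }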
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h
new file mode 100644
index 000000000..7badc9033
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h
@@ -0,0 +1,75 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_ECDH_TESTS_
+#define _SECP256K1_MODULE_ECDH_TESTS_
+
+void test_ecdh_generator_basepoint(void) {
+ unsigned char s_one[32] = { 0 };
+ secp256k1_pubkey point[2];
+ int i;
+
+ s_one[31] = 1;
+ /* Check against pubkey creation when the basepoint is the generator */
+ for (i = 0; i < 100; ++i) {
+ secp256k1_sha256_t sha;
+ unsigned char s_b32[32];
+ unsigned char output_ecdh[32];
+ unsigned char output_ser[32];
+ unsigned char point_ser[33];
+ size_t point_ser_len = sizeof(point_ser);
+ secp256k1_scalar s;
+
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(s_b32, &s);
+
+ /* compute using ECDH function */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1);
+ CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32) == 1);
+ /* compute "explicitly" */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(point_ser_len == sizeof(point_ser));
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, point_ser, point_ser_len);
+ secp256k1_sha256_finalize(&sha, output_ser);
+ /* compare */
+ CHECK(memcmp(output_ecdh, output_ser, sizeof(output_ser)) == 0);
+ }
+}
+
+void test_bad_scalar(void) {
+ unsigned char s_zero[32] = { 0 };
+ unsigned char s_overflow[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41
+ };
+ unsigned char s_rand[32] = { 0 };
+ unsigned char output[32];
+ secp256k1_scalar rand;
+ secp256k1_pubkey point;
+
+ /* Create random point */
+ random_scalar_order(&rand);
+ secp256k1_scalar_get_b32(s_rand, &rand);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1);
+
+ /* Try to multiply it by bad values */
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_zero) == 0);
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 0);
+ /* ...and a good one */
+ s_overflow[31] -= 1;
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 1);
+}
+
+void run_ecdh_tests(void) {
+ test_ecdh_generator_basepoint();
+ test_bad_scalar();
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include b/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include
new file mode 100644
index 000000000..754469eeb
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include
@@ -0,0 +1,9 @@
+include_HEADERS += include/secp256k1_recovery.h
+noinst_HEADERS += src/modules/recovery/main_impl.h
+noinst_HEADERS += src/modules/recovery/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_recover
+bench_recover_SOURCES = src/bench_recover.c
+bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_recover_LDFLAGS = -static
+endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h
new file mode 100644
index 000000000..75b695894
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h
@@ -0,0 +1,156 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_RECOVERY_MAIN_
+#define _SECP256K1_MODULE_RECOVERY_MAIN_
+
+#include "include/secp256k1_recovery.h"
+
+static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) {
+ (void)ctx;
+ if (sizeof(secp256k1_scalar) == 32) {
+        /* When the secp256k1_scalar type is exactly 32 bytes, use its
+         * representation inside secp256k1_ecdsa_recoverable_signature, as conversion is very fast.
+ * Note that secp256k1_ecdsa_signature_save must use the same representation. */
+ memcpy(r, &sig->data[0], 32);
+ memcpy(s, &sig->data[32], 32);
+ } else {
+ secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
+ secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
+ }
+ *recid = sig->data[64];
+}
+
+static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) {
+ if (sizeof(secp256k1_scalar) == 32) {
+ memcpy(&sig->data[0], r, 32);
+ memcpy(&sig->data[32], s, 32);
+ } else {
+ secp256k1_scalar_get_b32(&sig->data[0], r);
+ secp256k1_scalar_get_b32(&sig->data[32], s);
+ }
+ sig->data[64] = recid;
+}
+
+int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
+ secp256k1_scalar r, s;
+ int ret = 1;
+ int overflow = 0;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input64 != NULL);
+ ARG_CHECK(recid >= 0 && recid <= 3);
+
+ secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
+ ret &= !overflow;
+ secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
+ ret &= !overflow;
+ if (ret) {
+ secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) {
+ secp256k1_scalar r, s;
+
+ (void)ctx;
+ ARG_CHECK(output64 != NULL);
+ ARG_CHECK(sig != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
+ secp256k1_scalar_get_b32(&output64[0], &r);
+ secp256k1_scalar_get_b32(&output64[32], &s);
+ return 1;
+}
+
+int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) {
+ secp256k1_scalar r, s;
+ int recid;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(sigin != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ return 1;
+}
+
+int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
+ secp256k1_scalar r, s;
+ secp256k1_scalar sec, non, msg;
+ int recid;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(seckey != NULL);
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ /* Fail if the secret key is invalid. */
+ if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned int count = 0;
+ secp256k1_scalar_set_b32(&msg, msg32, NULL);
+ while (1) {
+ unsigned char nonce32[32];
+            ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ memset(nonce32, 0, 32);
+ if (!secp256k1_scalar_is_zero(&non) && !overflow) {
+ if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
+ break;
+ }
+ }
+ count++;
+ }
+ secp256k1_scalar_clear(&msg);
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ }
+ if (ret) {
+ secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
+ } else {
+ memset(signature, 0, sizeof(*signature));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) {
+ secp256k1_ge q;
+ secp256k1_scalar r, s;
+ secp256k1_scalar m;
+ int recid;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
+ ARG_CHECK(recid >= 0 && recid < 4);
+ secp256k1_scalar_set_b32(&m, msg32, NULL);
+ if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
+ secp256k1_pubkey_save(pubkey, &q);
+ return 1;
+ } else {
+ memset(pubkey, 0, sizeof(*pubkey));
+ return 0;
+ }
+}
+
+#endif
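
The round trip exercised by the tests that follow looks like this from the
caller's side; a sketch (not part of the patch) that uses only the functions
defined in this file plus secp256k1_ec_pubkey_create from the public header,
and assumes a context created with both the SIGN and VERIFY flags:

    #include <string.h>
    #include "include/secp256k1.h"
    #include "include/secp256k1_recovery.h"

    /* Sign msg32, serialize to 64 bytes plus a recovery id, then recover the
     * public key from the compact form alone. Returns 1 on success. */
    static int example_recover(const secp256k1_context *ctx,
                               const unsigned char msg32[32],
                               const unsigned char seckey[32]) {
        secp256k1_ecdsa_recoverable_signature rsig;
        secp256k1_pubkey pub, recovered;
        unsigned char sig64[64];
        int recid;

        if (!secp256k1_ec_pubkey_create(ctx, &pub, seckey)) return 0;
        if (!secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey, NULL, NULL)) return 0;
        secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &rsig);
        /* recid (0..3) is the extra information that makes recovery possible. */
        if (!secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, recid)) return 0;
        if (!secp256k1_ecdsa_recover(ctx, &recovered, &rsig, msg32)) return 0;
        return memcmp(&pub, &recovered, sizeof(pub)) == 0;
    }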
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h
new file mode 100644
index 000000000..5a78fae92
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h
@@ -0,0 +1,249 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_RECOVERY_TESTS_
+#define _SECP256K1_MODULE_RECOVERY_TESTS_
+
+void test_ecdsa_recovery_end_to_end(void) {
+ unsigned char extra[32] = {0x00};
+ unsigned char privkey[32];
+ unsigned char message[32];
+ secp256k1_ecdsa_signature signature[5];
+ secp256k1_ecdsa_recoverable_signature rsignature[5];
+ unsigned char sig[74];
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey recpubkey;
+ int recid = 0;
+
+ /* Generate a random key and message. */
+ {
+ secp256k1_scalar msg, key;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(privkey, &key);
+ secp256k1_scalar_get_b32(message, &msg);
+ }
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Serialize/parse compact and verify/recover. */
+ extra[0] = 0;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
+ extra[31] = 1;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
+ extra[31] = 0;
+ extra[0] = 1;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+ memset(&rsignature[4], 0, sizeof(rsignature[4]));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+ /* Parse compact (with recovery id) and recover. */
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
+ CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+ /* Serialize/destroy/parse signature and verify again. */
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+ sig[secp256k1_rand32() % 64] += 1 + (secp256k1_rand32() % 255);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
+ /* Recover again */
+ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
+ memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+}
+
+/* Tests several edge cases. */
+void test_ecdsa_recovery_edge_cases(void) {
+ const unsigned char msg32[32] = {
+ 'T', 'h', 'i', 's', ' ', 'i', 's', ' ',
+ 'a', ' ', 'v', 'e', 'r', 'y', ' ', 's',
+ 'e', 'c', 'r', 'e', 't', ' ', 'm', 'e',
+ 's', 's', 'a', 'g', 'e', '.', '.', '.'
+ };
+ const unsigned char sig64[64] = {
+ /* Generated by signing the above message with nonce 'This is the nonce we will use...'
+ * and secret key 0 (which is not valid), resulting in recid 0. */
+ 0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8,
+ 0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96,
+ 0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63,
+ 0x17, 0x9A, 0x7D, 0xD1, 0x7B, 0xD2, 0x35, 0x32,
+ 0x4B, 0x1B, 0x7D, 0xF3, 0x4C, 0xE1, 0xF6, 0x8E,
+ 0x69, 0x4F, 0xF6, 0xF1, 0x1A, 0xC7, 0x51, 0xDD,
+ 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
+ 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
+ };
+ secp256k1_pubkey pubkey;
+ /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
+ const unsigned char sigb64[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ secp256k1_pubkey pubkeyb;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ int recid;
+
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+
+ for (recid = 0; recid < 4; recid++) {
+ int i;
+ int recid2;
+ /* (4,4) encoded in DER. */
+ unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
+ unsigned char sigcder_zr[7] = {0x30, 0x05, 0x02, 0x00, 0x02, 0x01, 0x01};
+ unsigned char sigcder_zs[7] = {0x30, 0x05, 0x02, 0x01, 0x01, 0x02, 0x00};
+ unsigned char sigbderalt1[39] = {
+ 0x30, 0x25, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
+ };
+ unsigned char sigbderalt2[39] = {
+ 0x30, 0x25, 0x02, 0x01, 0x04, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ unsigned char sigbderalt3[40] = {
+ 0x30, 0x26, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
+ };
+ unsigned char sigbderalt4[40] = {
+ 0x30, 0x26, 0x02, 0x01, 0x04, 0x02, 0x21, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ /* (order + r,4) encoded in DER. */
+ unsigned char sigbderlong[40] = {
+ 0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC,
+ 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
+ 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
+ };
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ for (recid2 = 0; recid2 < 4; recid2++) {
+ secp256k1_pubkey pubkey2b;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
+            /* Parsing a DER signature (order + r,4), whose r overflows, should always fail. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 0);
+ }
+ /* DER parsing tests. */
+ /* Zero length r/s. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
+ /* Leading zeros. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ sigbderalt3[4] = 1;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
+ sigbderalt4[7] = 1;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
+ /* Damage signature. */
+ sigbder[7]++;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ sigbder[7]--;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
+ for(i = 0; i < 8; i++) {
+ int c;
+ unsigned char orig = sigbder[i];
+            /* Try every single-byte change. */
+            for (c = 0; c < 256; c++) {
+                if (c == orig) {
+ continue;
+ }
+ sigbder[i] = c;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ }
+ sigbder[i] = orig;
+ }
+ }
+
+ /* Test r/s equal to zero */
+ {
+ /* (1,1) encoded in DER. */
+ unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01};
+ unsigned char sigc64[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ secp256k1_pubkey pubkeyc;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
+ sigcder[4] = 0;
+ sigc64[31] = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+ sigcder[4] = 1;
+ sigcder[7] = 0;
+ sigc64[31] = 1;
+ sigc64[63] = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+ }
+}
+
+void run_recovery_tests(void) {
+ int i;
+ for (i = 0; i < 64*count; i++) {
+ test_ecdsa_recovery_end_to_end();
+ }
+ test_ecdsa_recovery_edge_cases();
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/schnorr/Makefile.am.include b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/Makefile.am.include
new file mode 100644
index 000000000..bad4cb7c5
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/Makefile.am.include
@@ -0,0 +1,11 @@
+include_HEADERS += include/secp256k1_schnorr.h
+noinst_HEADERS += src/modules/schnorr/main_impl.h
+noinst_HEADERS += src/modules/schnorr/schnorr.h
+noinst_HEADERS += src/modules/schnorr/schnorr_impl.h
+noinst_HEADERS += src/modules/schnorr/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_schnorr_verify
+bench_schnorr_verify_SOURCES = src/bench_schnorr_verify.c
+bench_schnorr_verify_LDADD = libsecp256k1.la $(SECP_LIBS)
+bench_schnorr_verify_LDFLAGS = -static
+endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/schnorr/main_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/main_impl.h
new file mode 100644
index 000000000..c10fd259f
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/main_impl.h
@@ -0,0 +1,164 @@
+/**********************************************************************
+ * Copyright (c) 2014-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_SCHNORR_MAIN
+#define SECP256K1_MODULE_SCHNORR_MAIN
+
+#include "include/secp256k1_schnorr.h"
+#include "modules/schnorr/schnorr_impl.h"
+
+static void secp256k1_schnorr_msghash_sha256(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) {
+ secp256k1_sha256_t sha;
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, r32, 32);
+ secp256k1_sha256_write(&sha, msg32, 32);
+ secp256k1_sha256_finalize(&sha, h32);
+}
+
+static const unsigned char secp256k1_schnorr_algo16[17] = "Schnorr+SHA256  "; /* 16 chars, space-padded */
+
+int secp256k1_schnorr_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
+ secp256k1_scalar sec, non;
+ int ret = 0;
+ int overflow = 0;
+ unsigned int count = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(seckey != NULL);
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ secp256k1_scalar_set_b32(&sec, seckey, NULL);
+ while (1) {
+ unsigned char nonce32[32];
+ ret = noncefp(nonce32, msg32, seckey, secp256k1_schnorr_algo16, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ memset(nonce32, 0, 32);
+ if (!secp256k1_scalar_is_zero(&non) && !overflow) {
+ if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, NULL, secp256k1_schnorr_msghash_sha256, msg32)) {
+ break;
+ }
+ }
+ count++;
+ }
+ if (!ret) {
+ memset(sig64, 0, 64);
+ }
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
+int secp256k1_schnorr_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
+ secp256k1_ge q;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ secp256k1_pubkey_load(ctx, &q, pubkey);
+ return secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32);
+}
+
+int secp256k1_schnorr_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *sig64, const unsigned char *msg32) {
+ secp256k1_ge q;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32)) {
+ secp256k1_pubkey_save(pubkey, &q);
+ return 1;
+ } else {
+ memset(pubkey, 0, sizeof(*pubkey));
+ return 0;
+ }
+}
+
+int secp256k1_schnorr_generate_nonce_pair(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, unsigned char *privnonce32, const unsigned char *sec32, const unsigned char *msg32, secp256k1_nonce_function noncefp, const void* noncedata) {
+ int count = 0;
+ int ret = 1;
+ secp256k1_gej Qj;
+ secp256k1_ge Q;
+ secp256k1_scalar sec;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sec32 != NULL);
+ ARG_CHECK(pubnonce != NULL);
+ ARG_CHECK(privnonce32 != NULL);
+
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ do {
+ int overflow;
+ ret = noncefp(privnonce32, sec32, msg32, secp256k1_schnorr_algo16, (void*)noncedata, count++);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&sec, privnonce32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&sec)) {
+ continue;
+ }
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sec);
+ secp256k1_ge_set_gej(&Q, &Qj);
+
+ secp256k1_pubkey_save(pubnonce, &Q);
+ break;
+ } while(1);
+
+ secp256k1_scalar_clear(&sec);
+ if (!ret) {
+ memset(pubnonce, 0, sizeof(*pubnonce));
+ }
+ return ret;
+}
+
+int secp256k1_schnorr_partial_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *sec32, const secp256k1_pubkey *pubnonce_others, const unsigned char *secnonce32) {
+ int overflow = 0;
+ secp256k1_scalar sec, non;
+ secp256k1_ge pubnon;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(sec32 != NULL);
+ ARG_CHECK(secnonce32 != NULL);
+ ARG_CHECK(pubnonce_others != NULL);
+
+ secp256k1_scalar_set_b32(&sec, sec32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&sec)) {
+ return -1;
+ }
+ secp256k1_scalar_set_b32(&non, secnonce32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&non)) {
+ return -1;
+ }
+ secp256k1_pubkey_load(ctx, &pubnon, pubnonce_others);
+ return secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, &pubnon, secp256k1_schnorr_msghash_sha256, msg32);
+}
+
+int secp256k1_schnorr_partial_combine(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char * const *sig64sin, int n) {
+ ARG_CHECK(sig64 != NULL);
+ ARG_CHECK(n >= 1);
+ ARG_CHECK(sig64sin != NULL);
+ return secp256k1_schnorr_sig_combine(sig64, n, sig64sin);
+}
+
+#endif
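
A hedged usage sketch of the three public entry points above (not part of the
patch); secp256k1_ec_pubkey_create is assumed from the public header, and the
context is assumed to carry both the signing and verification tables:

    #include <string.h>
    #include "include/secp256k1.h"
    #include "include/secp256k1_schnorr.h"

    /* Sign, verify, then recover the public key from the 64-byte signature
     * alone; returns 1 if all three steps agree. */
    static int example_schnorr(const secp256k1_context *ctx,
                               const unsigned char msg32[32],
                               const unsigned char seckey[32]) {
        unsigned char sig64[64];
        secp256k1_pubkey pub, recovered;

        if (!secp256k1_ec_pubkey_create(ctx, &pub, seckey)) return 0;
        if (!secp256k1_schnorr_sign(ctx, sig64, msg32, seckey, NULL, NULL)) return 0;
        if (!secp256k1_schnorr_verify(ctx, sig64, msg32, &pub)) return 0;
        if (!secp256k1_schnorr_recover(ctx, &recovered, sig64, msg32)) return 0;
        return memcmp(&pub, &recovered, sizeof(pub)) == 0;
    }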
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr.h b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr.h
new file mode 100644
index 000000000..d227433d4
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr.h
@@ -0,0 +1,20 @@
+/***********************************************************************
+ * Copyright (c) 2014-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php. *
+ ***********************************************************************/
+
+#ifndef _SECP256K1_MODULE_SCHNORR_H_
+#define _SECP256K1_MODULE_SCHNORR_H_
+
+#include "scalar.h"
+#include "group.h"
+
+typedef void (*secp256k1_schnorr_msghash)(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32);
+
+static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32);
+static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32);
+static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32);
+static int secp256k1_schnorr_sig_combine(unsigned char *sig64, int n, const unsigned char * const *sig64ins);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr_impl.h
new file mode 100644
index 000000000..ed70390bb
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/schnorr_impl.h
@@ -0,0 +1,207 @@
+/***********************************************************************
+ * Copyright (c) 2014-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php. *
+ ***********************************************************************/
+
+#ifndef _SECP256K1_SCHNORR_IMPL_H_
+#define _SECP256K1_SCHNORR_IMPL_H_
+
+#include <string.h>
+
+#include "schnorr.h"
+#include "num.h"
+#include "field.h"
+#include "group.h"
+#include "ecmult.h"
+#include "ecmult_gen.h"
+
+/**
+ * A custom Schnorr-based signature scheme. It supports multiparty signing, public key
+ * recovery and batch validation.
+ *
+ * Rationale for verifying R's y coordinate:
+ * In order to support batch validation and public key recovery, the full R point must
+ * be known to verifiers, rather than just its x coordinate. In order to not risk
+ * being more strict in batch validation than normal validation, validators must be
+ * required to reject signatures with incorrect y coordinate. This is only possible
+ * by including a (relatively slow) field inverse, or a field square root. However,
+ * batch validation offers potentially much higher benefits than this cost.
+ *
+ * Rationale for having an implicit y coordinate oddness:
+ * If we commit to having the full R point known to verifiers, there are two mechanisms:
+ * either include its oddness in the signature, or give it an implicit fixed value.
+ * As the R y coordinate can be flipped by a simple negation of the nonce, we choose the
+ * latter, as it comes with nearly zero impact on signing or validation performance, and
+ * saves a byte in the signature.
+ *
+ * Signing:
+ * Inputs: 32-byte message m, 32-byte scalar key x (!=0), 32-byte scalar nonce k (!=0)
+ *
+ * Compute point R = k * G. Reject nonce if R's y coordinate is odd (or negate nonce).
+ * Compute 32-byte r, the serialization of R's x coordinate.
+ * Compute scalar h = Hash(r || m). Reject nonce if h == 0 or h >= order.
+ * Compute scalar s = k - h * x.
+ * The signature is (r, s).
+ *
+ *
+ * Verification:
+ * Inputs: 32-byte message m, public key point Q, signature: (32-byte r, scalar s)
+ *
+ * Signature is invalid if s >= order.
+ * Signature is invalid if r >= p.
+ * Compute scalar h = Hash(r || m). Signature is invalid if h == 0 or h >= order.
+ * Option 1 (faster for single verification):
+ * Compute point R = h * Q + s * G. Signature is invalid if R is infinity or R's y coordinate is odd.
+ * Signature is valid if the serialization of R's x coordinate equals r.
+ * Option 2 (allows batch validation and pubkey recovery):
+ * Decompress x coordinate r into point R, with odd y coordinate. Fail if R is not on the curve.
+ * Signature is valid if R + h * Q + s * G == 0.
+ */
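+
+/* Why verification Option 1 matches signing: with Q = x*G and s = k - h*x,
+ * h*Q + s*G = h*x*G + (k - h*x)*G = k*G = R, so an honestly generated
+ * signature reproduces the signer's R exactly. */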
+
+static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32) {
+ secp256k1_gej Rj;
+ secp256k1_ge Ra;
+ unsigned char h32[32];
+ secp256k1_scalar h, s;
+ int overflow;
+ secp256k1_scalar n;
+
+ if (secp256k1_scalar_is_zero(key) || secp256k1_scalar_is_zero(nonce)) {
+ return 0;
+ }
+ n = *nonce;
+
+ secp256k1_ecmult_gen(ctx, &Rj, &n);
+ if (pubnonce != NULL) {
+ secp256k1_gej_add_ge(&Rj, &Rj, pubnonce);
+ }
+ secp256k1_ge_set_gej(&Ra, &Rj);
+ secp256k1_fe_normalize(&Ra.y);
+ if (secp256k1_fe_is_odd(&Ra.y)) {
+ /* R's y coordinate is odd, which is not allowed (see rationale above).
+ Force it to be even by negating the nonce. Note that this even works
+ for multiparty signing, as the R point is known to all participants,
+           who can all decide to flip the sign in unison, resulting in the
+           overall R point being negated too. */
+ secp256k1_scalar_negate(&n, &n);
+ }
+ secp256k1_fe_normalize(&Ra.x);
+ secp256k1_fe_get_b32(sig64, &Ra.x);
+ hash(h32, sig64, msg32);
+ overflow = 0;
+ secp256k1_scalar_set_b32(&h, h32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&h)) {
+ secp256k1_scalar_clear(&n);
+ return 0;
+ }
+ secp256k1_scalar_mul(&s, &h, key);
+ secp256k1_scalar_negate(&s, &s);
+ secp256k1_scalar_add(&s, &s, &n);
+ secp256k1_scalar_clear(&n);
+ secp256k1_scalar_get_b32(sig64 + 32, &s);
+ return 1;
+}
+
+static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) {
+ secp256k1_gej Qj, Rj;
+ secp256k1_ge Ra;
+ secp256k1_fe Rx;
+ secp256k1_scalar h, s;
+ unsigned char hh[32];
+ int overflow;
+
+ if (secp256k1_ge_is_infinity(pubkey)) {
+ return 0;
+ }
+ hash(hh, sig64, msg32);
+ overflow = 0;
+ secp256k1_scalar_set_b32(&h, hh, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&h)) {
+ return 0;
+ }
+ overflow = 0;
+ secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow);
+ if (overflow) {
+ return 0;
+ }
+ if (!secp256k1_fe_set_b32(&Rx, sig64)) {
+ return 0;
+ }
+ secp256k1_gej_set_ge(&Qj, pubkey);
+ secp256k1_ecmult(ctx, &Rj, &Qj, &h, &s);
+ if (secp256k1_gej_is_infinity(&Rj)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej_var(&Ra, &Rj);
+ secp256k1_fe_normalize_var(&Ra.y);
+ if (secp256k1_fe_is_odd(&Ra.y)) {
+ return 0;
+ }
+ return secp256k1_fe_equal_var(&Rx, &Ra.x);
+}
+
+static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) {
+ secp256k1_gej Qj, Rj;
+ secp256k1_ge Ra;
+ secp256k1_fe Rx;
+ secp256k1_scalar h, s;
+ unsigned char hh[32];
+ int overflow;
+
+ hash(hh, sig64, msg32);
+ overflow = 0;
+ secp256k1_scalar_set_b32(&h, hh, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&h)) {
+ return 0;
+ }
+ overflow = 0;
+ secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow);
+ if (overflow) {
+ return 0;
+ }
+ if (!secp256k1_fe_set_b32(&Rx, sig64)) {
+ return 0;
+ }
+ if (!secp256k1_ge_set_xo_var(&Ra, &Rx, 0)) {
+ return 0;
+ }
+ secp256k1_gej_set_ge(&Rj, &Ra);
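+    /* From R = h*Q + s*G it follows that Q = (R - s*G) * h^-1; this is
+     * computed below as Q = h^-1 * R + (-s * h^-1) * G in one ecmult call. */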
+ secp256k1_scalar_inverse_var(&h, &h);
+ secp256k1_scalar_negate(&s, &s);
+ secp256k1_scalar_mul(&s, &s, &h);
+ secp256k1_ecmult(ctx, &Qj, &Rj, &h, &s);
+ if (secp256k1_gej_is_infinity(&Qj)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej(pubkey, &Qj);
+ return 1;
+}
+
+static int secp256k1_schnorr_sig_combine(unsigned char *sig64, int n, const unsigned char * const *sig64ins) {
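+    /* All inputs must share the same r (the first 32 bytes): the partial
+     * signatures commit to one combined R, and since h = Hash(r || m) is
+     * common to all of them, sum(s_i) = sum(k_i) - h * sum(x_i) is a valid
+     * signature for the combined key sum(x_i). */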
+ secp256k1_scalar s = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ int i;
+ for (i = 0; i < n; i++) {
+ secp256k1_scalar si;
+ int overflow;
+ secp256k1_scalar_set_b32(&si, sig64ins[i] + 32, &overflow);
+ if (overflow) {
+ return -1;
+ }
+ if (i) {
+ if (memcmp(sig64ins[i - 1], sig64ins[i], 32) != 0) {
+ return -1;
+ }
+ }
+ secp256k1_scalar_add(&s, &s, &si);
+ }
+ if (secp256k1_scalar_is_zero(&s)) {
+ return 0;
+ }
+ memcpy(sig64, sig64ins[0], 32);
+ secp256k1_scalar_get_b32(sig64 + 32, &s);
+ secp256k1_scalar_clear(&s);
+ return 1;
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/schnorr/tests_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/tests_impl.h
new file mode 100644
index 000000000..79737f748
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/schnorr/tests_impl.h
@@ -0,0 +1,175 @@
+/**********************************************************************
+ * Copyright (c) 2014-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef SECP256K1_MODULE_SCHNORR_TESTS
+#define SECP256K1_MODULE_SCHNORR_TESTS
+
+#include "include/secp256k1_schnorr.h"
+
+void test_schnorr_end_to_end(void) {
+ unsigned char privkey[32];
+ unsigned char message[32];
+ unsigned char schnorr_signature[64];
+ secp256k1_pubkey pubkey, recpubkey;
+
+ /* Generate a random key and message. */
+ {
+ secp256k1_scalar key;
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(privkey, &key);
+ secp256k1_rand256_test(message);
+ }
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Schnorr sign. */
+ CHECK(secp256k1_schnorr_sign(ctx, schnorr_signature, message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 1);
+ CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) == 1);
+ CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+ /* Destroy signature and verify again. */
+ schnorr_signature[secp256k1_rand32() % 64] += 1 + (secp256k1_rand32() % 255);
+ CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 0);
+ CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) != 1 ||
+ memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+}
+
+/** Horribly broken hash function. Do not use for anything but tests. */
+void test_schnorr_hash(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) {
+ int i;
+ for (i = 0; i < 32; i++) {
+ h32[i] = r32[i] ^ msg32[i];
+ }
+}
+
+void test_schnorr_sign_verify(void) {
+ unsigned char msg32[32];
+ unsigned char sig64[3][64];
+ secp256k1_gej pubkeyj[3];
+ secp256k1_ge pubkey[3];
+ secp256k1_scalar nonce[3], key[3];
+ int i = 0;
+ int k;
+
+ secp256k1_rand256_test(msg32);
+
+ for (k = 0; k < 3; k++) {
+ random_scalar_order_test(&key[k]);
+
+ do {
+ random_scalar_order_test(&nonce[k]);
+ if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64[k], &key[k], &nonce[k], NULL, &test_schnorr_hash, msg32)) {
+ break;
+ }
+ } while(1);
+
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubkeyj[k], &key[k]);
+ secp256k1_ge_set_gej_var(&pubkey[k], &pubkeyj[k]);
+ CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32));
+
+ for (i = 0; i < 4; i++) {
+ int pos = secp256k1_rand32() % 64;
+ int mod = 1 + (secp256k1_rand32() % 255);
+ sig64[k][pos] ^= mod;
+ CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32) == 0);
+ sig64[k][pos] ^= mod;
+ }
+ }
+}
+
+void test_schnorr_threshold(void) {
+ unsigned char msg[32];
+ unsigned char sec[5][32];
+ secp256k1_pubkey pub[5];
+ unsigned char nonce[5][32];
+ secp256k1_pubkey pubnonce[5];
+ unsigned char sig[5][64];
+ const unsigned char* sigs[5];
+ unsigned char allsig[64];
+ const secp256k1_pubkey* pubs[5];
+ secp256k1_pubkey allpub;
+ int n, i;
+ int damage;
+ int ret = 0;
+
+ damage = (secp256k1_rand32() % 2) ? (1 + (secp256k1_rand32() % 4)) : 0;
+ secp256k1_rand256_test(msg);
+ n = 2 + (secp256k1_rand32() % 4);
+ for (i = 0; i < n; i++) {
+ do {
+ secp256k1_rand256_test(sec[i]);
+ } while (!secp256k1_ec_seckey_verify(ctx, sec[i]));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pub[i], sec[i]));
+ CHECK(secp256k1_schnorr_generate_nonce_pair(ctx, &pubnonce[i], nonce[i], msg, sec[i], NULL, NULL));
+ pubs[i] = &pub[i];
+ }
+ if (damage == 1) {
+ nonce[secp256k1_rand32() % n][secp256k1_rand32() % 32] ^= 1 + (secp256k1_rand32() % 255);
+ } else if (damage == 2) {
+ sec[secp256k1_rand32() % n][secp256k1_rand32() % 32] ^= 1 + (secp256k1_rand32() % 255);
+ }
+ for (i = 0; i < n; i++) {
+ secp256k1_pubkey allpubnonce;
+ const secp256k1_pubkey *pubnonces[4];
+ int j;
+ for (j = 0; j < i; j++) {
+ pubnonces[j] = &pubnonce[j];
+ }
+ for (j = i + 1; j < n; j++) {
+ pubnonces[j - 1] = &pubnonce[j];
+ }
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &allpubnonce, pubnonces, n - 1));
+ ret |= (secp256k1_schnorr_partial_sign(ctx, sig[i], msg, sec[i], &allpubnonce, nonce[i]) != 1) * 1;
+ sigs[i] = sig[i];
+ }
+ if (damage == 3) {
+ sig[secp256k1_rand32() % n][secp256k1_rand32() % 64] ^= 1 + (secp256k1_rand32() % 255);
+ }
+ ret |= (secp256k1_ec_pubkey_combine(ctx, &allpub, pubs, n) != 1) * 2;
+ if ((ret & 1) == 0) {
+ ret |= (secp256k1_schnorr_partial_combine(ctx, allsig, sigs, n) != 1) * 4;
+ }
+ if (damage == 4) {
+ allsig[secp256k1_rand32() % 32] ^= 1 + (secp256k1_rand32() % 255);
+ }
+ if ((ret & 7) == 0) {
+ ret |= (secp256k1_schnorr_verify(ctx, allsig, msg, &allpub) != 1) * 8;
+ }
+ CHECK((ret == 0) == (damage == 0));
+}
+
+void test_schnorr_recovery(void) {
+ unsigned char msg32[32];
+ unsigned char sig64[64];
+ secp256k1_ge Q;
+
+ secp256k1_rand256_test(msg32);
+ secp256k1_rand256_test(sig64);
+ secp256k1_rand256_test(sig64 + 32);
+ if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1) {
+ CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1);
+ }
+}
+
+void run_schnorr_tests(void) {
+ int i;
+ for (i = 0; i < 32*count; i++) {
+ test_schnorr_end_to_end();
+ }
+ for (i = 0; i < 32 * count; i++) {
+ test_schnorr_sign_verify();
+ }
+ for (i = 0; i < 16 * count; i++) {
+ test_schnorr_recovery();
+ }
+ for (i = 0; i < 10 * count; i++) {
+ test_schnorr_threshold();
+ }
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num.h b/crypto/secp256k1/libsecp256k1/src/num.h
new file mode 100644
index 000000000..ebfa71eb4
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num.h
@@ -0,0 +1,68 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_
+#define _SECP256K1_NUM_
+
+#ifndef USE_NUM_NONE
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(USE_NUM_GMP)
+#include "num_gmp.h"
+#else
+#error "Please select num implementation"
+#endif
+
+/** Copy a number. */
+static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a);
+
+/** Convert a number's absolute value to a binary big-endian string.
+ * There must be enough space. */
+static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a);
+
+/** Set a number to the value of a binary big-endian string. */
+static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen);
+
+/** Compute a modular inverse. The input must be less than the modulus. */
+static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m);
+
+/** Compare the absolute value of two numbers. */
+static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Test whether two numbers are equal (including sign). */
+static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Add two (signed) numbers. */
+static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Subtract two (signed) numbers. */
+static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Multiply two (signed) numbers. */
+static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Replace a number by its remainder modulo m. m's sign is ignored. The result is a number between 0 and m-1,
+ * even if r was negative. */
+static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m);
+
+/** Right-shift the passed number by 'bits' bits. */
+static void secp256k1_num_shift(secp256k1_num *r, int bits);
+
+/** Check whether a number is zero. */
+static int secp256k1_num_is_zero(const secp256k1_num *a);
+
+/** Check whether a number is strictly negative. */
+static int secp256k1_num_is_neg(const secp256k1_num *a);
+
+/** Change a number's sign. */
+static void secp256k1_num_negate(secp256k1_num *r);
+
+#endif
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num_gmp.h b/crypto/secp256k1/libsecp256k1/src/num_gmp.h
new file mode 100644
index 000000000..7dd813088
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num_gmp.h
@@ -0,0 +1,20 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_REPR_
+#define _SECP256K1_NUM_REPR_
+
+#include <gmp.h>
+
+#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS)
+
+typedef struct {
+ mp_limb_t data[2*NUM_LIMBS];
+ int neg;
+ int limbs;
+} secp256k1_num;
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h b/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h
new file mode 100644
index 000000000..f43e7a56c
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h
@@ -0,0 +1,260 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_REPR_IMPL_H_
+#define _SECP256K1_NUM_REPR_IMPL_H_
+
+#include <string.h>
+#include <stdlib.h>
+#include <gmp.h>
+
+#include "util.h"
+#include "num.h"
+
+#ifdef VERIFY
+static void secp256k1_num_sanity(const secp256k1_num *a) {
+ VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0));
+}
+#else
+#define secp256k1_num_sanity(a) do { } while(0)
+#endif
+
+static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a) {
+ *r = *a;
+}
+
+static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a) {
+ unsigned char tmp[65];
+ int len = 0;
+ int shift = 0;
+ if (a->limbs>1 || a->data[0] != 0) {
+ len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs);
+ }
+ while (shift < len && tmp[shift] == 0) shift++;
+ VERIFY_CHECK(len-shift <= (int)rlen);
+ memset(r, 0, rlen - len + shift);
+ if (len > shift) {
+ memcpy(r + rlen - len + shift, tmp + shift, len - shift);
+ }
+ memset(tmp, 0, sizeof(tmp));
+}
+
+static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen) {
+ int len;
+ VERIFY_CHECK(alen > 0);
+ VERIFY_CHECK(alen <= 64);
+ len = mpn_set_str(r->data, a, alen, 256);
+ if (len == 0) {
+ r->data[0] = 0;
+ len = 1;
+ }
+ VERIFY_CHECK(len <= NUM_LIMBS*2);
+ r->limbs = len;
+ r->neg = 0;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs);
+ r->limbs = a->limbs;
+ if (c != 0) {
+ VERIFY_CHECK(r->limbs < 2*NUM_LIMBS);
+ r->data[r->limbs++] = c;
+ }
+}
+
+static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
+ VERIFY_CHECK(c == 0);
+ r->limbs = a->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) {
+ secp256k1_num_sanity(r);
+ secp256k1_num_sanity(m);
+
+ if (r->limbs >= m->limbs) {
+ mp_limb_t t[2*NUM_LIMBS];
+ mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs);
+ memset(t, 0, sizeof(t));
+ r->limbs = m->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+ }
+
+ if (r->neg && (r->limbs > 1 || r->data[0] != 0)) {
+ secp256k1_num_sub_abs(r, m, r);
+ r->neg = 0;
+ }
+}
+
+static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m) {
+ int i;
+ mp_limb_t g[NUM_LIMBS+1];
+ mp_limb_t u[NUM_LIMBS+1];
+ mp_limb_t v[NUM_LIMBS+1];
+ mp_size_t sn;
+ mp_size_t gn;
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(m);
+
+ /** mpn_gcdext computes: (G,S) = gcdext(U,V), where
+ * * G = gcd(U,V)
+ * * G = U*S + V*T
+ * * U has equal or more limbs than V, and V has no padding
+ * If we set U to be (a padded version of) a, and V = m:
+ * G = a*S + m*T
+ * G = a*S mod m
+ * Assuming G=1:
+ * S = 1/a mod m
+ */
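+    /* Worked example: a = 3, m = 11 gives 1 = 3*4 + 11*(-1), so S = 4
+     * and indeed 3*4 = 12 = 1 (mod 11). */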
+ VERIFY_CHECK(m->limbs <= NUM_LIMBS);
+ VERIFY_CHECK(m->data[m->limbs-1] != 0);
+ for (i = 0; i < m->limbs; i++) {
+ u[i] = (i < a->limbs) ? a->data[i] : 0;
+ v[i] = m->data[i];
+ }
+ sn = NUM_LIMBS+1;
+ gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs);
+ VERIFY_CHECK(gn == 1);
+ VERIFY_CHECK(g[0] == 1);
+ r->neg = a->neg ^ m->neg;
+ if (sn < 0) {
+ mpn_sub(r->data, m->data, m->limbs, r->data, -sn);
+ r->limbs = m->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+ } else {
+ r->limbs = sn;
+ }
+ memset(g, 0, sizeof(g));
+ memset(u, 0, sizeof(u));
+ memset(v, 0, sizeof(v));
+}
+
+static int secp256k1_num_is_zero(const secp256k1_num *a) {
+ return (a->limbs == 1 && a->data[0] == 0);
+}
+
+static int secp256k1_num_is_neg(const secp256k1_num *a) {
+ return (a->limbs > 1 || a->data[0] != 0) && a->neg;
+}
+
+static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) {
+ if (a->limbs > b->limbs) {
+ return 1;
+ }
+ if (a->limbs < b->limbs) {
+ return -1;
+ }
+ return mpn_cmp(a->data, b->data, a->limbs);
+}
+
+static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b) {
+ if (a->limbs > b->limbs) {
+ return 0;
+ }
+ if (a->limbs < b->limbs) {
+ return 0;
+ }
+ if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) {
+ return 0;
+ }
+ return mpn_cmp(a->data, b->data, a->limbs) == 0;
+}
+
+static void secp256k1_num_subadd(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b, int bneg) {
+ if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */
+ r->neg = a->neg;
+ if (a->limbs >= b->limbs) {
+ secp256k1_num_add_abs(r, a, b);
+ } else {
+ secp256k1_num_add_abs(r, b, a);
+ }
+ } else {
+ if (secp256k1_num_cmp(a, b) > 0) {
+ r->neg = a->neg;
+ secp256k1_num_sub_abs(r, a, b);
+ } else {
+ r->neg = b->neg ^ bneg;
+ secp256k1_num_sub_abs(r, b, a);
+ }
+ }
+}
+
+static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ secp256k1_num_subadd(r, a, b, 0);
+}
+
+static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ secp256k1_num_subadd(r, a, b, 1);
+}
+
+static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t tmp[2*NUM_LIMBS+1];
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+
+ VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1);
+ if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) {
+ r->limbs = 1;
+ r->neg = 0;
+ r->data[0] = 0;
+ return;
+ }
+ if (a->limbs >= b->limbs) {
+ mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs);
+ } else {
+ mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs);
+ }
+ r->limbs = a->limbs + b->limbs;
+ if (r->limbs > 1 && tmp[r->limbs - 1]==0) {
+ r->limbs--;
+ }
+ VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS);
+ mpn_copyi(r->data, tmp, r->limbs);
+ r->neg = a->neg ^ b->neg;
+ memset(tmp, 0, sizeof(tmp));
+}
+
+static void secp256k1_num_shift(secp256k1_num *r, int bits) {
+ if (bits % GMP_NUMB_BITS) {
+ /* Shift within limbs. */
+ mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
+ }
+ if (bits >= GMP_NUMB_BITS) {
+ int i;
+ /* Shift full limbs. */
+ for (i = 0; i < r->limbs; i++) {
+ int index = i + (bits / GMP_NUMB_BITS);
+ if (index < r->limbs && index < 2*NUM_LIMBS) {
+ r->data[i] = r->data[index];
+ } else {
+ r->data[i] = 0;
+ }
+ }
+ }
+ while (r->limbs>1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_negate(secp256k1_num *r) {
+ r->neg ^= 1;
+}
+
+#endif
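
The modular-inverse routine above leans on the mpn_gcdext identity spelled out in its comment: with U = a, V = m and gcd(a, m) = 1, the returned cofactor S satisfies a*S == 1 (mod m), and a negative S (signalled by sn < 0) is lifted into [0, m) by subtracting it from m. A minimal standalone sketch of that identity on single-limb operands, not part of the library (names illustrative; link with -lgmp):

    #include <stdio.h>
    #include <gmp.h>

    int main(void) {
        mp_limb_t g[2], s[2], inv;
        mp_limb_t u[1] = {3};   /* a; mpn_gcdext clobbers u and v */
        mp_limb_t v[1] = {7};   /* m */
        mp_size_t sn, gn;
        gn = mpn_gcdext(g, s, &sn, u, 1, v, 1);
        /* gcd(3, 7) == 1, so gn == 1 and g[0] == 1. S may come back
         * negative (sn < 0); then the inverse is m - |S|. */
        inv = (sn < 0) ? 7 - s[0] : s[0];
        printf("1/3 mod 7 = %lu\n", (unsigned long)inv); /* prints 5 */
        return 0;
    }
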
diff --git a/crypto/secp256k1/libsecp256k1/src/num_impl.h b/crypto/secp256k1/libsecp256k1/src/num_impl.h
new file mode 100644
index 000000000..0b0e3a072
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num_impl.h
@@ -0,0 +1,24 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_IMPL_H_
+#define _SECP256K1_NUM_IMPL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include "num.h"
+
+#if defined(USE_NUM_GMP)
+#include "num_gmp_impl.h"
+#elif defined(USE_NUM_NONE)
+/* Nothing. */
+#else
+#error "Please select num implementation"
+#endif
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar.h b/crypto/secp256k1/libsecp256k1/src/scalar.h
new file mode 100644
index 000000000..b590ccd6d
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar.h
@@ -0,0 +1,104 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_
+#define _SECP256K1_SCALAR_
+
+#include "num.h"
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(USE_SCALAR_4X64)
+#include "scalar_4x64.h"
+#elif defined(USE_SCALAR_8X32)
+#include "scalar_8x32.h"
+#else
+#error "Please select scalar implementation"
+#endif
+
+/** Clear a scalar to prevent the leak of sensitive data. */
+static void secp256k1_scalar_clear(secp256k1_scalar *r);
+
+/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */
+static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
+
+/** Access bits from a scalar. Not constant time. */
+static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
+
+/** Set a scalar from a big endian byte array. */
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow);
+
+/** Set a scalar to an unsigned integer. */
+static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v);
+
+/** Convert a scalar to a byte array. */
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a);
+
+/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag);
+
+/** Multiply two scalars (modulo the group order). */
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+/** Shift a scalar right by some amount strictly between 0 and 16, returning
+ * the low bits that were shifted off. */
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n);
+
+/** Compute the square of a scalar (modulo the group order). */
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the inverse of a scalar (modulo the group order). */
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the complement of a scalar (modulo the group order). */
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Check whether a scalar equals zero. */
+static int secp256k1_scalar_is_zero(const secp256k1_scalar *a);
+
+/** Check whether a scalar equals one. */
+static int secp256k1_scalar_is_one(const secp256k1_scalar *a);
+
+/** Check whether a scalar, considered as a nonnegative integer, is even. */
+static int secp256k1_scalar_is_even(const secp256k1_scalar *a);
+
+/** Check whether a scalar is higher than the group order divided by 2. */
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a);
+
+/** Conditionally negate a number, in constant time.
+ * Returns -1 if the number was negated, 1 otherwise. */
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag);
+
+#ifndef USE_NUM_NONE
+/** Convert a scalar to a number. */
+static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a);
+
+/** Get the order of the group as a number. */
+static void secp256k1_scalar_order_get_num(secp256k1_num *r);
+#endif
+
+/** Compare two scalars. */
+static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+#ifdef USE_ENDOMORPHISM
+/** Find r1 and r2 such that r1+r2*2^128 = a. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
+/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
+#endif
+
+/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
+static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
+
+#endif
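
All of these declarations are static, so the API is internal to the library's own translation unit. Still, a short hedged sketch of how the pieces compose may help (illustrative values, not a real test vector):

    static void scalar_usage_sketch(void) {
        unsigned char b32[32] = {0};
        secp256k1_scalar a, b, sum;
        int overflow;
        b32[31] = 2;                                  /* big-endian 2 */
        secp256k1_scalar_set_b32(&a, b32, &overflow); /* overflow == 0 */
        secp256k1_scalar_set_int(&b, 3);
        secp256k1_scalar_add(&sum, &a, &b);           /* sum == 5 (mod n) */
        secp256k1_scalar_negate(&sum, &sum);          /* sum == n - 5 */
        secp256k1_scalar_get_b32(b32, &sum);          /* serialize back */
        secp256k1_scalar_clear(&sum);                 /* wipe sensitive data */
    }
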
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h b/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h
new file mode 100644
index 000000000..cff406038
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef struct {
+ uint64_t d[4];
+} secp256k1_scalar;
+
+#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}}
+
+#endif
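
SECP256K1_SCALAR_CONST takes eight 32-bit words most-significant first and packs adjacent pairs into little-endian 64-bit limbs, so d[0] ends up holding the least significant bits. A small illustration (assumed value):

    /* The scalar 1: every high word zero, d0 = 1. */
    static const secp256k1_scalar scalar_one =
        SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
    /* scalar_one.d == { 1, 0, 0, 0 }: (d1 << 32 | d0) lands in d[0]. */
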
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h
new file mode 100644
index 000000000..cbec34d71
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h
@@ -0,0 +1,947 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+/* Limbs of the secp256k1 order. */
+#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
+#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
+#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
+#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+
+/* Limbs of 2^256 minus the secp256k1 order. */
+#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
+#define SECP256K1_N_C_1 (~SECP256K1_N_1)
+#define SECP256K1_N_C_2 (1)
+
+/* Limbs of half the secp256k1 order. */
+#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
+#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
+#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+ r->d[0] = 0;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
+ return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 6 == offset >> 6) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ VERIFY_CHECK((offset >> 6) + 1 < 4);
+ return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
+ }
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
+ no |= (a->d[2] < SECP256K1_N_2);
+ yes |= (a->d[2] > SECP256K1_N_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_1);
+ yes |= (a->d[1] > SECP256K1_N_1) & ~no;
+ yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
+ return yes;
+}
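
The yes/no pair above performs a branchless lexicographic comparison against the order: a limb may only decide the outcome when no higher limb already has, and no secret-dependent branch is ever taken. A two-limb toy version of the same pattern, for exposition only:

    static int toy_geq(uint64_t d1, uint64_t d0, uint64_t n1, uint64_t n0) {
        int yes = 0;
        int no = 0;
        no  |= (d1 < n1);        /* high limb smaller: definitely below */
        yes |= (d1 > n1) & ~no;  /* high limb larger: definitely >= */
        yes |= (d0 >= n0) & ~no; /* high limbs equal: low limb decides */
        return yes;              /* 1 iff (d1,d0) >= (n1,n0) */
    }
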
+
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
+ uint128_t t;
+ VERIFY_CHECK(overflow <= 1);
+ t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint64_t)r->d[3];
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+ return overflow;
+}
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ int overflow;
+ uint128_t t = (uint128_t)a->d[0] + b->d[0];
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[1] + b->d[1];
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[2] + b->d[2];
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[3] + b->d[3];
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ uint128_t t;
+ VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
+ t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 64) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
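
The `bit += ((uint32_t) flag - 1) & 0x100` gadget is worth isolating: when flag is 0 the addend is 0x100, which pushes bit >> 6 out of the 0..3 limb range so every equality test above yields 0, yet the identical instruction sequence still executes. A standalone sketch of just the index computation (illustrative only):

    static unsigned int cadd_bit_limb_index(unsigned int bit, int flag) {
        bit += ((uint32_t)flag - 1) & 0x100; /* flag == 0: bit += 256 */
        return bit >> 6;  /* in 0..3 only when flag != 0 (for bit < 256) */
    }
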
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ int over;
+ r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
+ r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
+ r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
+ r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
+ over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+ if (overflow) {
+ *overflow = over;
+ }
+}
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
+ bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
+ bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
+ bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
+ uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+ r->d[0] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
+ r->d[1] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
+ r->d[2] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
+ r->d[3] = t & nonzero;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[3] < SECP256K1_N_H_3);
+ yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
+ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
+ yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
+ return yes;
+}
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ /* If flag = 0, mask = 00...00 and this is a no-op;
+ * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
+ uint64_t mask = !flag - 1;
+ uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
+ uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+ r->d[0] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
+ r->d[1] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
+ r->d[2] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
+ r->d[3] = t & nonzero;
+ return 2 * (mask == 0) - 1;
+}
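
The mask derivation `!flag - 1` is the heart of the routine: !flag is 0 or 1, so the subtraction yields all ones exactly when flag is set, and XOR-with-mask plus a masked constant then selects between a value and its two's-complement negation without branching on secret data. In isolation (exposition only, plain 2^64 arithmetic rather than mod n):

    static uint64_t negate_if(uint64_t x, int flag) {
        uint64_t mask = (uint64_t)(!flag) - 1; /* flag ? ~0 : 0 */
        return (x ^ mask) + (1 & mask);        /* flag ? -x : x (mod 2^64) */
    }
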
+
+/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
+
+/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd(a,b) { \
+ uint64_t tl, th; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c1 += th; /* overflow is handled on the next line */ \
+ c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+}
+
+/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
+#define muladd_fast(a,b) { \
+ uint64_t tl, th; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c1 += th; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK(c1 >= th); \
+}
+
+/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd2(a,b) { \
+ uint64_t tl, th, th2, tl2; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
+ c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
+ tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
+ th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c0 += tl2; /* overflow is handled on the next line */ \
+ th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
+ c1 += th2; /* overflow is handled on the next line */ \
+ c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+}
+
+/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define sumadd(a) { \
+ unsigned int over; \
+ c0 += (a); /* overflow is handled on the next line */ \
+ over = (c0 < (a)) ? 1 : 0; \
+ c1 += over; /* overflow is handled on the next line */ \
+ c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+}
+
+/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
+#define sumadd_fast(a) { \
+ c0 += (a); /* overflow is handled on the next line */ \
+ c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
+#define extract(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = c2; \
+ c2 = 0; \
+}
+
+/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
+#define extract_fast(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = 0; \
+ VERIFY_CHECK(c2 == 0); \
+}
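
Taken together, these macros maintain a 192-bit accumulator (c0,c1,c2) into which one column of schoolbook partial products is summed before extract peels off the finished low limb. A self-contained sketch of a single column with the macro bodies inlined (assumed standalone names; uint128_t standing in for the compiler's unsigned __int128):

    #include <stdint.h>
    typedef unsigned __int128 uint128_t;

    /* Accumulate a0*b0 + a1*b1 into (c0,c1,c2), exactly as two
     * muladd() invocations on one column would. */
    static void column_sketch(uint64_t out[3],
                              uint64_t a0, uint64_t b0,
                              uint64_t a1, uint64_t b1) {
        uint64_t c0 = 0, c1 = 0, c2 = 0;
        uint64_t tl, th;
        uint128_t t;

        t = (uint128_t)a0 * b0; tl = (uint64_t)t; th = (uint64_t)(t >> 64);
        c0 += tl; th += (c0 < tl); /* carry out of the low word */
        c1 += th; c2 += (c1 < th); /* carry out of the middle word */

        t = (uint128_t)a1 * b1; tl = (uint64_t)t; th = (uint64_t)(t >> 64);
        c0 += tl; th += (c0 < tl);
        c1 += th; c2 += (c1 < th);

        out[0] = c0; out[1] = c1; out[2] = c2; /* total < 2^129, so c2 <= 1 */
    }
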
+
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
+#ifdef USE_ASM_X86_64
+ /* Reduce 512 bits into 385. */
+ uint64_t m0, m1, m2, m3, m4, m5, m6;
+ uint64_t p0, p1, p2, p3, p4;
+ uint64_t c;
+
+ __asm__ __volatile__(
+ /* Preload. */
+ "movq 32(%%rsi), %%r11\n"
+ "movq 40(%%rsi), %%r12\n"
+ "movq 48(%%rsi), %%r13\n"
+ "movq 56(%%rsi), %%r14\n"
+ /* Initialize r8,r9,r10 */
+ "movq 0(%%rsi), %%r8\n"
+ "movq $0, %%r9\n"
+ "movq $0, %%r10\n"
+ /* (r8,r9) += n0 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* extract m0 */
+ "movq %%r8, %q0\n"
+ "movq $0, %%r8\n"
+ /* (r9,r10) += l1 */
+ "addq 8(%%rsi), %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r9,r10,r8) += n1 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += n0 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m1 */
+ "movq %%r9, %q1\n"
+ "movq $0, %%r9\n"
+ /* (r10,r8,r9) += l2 */
+ "addq 16(%%rsi), %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n2 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n1 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n0 */
+ "addq %%r11, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract m2 */
+ "movq %%r10, %q2\n"
+ "movq $0, %%r10\n"
+ /* (r8,r9,r10) += l3 */
+ "addq 24(%%rsi), %%r8\n"
+ "adcq $0, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n3 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n2 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n1 */
+ "addq %%r12, %%r8\n"
+ "adcq $0, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* extract m3 */
+ "movq %%r8, %q3\n"
+ "movq $0, %%r8\n"
+ /* (r9,r10,r8) += n3 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += n2 */
+ "addq %%r13, %%r9\n"
+ "adcq $0, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m4 */
+ "movq %%r9, %q4\n"
+ /* (r10,r8) += n3 */
+ "addq %%r14, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m5 */
+ "movq %%r10, %q5\n"
+ /* extract m6 */
+ "movq %%r8, %q6\n"
+ : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
+ : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
+
+ /* Reduce 385 bits into 258. */
+ __asm__ __volatile__(
+ /* Preload */
+ "movq %q9, %%r11\n"
+ "movq %q10, %%r12\n"
+ "movq %q11, %%r13\n"
+ /* Initialize (r8,r9,r10) */
+ "movq %q5, %%r8\n"
+ "movq $0, %%r9\n"
+ "movq $0, %%r10\n"
+ /* (r8,r9) += m4 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* extract p0 */
+ "movq %%r8, %q0\n"
+ "movq $0, %%r8\n"
+ /* (r9,r10) += m1 */
+ "addq %q6, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r9,r10,r8) += m5 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += m4 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract p1 */
+ "movq %%r9, %q1\n"
+ "movq $0, %%r9\n"
+ /* (r10,r8,r9) += m2 */
+ "addq %q7, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m6 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m5 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m4 */
+ "addq %%r11, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract p2 */
+ "movq %%r10, %q2\n"
+ /* (r8,r9) += m3 */
+ "addq %q8, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r8,r9) += m6 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* (r8,r9) += m5 */
+ "addq %%r12, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract p3 */
+ "movq %%r8, %q3\n"
+ /* (r9) += m6 */
+ "addq %%r13, %%r9\n"
+ /* extract p4 */
+ "movq %%r9, %q4\n"
+ : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
+ : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
+
+ /* Reduce 258 bits into 256. */
+ __asm__ __volatile__(
+ /* Preload */
+ "movq %q5, %%r10\n"
+ /* (rax,rdx) = p4 * c0 */
+ "movq %7, %%rax\n"
+ "mulq %%r10\n"
+ /* (rax,rdx) += p0 */
+ "addq %q1, %%rax\n"
+ "adcq $0, %%rdx\n"
+ /* extract r0 */
+ "movq %%rax, 0(%q6)\n"
+ /* Move to (r8,r9) */
+ "movq %%rdx, %%r8\n"
+ "movq $0, %%r9\n"
+ /* (r8,r9) += p1 */
+ "addq %q2, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r8,r9) += p4 * c1 */
+ "movq %8, %%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* Extract r1 */
+ "movq %%r8, 8(%q6)\n"
+ "movq $0, %%r8\n"
+ /* (r9,r8) += p4 */
+ "addq %%r10, %%r9\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r8) += p2 */
+ "addq %q3, %%r9\n"
+ "adcq $0, %%r8\n"
+ /* Extract r2 */
+ "movq %%r9, 16(%q6)\n"
+ "movq $0, %%r9\n"
+ /* (r8,r9) += p3 */
+ "addq %q4, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract r3 */
+ "movq %%r8, 24(%q6)\n"
+ /* Extract c */
+ "movq %%r9, %q0\n"
+ : "=g"(c)
+ : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
+#else
+ uint128_t c;
+ uint64_t c0, c1, c2;
+ uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
+ uint64_t m0, m1, m2, m3, m4, m5;
+ uint32_t m6;
+ uint64_t p0, p1, p2, p3;
+ uint32_t p4;
+
+ /* Reduce 512 bits into 385. */
+ /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
+ c0 = l[0]; c1 = 0; c2 = 0;
+ muladd_fast(n0, SECP256K1_N_C_0);
+ extract_fast(m0);
+ sumadd_fast(l[1]);
+ muladd(n1, SECP256K1_N_C_0);
+ muladd(n0, SECP256K1_N_C_1);
+ extract(m1);
+ sumadd(l[2]);
+ muladd(n2, SECP256K1_N_C_0);
+ muladd(n1, SECP256K1_N_C_1);
+ sumadd(n0);
+ extract(m2);
+ sumadd(l[3]);
+ muladd(n3, SECP256K1_N_C_0);
+ muladd(n2, SECP256K1_N_C_1);
+ sumadd(n1);
+ extract(m3);
+ muladd(n3, SECP256K1_N_C_1);
+ sumadd(n2);
+ extract(m4);
+ sumadd_fast(n3);
+ extract_fast(m5);
+ VERIFY_CHECK(c0 <= 1);
+ m6 = c0;
+
+ /* Reduce 385 bits into 258. */
+ /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
+ c0 = m0; c1 = 0; c2 = 0;
+ muladd_fast(m4, SECP256K1_N_C_0);
+ extract_fast(p0);
+ sumadd_fast(m1);
+ muladd(m5, SECP256K1_N_C_0);
+ muladd(m4, SECP256K1_N_C_1);
+ extract(p1);
+ sumadd(m2);
+ muladd(m6, SECP256K1_N_C_0);
+ muladd(m5, SECP256K1_N_C_1);
+ sumadd(m4);
+ extract(p2);
+ sumadd_fast(m3);
+ muladd_fast(m6, SECP256K1_N_C_1);
+ sumadd_fast(m5);
+ extract_fast(p3);
+ p4 = c0 + m6;
+ VERIFY_CHECK(p4 <= 2);
+
+ /* Reduce 258 bits into 256. */
+ /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
+ c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
+ r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
+ r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p2 + (uint128_t)p4;
+ r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p3;
+ r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+#endif
+
+ /* Final reduction of r. */
+ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+}
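
Both the assembly and C paths implement the same fold. Writing N_C = 2^256 - n (which is below 2^129, per the macros at the top of the file), the high half of any wide value can be folded into the low half modulo n, and the operand shrinks each time. As a worked bound (notation assumed):

    x = x_lo + 2^256 * x_hi  ==  x_lo + N_C * x_hi  (mod n)

    x_lo + N_C * x_hi <= (2^256 - 1) + (2^129 - 1)(2^256 - 1)
                       =  2^129 * (2^256 - 1)
                       =  2^385 - 2^129  <  2^385

Iterating the fold takes 512 bits to 385, 385 to 258, and 258 to 256 plus a one-bit carry, which the closing secp256k1_scalar_reduce call absorbs along with one conditional subtraction of n.
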
+
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
+#ifdef USE_ASM_X86_64
+ const uint64_t *pb = b->d;
+ __asm__ __volatile__(
+ /* Preload */
+ "movq 0(%%rdi), %%r15\n"
+ "movq 8(%%rdi), %%rbx\n"
+ "movq 16(%%rdi), %%rcx\n"
+ "movq 0(%%rdx), %%r11\n"
+ "movq 8(%%rdx), %%r12\n"
+ "movq 16(%%rdx), %%r13\n"
+ "movq 24(%%rdx), %%r14\n"
+ /* (rax,rdx) = a0 * b0 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r11\n"
+ /* Extract l0 */
+ "movq %%rax, 0(%%rsi)\n"
+ /* (r8,r9,r10) = (rdx) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += a0 * b1 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a1 * b0 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l1 */
+ "movq %%r8, 8(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += a0 * b2 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a1 * b1 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a2 * b0 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l2 */
+ "movq %%r9, 16(%%rsi)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += a0 * b3 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Preload a3 */
+ "movq 24(%%rdi), %%r15\n"
+ /* (r10,r8,r9) += a1 * b2 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += a2 * b1 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += a3 * b0 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract l3 */
+ "movq %%r10, 24(%%rsi)\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += a1 * b3 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a2 * b2 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a3 * b1 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l4 */
+ "movq %%r8, 32(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += a2 * b3 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a3 * b2 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l5 */
+ "movq %%r9, 40(%%rsi)\n"
+ /* (r10,r8) += a3 * b3 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ /* Extract l6 */
+ "movq %%r10, 48(%%rsi)\n"
+ /* Extract l7 */
+ "movq %%r8, 56(%%rsi)\n"
+ : "+d"(pb)
+ : "S"(l), "D"(a->d)
+ : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
+#else
+ /* 160 bit accumulator. */
+ uint64_t c0 = 0, c1 = 0;
+ uint32_t c2 = 0;
+
+ /* l[0..7] = a[0..3] * b[0..3]. */
+ muladd_fast(a->d[0], b->d[0]);
+ extract_fast(l[0]);
+ muladd(a->d[0], b->d[1]);
+ muladd(a->d[1], b->d[0]);
+ extract(l[1]);
+ muladd(a->d[0], b->d[2]);
+ muladd(a->d[1], b->d[1]);
+ muladd(a->d[2], b->d[0]);
+ extract(l[2]);
+ muladd(a->d[0], b->d[3]);
+ muladd(a->d[1], b->d[2]);
+ muladd(a->d[2], b->d[1]);
+ muladd(a->d[3], b->d[0]);
+ extract(l[3]);
+ muladd(a->d[1], b->d[3]);
+ muladd(a->d[2], b->d[2]);
+ muladd(a->d[3], b->d[1]);
+ extract(l[4]);
+ muladd(a->d[2], b->d[3]);
+ muladd(a->d[3], b->d[2]);
+ extract(l[5]);
+ muladd_fast(a->d[3], b->d[3]);
+ extract_fast(l[6]);
+ VERIFY_CHECK(c1 == 0);
+ l[7] = c0;
+#endif
+}
+
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
+#ifdef USE_ASM_X86_64
+ __asm__ __volatile__(
+ /* Preload */
+ "movq 0(%%rdi), %%r11\n"
+ "movq 8(%%rdi), %%r12\n"
+ "movq 16(%%rdi), %%r13\n"
+ "movq 24(%%rdi), %%r14\n"
+ /* (rax,rdx) = a0 * a0 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r11\n"
+ /* Extract l0 */
+ "movq %%rax, 0(%%rsi)\n"
+ /* (r8,r9,r10) = (rdx,0) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += 2 * a0 * a1 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l1 */
+ "movq %%r8, 8(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += 2 * a0 * a2 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a1 * a1 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l2 */
+ "movq %%r9, 16(%%rsi)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += 2 * a0 * a3 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += 2 * a1 * a2 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract l3 */
+ "movq %%r10, 24(%%rsi)\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += 2 * a1 * a3 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a2 * a2 */
+ "movq %%r13, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l4 */
+ "movq %%r8, 32(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += 2 * a2 * a3 */
+ "movq %%r13, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l5 */
+ "movq %%r9, 40(%%rsi)\n"
+ /* (r10,r8) += a3 * a3 */
+ "movq %%r14, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ /* Extract l6 */
+ "movq %%r10, 48(%%rsi)\n"
+ /* Extract l7 */
+ "movq %%r8, 56(%%rsi)\n"
+ :
+ : "S"(l), "D"(a->d)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
+#else
+ /* 160 bit accumulator. */
+ uint64_t c0 = 0, c1 = 0;
+ uint32_t c2 = 0;
+
+ /* l[0..7] = a[0..3] * a[0..3]. */
+ muladd_fast(a->d[0], a->d[0]);
+ extract_fast(l[0]);
+ muladd2(a->d[0], a->d[1]);
+ extract(l[1]);
+ muladd2(a->d[0], a->d[2]);
+ muladd(a->d[1], a->d[1]);
+ extract(l[2]);
+ muladd2(a->d[0], a->d[3]);
+ muladd2(a->d[1], a->d[2]);
+ extract(l[3]);
+ muladd2(a->d[1], a->d[3]);
+ muladd(a->d[2], a->d[2]);
+ extract(l[4]);
+ muladd2(a->d[2], a->d[3]);
+ extract(l[5]);
+ muladd_fast(a->d[3], a->d[3]);
+ extract_fast(l[6]);
+ VERIFY_CHECK(c1 == 0);
+ l[7] = c0;
+#endif
+}
+
+#undef sumadd
+#undef sumadd_fast
+#undef muladd
+#undef muladd_fast
+#undef muladd2
+#undef extract
+#undef extract_fast
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = r->d[0] & ((1 << n) - 1);
+ r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
+ r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
+ r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
+ r->d[3] = (r->d[3] >> n);
+ return ret;
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint64_t l[8];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ r1->d[0] = a->d[0];
+ r1->d[1] = a->d[1];
+ r1->d[2] = 0;
+ r1->d[3] = 0;
+ r2->d[0] = a->d[2];
+ r2->d[1] = a->d[3];
+ r2->d[2] = 0;
+ r2->d[3] = 0;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
+ uint64_t l[8];
+ unsigned int shiftlimbs;
+ unsigned int shiftlow;
+ unsigned int shifthigh;
+ VERIFY_CHECK(shift >= 256);
+ secp256k1_scalar_mul_512(l, a, b);
+ shiftlimbs = shift >> 6;
+ shiftlow = shift & 0x3F;
+ shifthigh = 64 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
+}
+
+#endif
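
One more trick in this file deserves a note: the closing secp256k1_scalar_cadd_bit call in secp256k1_scalar_mul_shift_var adds back bit shift-1 of the discarded low part, turning plain truncation into round-to-nearest. The same idea on a plain integer (illustrative only):

    #include <stdint.h>

    /* Round x / 2^k to the nearest integer (ties round up), 0 < k < 64. */
    static uint64_t rshift_round(uint64_t x, unsigned int k) {
        return (x >> k) + ((x >> (k - 1)) & 1);
    }
    /* rshift_round(5, 1) == 3, rshift_round(4, 1) == 2. */
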
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h b/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h
new file mode 100644
index 000000000..1319664f6
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef struct {
+ uint32_t d[8];
+} secp256k1_scalar;
+
+#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}}
+
+#endif
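
Unlike the 4x64 variant, this macro needs no repacking: the eight 32-bit words map one-to-one onto the limbs, least significant first. The same illustrative constant as before (assumed value):

    static const secp256k1_scalar scalar_one =
        SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
    /* scalar_one.d == { 1, 0, 0, 0, 0, 0, 0, 0 } */
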
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h
new file mode 100644
index 000000000..aae4f35c0
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h
@@ -0,0 +1,721 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+/* Limbs of the secp256k1 order. */
+#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
+#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
+#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
+#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
+#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
+#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)
+
+/* Limbs of 2^256 minus the secp256k1 order. */
+#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
+#define SECP256K1_N_C_1 (~SECP256K1_N_1)
+#define SECP256K1_N_C_2 (~SECP256K1_N_2)
+#define SECP256K1_N_C_3 (~SECP256K1_N_3)
+#define SECP256K1_N_C_4 (1)
+
+/* Limbs of half the secp256k1 order. */
+#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
+#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
+#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
+#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
+#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+ r->d[0] = 0;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
+ return (a->d[offset >> 5] >> (offset & 0x1F)) & ((((uint32_t)1) << count) - 1);
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 5 == offset >> 5) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ VERIFY_CHECK((offset >> 5) + 1 < 8);
+ return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
+ }
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */
+ no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */
+ no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. */
+ no |= (a->d[4] < SECP256K1_N_4);
+ yes |= (a->d[4] > SECP256K1_N_4) & ~no;
+ no |= (a->d[3] < SECP256K1_N_3) & ~yes;
+ yes |= (a->d[3] > SECP256K1_N_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_2) & ~yes;
+ yes |= (a->d[2] > SECP256K1_N_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_1) & ~no;
+ yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
+ return yes;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) {
+ uint64_t t;
+ VERIFY_CHECK(overflow <= 1);
+ t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
+ r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;
+ r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2;
+ r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3;
+ r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4;
+ r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[5];
+ r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[6];
+ r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[7];
+ r->d[7] = t & 0xFFFFFFFFUL;
+ return overflow;
+}
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ int overflow;
+ uint64_t t = (uint64_t)a->d[0] + b->d[0];
+ r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[1] + b->d[1];
+ r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[2] + b->d[2];
+ r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[3] + b->d[3];
+ r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[4] + b->d[4];
+ r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[5] + b->d[5];
+ r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[6] + b->d[6];
+ r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[7] + b->d[7];
+ r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
+ overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ uint64_t t;
+ VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
+ t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
+ r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));
+ r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F));
+ r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F));
+ r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F));
+ r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F));
+ r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F));
+ r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
+ r->d[7] = t & 0xFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 32) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ int over;
+ r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
+ r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
+ r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24;
+ r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24;
+ r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24;
+ r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
+ r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
+ r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
+ over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+ if (overflow) {
+ *overflow = over;
+ }
+}
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
+ bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
+ bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
+ bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
+ bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
+ bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
+ bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
+ bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
+ uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+ r->d[0] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
+ r->d[1] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[2]) + SECP256K1_N_2;
+ r->d[2] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[3]) + SECP256K1_N_3;
+ r->d[3] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[4]) + SECP256K1_N_4;
+ r->d[4] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[5]) + SECP256K1_N_5;
+ r->d[5] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[6]) + SECP256K1_N_6;
+ r->d[6] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
+ r->d[7] = t & nonzero;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[7] < SECP256K1_N_H_7);
+ yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
+ no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */
+ no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */
+ no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. */
+ no |= (a->d[3] < SECP256K1_N_H_3) & ~yes;
+ yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_H_2) & ~yes;
+ yes |= (a->d[2] > SECP256K1_N_H_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
+ yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
+ return yes;
+}
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ /* If flag = 0, mask = 00...00 and this is a no-op;
+ * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
+ uint32_t mask = !flag - 1;
+ uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
+ uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+ r->d[0] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
+ r->d[1] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
+ r->d[2] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
+ r->d[3] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask);
+ r->d[4] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask);
+ r->d[5] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask);
+ r->d[6] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
+ r->d[7] = t & nonzero;
+ return 2 * (mask == 0) - 1;
+}
+
+
+/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
+
+/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd(a,b) { \
+ uint32_t tl, th; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c1 += th; /* overflow is handled on the next line */ \
+ c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+}
+
+/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
+#define muladd_fast(a,b) { \
+ uint32_t tl, th; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c1 += th; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK(c1 >= th); \
+}
+
+/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd2(a,b) { \
+ uint32_t tl, th, th2, tl2; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
+ c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
+ tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 31 bits of tl were 0x7FFFFFFF) */ \
+ th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c0 += tl2; /* overflow is handled on the next line */ \
+ th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
+ c1 += th2; /* overflow is handled on the next line */ \
+ c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+}
+
+/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define sumadd(a) { \
+ unsigned int over; \
+ c0 += (a); /* overflow is handled on the next line */ \
+ over = (c0 < (a)) ? 1 : 0; \
+ c1 += over; /* overflow is handled on the next line */ \
+ c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+}
+
+/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
+#define sumadd_fast(a) { \
+ c0 += (a); /* overflow is handled on the next line */ \
+ c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. */
+#define extract(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = c2; \
+ c2 = 0; \
+}
+
+/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. c2 is required to be zero. */
+#define extract_fast(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = 0; \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) {
+ uint64_t c;
+ uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
+ uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;
+ uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8;
+
+ /* 96 bit accumulator. */
+ uint32_t c0, c1, c2;
+
+ /* Reduce 512 bits into 385. */
+ /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */
+ c0 = l[0]; c1 = 0; c2 = 0;
+ muladd_fast(n0, SECP256K1_N_C_0);
+ extract_fast(m0);
+ sumadd_fast(l[1]);
+ muladd(n1, SECP256K1_N_C_0);
+ muladd(n0, SECP256K1_N_C_1);
+ extract(m1);
+ sumadd(l[2]);
+ muladd(n2, SECP256K1_N_C_0);
+ muladd(n1, SECP256K1_N_C_1);
+ muladd(n0, SECP256K1_N_C_2);
+ extract(m2);
+ sumadd(l[3]);
+ muladd(n3, SECP256K1_N_C_0);
+ muladd(n2, SECP256K1_N_C_1);
+ muladd(n1, SECP256K1_N_C_2);
+ muladd(n0, SECP256K1_N_C_3);
+ extract(m3);
+ sumadd(l[4]);
+ muladd(n4, SECP256K1_N_C_0);
+ muladd(n3, SECP256K1_N_C_1);
+ muladd(n2, SECP256K1_N_C_2);
+ muladd(n1, SECP256K1_N_C_3);
+ sumadd(n0);
+ extract(m4);
+ sumadd(l[5]);
+ muladd(n5, SECP256K1_N_C_0);
+ muladd(n4, SECP256K1_N_C_1);
+ muladd(n3, SECP256K1_N_C_2);
+ muladd(n2, SECP256K1_N_C_3);
+ sumadd(n1);
+ extract(m5);
+ sumadd(l[6]);
+ muladd(n6, SECP256K1_N_C_0);
+ muladd(n5, SECP256K1_N_C_1);
+ muladd(n4, SECP256K1_N_C_2);
+ muladd(n3, SECP256K1_N_C_3);
+ sumadd(n2);
+ extract(m6);
+ sumadd(l[7]);
+ muladd(n7, SECP256K1_N_C_0);
+ muladd(n6, SECP256K1_N_C_1);
+ muladd(n5, SECP256K1_N_C_2);
+ muladd(n4, SECP256K1_N_C_3);
+ sumadd(n3);
+ extract(m7);
+ muladd(n7, SECP256K1_N_C_1);
+ muladd(n6, SECP256K1_N_C_2);
+ muladd(n5, SECP256K1_N_C_3);
+ sumadd(n4);
+ extract(m8);
+ muladd(n7, SECP256K1_N_C_2);
+ muladd(n6, SECP256K1_N_C_3);
+ sumadd(n5);
+ extract(m9);
+ muladd(n7, SECP256K1_N_C_3);
+ sumadd(n6);
+ extract(m10);
+ sumadd_fast(n7);
+ extract_fast(m11);
+ VERIFY_CHECK(c0 <= 1);
+ m12 = c0;
+
+ /* Reduce 385 bits into 258. */
+ /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */
+ c0 = m0; c1 = 0; c2 = 0;
+ muladd_fast(m8, SECP256K1_N_C_0);
+ extract_fast(p0);
+ sumadd_fast(m1);
+ muladd(m9, SECP256K1_N_C_0);
+ muladd(m8, SECP256K1_N_C_1);
+ extract(p1);
+ sumadd(m2);
+ muladd(m10, SECP256K1_N_C_0);
+ muladd(m9, SECP256K1_N_C_1);
+ muladd(m8, SECP256K1_N_C_2);
+ extract(p2);
+ sumadd(m3);
+ muladd(m11, SECP256K1_N_C_0);
+ muladd(m10, SECP256K1_N_C_1);
+ muladd(m9, SECP256K1_N_C_2);
+ muladd(m8, SECP256K1_N_C_3);
+ extract(p3);
+ sumadd(m4);
+ muladd(m12, SECP256K1_N_C_0);
+ muladd(m11, SECP256K1_N_C_1);
+ muladd(m10, SECP256K1_N_C_2);
+ muladd(m9, SECP256K1_N_C_3);
+ sumadd(m8);
+ extract(p4);
+ sumadd(m5);
+ muladd(m12, SECP256K1_N_C_1);
+ muladd(m11, SECP256K1_N_C_2);
+ muladd(m10, SECP256K1_N_C_3);
+ sumadd(m9);
+ extract(p5);
+ sumadd(m6);
+ muladd(m12, SECP256K1_N_C_2);
+ muladd(m11, SECP256K1_N_C_3);
+ sumadd(m10);
+ extract(p6);
+ sumadd_fast(m7);
+ muladd_fast(m12, SECP256K1_N_C_3);
+ sumadd_fast(m11);
+ extract_fast(p7);
+ p8 = c0 + m12;
+ VERIFY_CHECK(p8 <= 2);
+
+ /* Reduce 258 bits into 256. */
+ /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */
+ c = p0 + (uint64_t)SECP256K1_N_C_0 * p8;
+ r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p1 + (uint64_t)SECP256K1_N_C_1 * p8;
+ r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p2 + (uint64_t)SECP256K1_N_C_2 * p8;
+ r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p3 + (uint64_t)SECP256K1_N_C_3 * p8;
+ r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p4 + (uint64_t)p8;
+ r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p5;
+ r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p6;
+ r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p7;
+ r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;
+
+ /* Final reduction of r. */
+ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+}
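+
+/* Editor's note (illustrative sketch, not upstream code): the staged reduction
+ * above exploits 2^256 == SECP256K1_N_C (mod n), where n is the group order and
+ * SECP256K1_N_C = 2^256 - n, so the high limbs can be folded down by multiplying
+ * them with SECP256K1_N_C. The same folding trick at 64-bit scale, for a modulus
+ * m close to 2^64 (so that m_c = 2^64 - m is small); assumes a compiler
+ * providing unsigned __int128, and demo_fold_reduce is hypothetical: */
+static uint64_t demo_fold_reduce(uint64_t hi, uint64_t lo, uint64_t m) {
+ uint64_t m_c = 0 - m; /* 2^64 - m */
+ unsigned __int128 acc = (unsigned __int128)hi * m_c + lo; /* hi*2^64 + lo == hi*m_c + lo (mod m) */
+ acc = (unsigned __int128)(uint64_t)(acc >> 64) * m_c + (uint64_t)acc; /* fold once more */
+ while (acc >= m) acc -= m; /* a few conditional subtractions finish the job */
+ return (uint64_t)acc;
+}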
+
+static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ /* 96-bit accumulator. */
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+
+ /* l[0..15] = a[0..7] * b[0..7]. */
+ muladd_fast(a->d[0], b->d[0]);
+ extract_fast(l[0]);
+ muladd(a->d[0], b->d[1]);
+ muladd(a->d[1], b->d[0]);
+ extract(l[1]);
+ muladd(a->d[0], b->d[2]);
+ muladd(a->d[1], b->d[1]);
+ muladd(a->d[2], b->d[0]);
+ extract(l[2]);
+ muladd(a->d[0], b->d[3]);
+ muladd(a->d[1], b->d[2]);
+ muladd(a->d[2], b->d[1]);
+ muladd(a->d[3], b->d[0]);
+ extract(l[3]);
+ muladd(a->d[0], b->d[4]);
+ muladd(a->d[1], b->d[3]);
+ muladd(a->d[2], b->d[2]);
+ muladd(a->d[3], b->d[1]);
+ muladd(a->d[4], b->d[0]);
+ extract(l[4]);
+ muladd(a->d[0], b->d[5]);
+ muladd(a->d[1], b->d[4]);
+ muladd(a->d[2], b->d[3]);
+ muladd(a->d[3], b->d[2]);
+ muladd(a->d[4], b->d[1]);
+ muladd(a->d[5], b->d[0]);
+ extract(l[5]);
+ muladd(a->d[0], b->d[6]);
+ muladd(a->d[1], b->d[5]);
+ muladd(a->d[2], b->d[4]);
+ muladd(a->d[3], b->d[3]);
+ muladd(a->d[4], b->d[2]);
+ muladd(a->d[5], b->d[1]);
+ muladd(a->d[6], b->d[0]);
+ extract(l[6]);
+ muladd(a->d[0], b->d[7]);
+ muladd(a->d[1], b->d[6]);
+ muladd(a->d[2], b->d[5]);
+ muladd(a->d[3], b->d[4]);
+ muladd(a->d[4], b->d[3]);
+ muladd(a->d[5], b->d[2]);
+ muladd(a->d[6], b->d[1]);
+ muladd(a->d[7], b->d[0]);
+ extract(l[7]);
+ muladd(a->d[1], b->d[7]);
+ muladd(a->d[2], b->d[6]);
+ muladd(a->d[3], b->d[5]);
+ muladd(a->d[4], b->d[4]);
+ muladd(a->d[5], b->d[3]);
+ muladd(a->d[6], b->d[2]);
+ muladd(a->d[7], b->d[1]);
+ extract(l[8]);
+ muladd(a->d[2], b->d[7]);
+ muladd(a->d[3], b->d[6]);
+ muladd(a->d[4], b->d[5]);
+ muladd(a->d[5], b->d[4]);
+ muladd(a->d[6], b->d[3]);
+ muladd(a->d[7], b->d[2]);
+ extract(l[9]);
+ muladd(a->d[3], b->d[7]);
+ muladd(a->d[4], b->d[6]);
+ muladd(a->d[5], b->d[5]);
+ muladd(a->d[6], b->d[4]);
+ muladd(a->d[7], b->d[3]);
+ extract(l[10]);
+ muladd(a->d[4], b->d[7]);
+ muladd(a->d[5], b->d[6]);
+ muladd(a->d[6], b->d[5]);
+ muladd(a->d[7], b->d[4]);
+ extract(l[11]);
+ muladd(a->d[5], b->d[7]);
+ muladd(a->d[6], b->d[6]);
+ muladd(a->d[7], b->d[5]);
+ extract(l[12]);
+ muladd(a->d[6], b->d[7]);
+ muladd(a->d[7], b->d[6]);
+ extract(l[13]);
+ muladd_fast(a->d[7], b->d[7]);
+ extract_fast(l[14]);
+ VERIFY_CHECK(c1 == 0);
+ l[15] = c0;
+}
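+
+/* Editor's note (illustrative, not upstream code): the schedule above is
+ * product scanning ("Comba" multiplication): every partial product
+ * a->d[i]*b->d[j] with i+j == k is accumulated before the single limb l[k] is
+ * extracted, so each output limb is written exactly once. A hypothetical
+ * 2x2-limb version of the same schedule, reusing the macros defined above: */
+static void demo_mul_2x2(uint32_t *l, const uint32_t *a, const uint32_t *b) {
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+ muladd_fast(a[0], b[0]); /* column k = 0 */
+ extract_fast(l[0]);
+ muladd(a[0], b[1]); /* column k = 1 */
+ muladd(a[1], b[0]);
+ extract(l[1]);
+ muladd_fast(a[1], b[1]); /* column k = 2 */
+ extract_fast(l[2]);
+ VERIFY_CHECK(c1 == 0);
+ l[3] = c0;
+}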
+
+static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) {
+ /* 96-bit accumulator. */
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+
+ /* l[0..15] = a[0..7]^2. */
+ muladd_fast(a->d[0], a->d[0]);
+ extract_fast(l[0]);
+ muladd2(a->d[0], a->d[1]);
+ extract(l[1]);
+ muladd2(a->d[0], a->d[2]);
+ muladd(a->d[1], a->d[1]);
+ extract(l[2]);
+ muladd2(a->d[0], a->d[3]);
+ muladd2(a->d[1], a->d[2]);
+ extract(l[3]);
+ muladd2(a->d[0], a->d[4]);
+ muladd2(a->d[1], a->d[3]);
+ muladd(a->d[2], a->d[2]);
+ extract(l[4]);
+ muladd2(a->d[0], a->d[5]);
+ muladd2(a->d[1], a->d[4]);
+ muladd2(a->d[2], a->d[3]);
+ extract(l[5]);
+ muladd2(a->d[0], a->d[6]);
+ muladd2(a->d[1], a->d[5]);
+ muladd2(a->d[2], a->d[4]);
+ muladd(a->d[3], a->d[3]);
+ extract(l[6]);
+ muladd2(a->d[0], a->d[7]);
+ muladd2(a->d[1], a->d[6]);
+ muladd2(a->d[2], a->d[5]);
+ muladd2(a->d[3], a->d[4]);
+ extract(l[7]);
+ muladd2(a->d[1], a->d[7]);
+ muladd2(a->d[2], a->d[6]);
+ muladd2(a->d[3], a->d[5]);
+ muladd(a->d[4], a->d[4]);
+ extract(l[8]);
+ muladd2(a->d[2], a->d[7]);
+ muladd2(a->d[3], a->d[6]);
+ muladd2(a->d[4], a->d[5]);
+ extract(l[9]);
+ muladd2(a->d[3], a->d[7]);
+ muladd2(a->d[4], a->d[6]);
+ muladd(a->d[5], a->d[5]);
+ extract(l[10]);
+ muladd2(a->d[4], a->d[7]);
+ muladd2(a->d[5], a->d[6]);
+ extract(l[11]);
+ muladd2(a->d[5], a->d[7]);
+ muladd(a->d[6], a->d[6]);
+ extract(l[12]);
+ muladd2(a->d[6], a->d[7]);
+ extract(l[13]);
+ muladd_fast(a->d[7], a->d[7]);
+ extract_fast(l[14]);
+ VERIFY_CHECK(c1 == 0);
+ l[15] = c0;
+}
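+
+/* Editor's note: relative to secp256k1_scalar_mul_512, the squaring schedule
+ * exploits symmetry (a->d[i]*a->d[j] == a->d[j]*a->d[i]): each off-diagonal
+ * product is accumulated once, doubled, via muladd2, and only the diagonal
+ * terms use plain muladd. This roughly halves the number of 32x32->64-bit
+ * multiplications compared to a general multiply. */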
+
+#undef sumadd
+#undef sumadd_fast
+#undef muladd
+#undef muladd_fast
+#undef muladd2
+#undef extract
+#undef extract_fast
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint32_t l[16];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = r->d[0] & ((1 << n) - 1);
+ r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
+ r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
+ r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
+ r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
+ r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
+ r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
+ r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
+ r->d[7] = (r->d[7] >> n);
+ return ret;
+}
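+
+/* Editor's note: the return value is the n low bits that were shifted out, so
+ * a caller can consume a scalar a few bits at a time (for example when
+ * recoding it into signed digits for point multiplication) while the scalar
+ * itself is destructively shifted down. */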
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint32_t l[16];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+#ifdef USE_ENDOMORPHISM
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ r1->d[0] = a->d[0];
+ r1->d[1] = a->d[1];
+ r1->d[2] = a->d[2];
+ r1->d[3] = a->d[3];
+ r1->d[4] = 0;
+ r1->d[5] = 0;
+ r1->d[6] = 0;
+ r1->d[7] = 0;
+ r2->d[0] = a->d[4];
+ r2->d[1] = a->d[5];
+ r2->d[2] = a->d[6];
+ r2->d[3] = a->d[7];
+ r2->d[4] = 0;
+ r2->d[5] = 0;
+ r2->d[6] = 0;
+ r2->d[7] = 0;
+}
+#endif
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
+ uint32_t l[16];
+ unsigned int shiftlimbs;
+ unsigned int shiftlow;
+ unsigned int shifthigh;
+ VERIFY_CHECK(shift >= 256);
+ secp256k1_scalar_mul_512(l, a, b);
+ shiftlimbs = shift >> 5;
+ shiftlow = shift & 0x1F;
+ shifthigh = 32 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
+}
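+
+/* Editor's note (illustrative, not upstream code): the secp256k1_scalar_cadd_bit
+ * call above makes the result round-to-nearest rather than truncated: the
+ * highest discarded bit is added back in. The same idea on a single 64-bit
+ * word (demo_shift_round is hypothetical; requires 1 <= shift <= 63): */
+static uint64_t demo_shift_round(uint64_t x, unsigned int shift) {
+ return (x >> shift) + ((x >> (shift - 1)) & 1); /* x/2^shift, rounded to nearest */
+}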
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_impl.h
new file mode 100644
index 000000000..88ea97de8
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_impl.h
@@ -0,0 +1,337 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_IMPL_H_
+#define _SECP256K1_SCALAR_IMPL_H_
+
+#include <string.h>
+
+#include "group.h"
+#include "scalar.h"
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(USE_SCALAR_4X64)
+#include "scalar_4x64_impl.h"
+#elif defined(USE_SCALAR_8X32)
+#include "scalar_8x32_impl.h"
+#else
+#error "Please select scalar implementation"
+#endif
+
+#ifndef USE_NUM_NONE
+static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) {
+ unsigned char c[32];
+ secp256k1_scalar_get_b32(c, a);
+ secp256k1_num_set_bin(r, c, 32);
+}
+
+/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
+static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
+ static const unsigned char order[32] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
+ };
+ secp256k1_num_set_bin(r, order, 32);
+}
+#endif
+
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+ secp256k1_scalar *t;
+ int i;
+ /* First compute x ^ (2^N - 1) for some values of N. */
+ secp256k1_scalar x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127;
+
+ secp256k1_scalar_sqr(&x2, x);
+ secp256k1_scalar_mul(&x2, &x2, x);
+
+ secp256k1_scalar_sqr(&x3, &x2);
+ secp256k1_scalar_mul(&x3, &x3, x);
+
+ secp256k1_scalar_sqr(&x4, &x3);
+ secp256k1_scalar_mul(&x4, &x4, x);
+
+ secp256k1_scalar_sqr(&x6, &x4);
+ secp256k1_scalar_sqr(&x6, &x6);
+ secp256k1_scalar_mul(&x6, &x6, &x2);
+
+ secp256k1_scalar_sqr(&x7, &x6);
+ secp256k1_scalar_mul(&x7, &x7, x);
+
+ secp256k1_scalar_sqr(&x8, &x7);
+ secp256k1_scalar_mul(&x8, &x8, x);
+
+ secp256k1_scalar_sqr(&x15, &x8);
+ for (i = 0; i < 6; i++) {
+ secp256k1_scalar_sqr(&x15, &x15);
+ }
+ secp256k1_scalar_mul(&x15, &x15, &x7);
+
+ secp256k1_scalar_sqr(&x30, &x15);
+ for (i = 0; i < 14; i++) {
+ secp256k1_scalar_sqr(&x30, &x30);
+ }
+ secp256k1_scalar_mul(&x30, &x30, &x15);
+
+ secp256k1_scalar_sqr(&x60, &x30);
+ for (i = 0; i < 29; i++) {
+ secp256k1_scalar_sqr(&x60, &x60);
+ }
+ secp256k1_scalar_mul(&x60, &x60, &x30);
+
+ secp256k1_scalar_sqr(&x120, &x60);
+ for (i = 0; i < 59; i++) {
+ secp256k1_scalar_sqr(&x120, &x120);
+ }
+ secp256k1_scalar_mul(&x120, &x120, &x60);
+
+ secp256k1_scalar_sqr(&x127, &x120);
+ for (i = 0; i < 6; i++) {
+ secp256k1_scalar_sqr(&x127, &x127);
+ }
+ secp256k1_scalar_mul(&x127, &x127, &x7);
+
+ /* Then accumulate the final result (t starts at x127). */
+ t = &x127;
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 3; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 5; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 4; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x4); /* 1111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 4; i++) { /* 000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 10; i++) { /* 0000000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 9; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x4); /* 1111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 5; i++) { /* 000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 4; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 8; i++) { /* 000000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 3; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 6; i++) { /* 00000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 8; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(r, t, &x6); /* 111111 */
+}
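+
+/* Editor's note (illustrative, not upstream code): the fixed ladder above is an
+ * addition chain computing x^(n-2) mod n, i.e. the inverse by Fermat's little
+ * theorem, using only squarings and multiplications by precomputed powers so
+ * that it runs in constant time. The generic (variable-time) square-and-multiply
+ * shape of the same computation, on a toy 64-bit prime; assumes a compiler with
+ * unsigned __int128, and the demo_* names are hypothetical: */
+static uint64_t demo_mulmod(uint64_t a, uint64_t b, uint64_t p) {
+ return (uint64_t)(((unsigned __int128)a * b) % p);
+}
+static uint64_t demo_invmod(uint64_t x, uint64_t p) { /* requires p prime, x % p != 0 */
+ uint64_t r = 1, e = p - 2;
+ while (e) {
+ if (e & 1) r = demo_mulmod(r, x, p);
+ x = demo_mulmod(x, x, p);
+ e >>= 1;
+ }
+ return r; /* x^(p-2) == x^(-1) (mod p) by Fermat */
+}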
+
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+ /* d[0] is present and is the lowest word for all representations */
+ return !(a->d[0] & 1);
+}
+
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(USE_SCALAR_INV_BUILTIN)
+ secp256k1_scalar_inverse(r, x);
+#elif defined(USE_SCALAR_INV_NUM)
+ unsigned char b[32];
+ secp256k1_num n, m;
+ secp256k1_scalar t = *x;
+ secp256k1_scalar_get_b32(b, &t);
+ secp256k1_num_set_bin(&n, b, 32);
+ secp256k1_scalar_order_get_num(&m);
+ secp256k1_num_mod_inverse(&n, &n, &m);
+ secp256k1_num_get_bin(b, 32, &n);
+ secp256k1_scalar_set_b32(r, b, NULL);
+ /* Verify that the inverse was computed correctly, without GMP code. */
+ secp256k1_scalar_mul(&t, &t, r);
+ CHECK(secp256k1_scalar_is_one(&t));
+#else
+#error "Please select scalar inverse implementation"
+#endif
+}
+
+#ifdef USE_ENDOMORPHISM
+/**
+ * The secp256k1 curve has an endomorphism lambda * (x, y) = (beta * x, y), where
+ * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
+ * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
+ *
+ * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+ * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+ * and k2 have a small size.
+ * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+ *
+ * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+ * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
+ * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
+ * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+ *
+ * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+ * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
+ * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+ *
+ * g1, g2 are precomputed constants used to replace division with a rounded multiplication
+ * when decomposing the scalar for an endomorphism-based point multiplication.
+ *
+ * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
+ * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
+ *
+ * The derivation is described in the paper "Efficient Software Implementation of Public-Key
+ * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
+ * Section 4.3 (here we use a somewhat higher-precision estimate):
+ * d = a1*b2 - b1*a2
+ * g1 = round((2^272)*b2/d)
+ * g2 = round((2^272)*b1/d)
+ *
+ * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
+ * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
+ *
+ * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
+ */
+
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ secp256k1_scalar c1, c2;
+ static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
+ 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
+ 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
+ );
+ static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
+ 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
+ );
+ static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
+ 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
+ );
+ static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
+ 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
+ );
+ static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
+ 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
+ );
+ VERIFY_CHECK(r1 != a);
+ VERIFY_CHECK(r2 != a);
+ /* these _var calls are constant time since the shift amount is constant */
+ secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
+ secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
+ secp256k1_scalar_mul(&c1, &c1, &minus_b1);
+ secp256k1_scalar_mul(&c2, &c2, &minus_b2);
+ secp256k1_scalar_add(r2, &c1, &c2);
+ secp256k1_scalar_mul(r1, r2, &minus_lambda);
+ secp256k1_scalar_add(r1, r1, a);
+}
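+
+/* Editor's note: by construction r1 and r2 each fit in roughly 128 bits, so a
+ * multiplication by a full 256-bit scalar can be replaced by two half-width
+ * multiplications (one of them against the lambda-transformed point), roughly
+ * halving the number of point doublings. The defining identity
+ * r1 + lambda * r2 == a (mod n) can be re-checked with secp256k1_scalar_mul
+ * and secp256k1_scalar_add if desired. */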
+#endif
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/secp256k1.c b/crypto/secp256k1/libsecp256k1/src/secp256k1.c
new file mode 100644
index 000000000..203f880af
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/secp256k1.c
@@ -0,0 +1,513 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#define SECP256K1_BUILD (1)
+
+#include "include/secp256k1.h"
+
+#include "util.h"
+#include "num_impl.h"
+#include "field_impl.h"
+#include "scalar_impl.h"
+#include "group_impl.h"
+#include "ecmult_impl.h"
+#include "ecmult_const_impl.h"
+#include "ecmult_gen_impl.h"
+#include "ecdsa_impl.h"
+#include "eckey_impl.h"
+#include "hash_impl.h"
+
+#define ARG_CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ secp256k1_callback_call(&ctx->illegal_callback, #cond); \
+ return 0; \
+ } \
+} while(0)
+
+static void default_illegal_callback_fn(const char* str, void* data) {
+ (void)data;
+ fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
+ abort();
+}
+
+static const secp256k1_callback default_illegal_callback = {
+ default_illegal_callback_fn,
+ NULL
+};
+
+static void default_error_callback_fn(const char* str, void* data) {
+ (void)data;
+ fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
+ abort();
+}
+
+static const secp256k1_callback default_error_callback = {
+ default_error_callback_fn,
+ NULL
+};
+
+
+struct secp256k1_context_struct {
+ secp256k1_ecmult_context ecmult_ctx;
+ secp256k1_ecmult_gen_context ecmult_gen_ctx;
+ secp256k1_callback illegal_callback;
+ secp256k1_callback error_callback;
+};
+
+secp256k1_context* secp256k1_context_create(unsigned int flags) {
+ secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context));
+ ret->illegal_callback = default_illegal_callback;
+ ret->error_callback = default_error_callback;
+
+ secp256k1_ecmult_context_init(&ret->ecmult_ctx);
+ secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
+
+ if (flags & SECP256K1_CONTEXT_SIGN) {
+ secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback);
+ }
+ if (flags & SECP256K1_CONTEXT_VERIFY) {
+ secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback);
+ }
+
+ return ret;
+}
+
+secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
+ secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context));
+ ret->illegal_callback = ctx->illegal_callback;
+ ret->error_callback = ctx->error_callback;
+ secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback);
+ secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback);
+ return ret;
+}
+
+void secp256k1_context_destroy(secp256k1_context* ctx) {
+ if (ctx != NULL) {
+ secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
+ secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
+
+ free(ctx);
+ }
+}
+
+void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
+ if (fun == NULL) {
+ fun = default_illegal_callback_fn;
+ }
+ ctx->illegal_callback.fn = fun;
+ ctx->illegal_callback.data = data;
+}
+
+void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
+ if (fun == NULL) {
+ fun = default_error_callback_fn;
+ }
+ ctx->error_callback.fn = fun;
+ ctx->error_callback.data = data;
+}
+
+static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {
+ if (sizeof(secp256k1_ge_storage) == 64) {
+ /* When the secp256k1_ge_storage type is exactly 64 bytes, use its
+ * representation inside secp256k1_pubkey, as conversion is very fast.
+ * Note that secp256k1_pubkey_save must use the same representation. */
+ secp256k1_ge_storage s;
+ memcpy(&s, &pubkey->data[0], 64);
+ secp256k1_ge_from_storage(ge, &s);
+ } else {
+ /* Otherwise, fall back to 32-byte big endian for X and Y. */
+ secp256k1_fe x, y;
+ secp256k1_fe_set_b32(&x, pubkey->data);
+ secp256k1_fe_set_b32(&y, pubkey->data + 32);
+ secp256k1_ge_set_xy(ge, &x, &y);
+ }
+ ARG_CHECK(!secp256k1_fe_is_zero(&ge->x));
+ return 1;
+}
+
+static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) {
+ if (sizeof(secp256k1_ge_storage) == 64) {
+ secp256k1_ge_storage s;
+ secp256k1_ge_to_storage(&s, ge);
+ memcpy(&pubkey->data[0], &s, 64);
+ } else {
+ VERIFY_CHECK(!secp256k1_ge_is_infinity(ge));
+ secp256k1_fe_normalize_var(&ge->x);
+ secp256k1_fe_normalize_var(&ge->y);
+ secp256k1_fe_get_b32(pubkey->data, &ge->x);
+ secp256k1_fe_get_b32(pubkey->data + 32, &ge->y);
+ }
+}
+
+int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
+ secp256k1_ge Q;
+
+ (void)ctx;
+ if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
+ memset(pubkey, 0, sizeof(*pubkey));
+ return 0;
+ }
+ secp256k1_pubkey_save(pubkey, &Q);
+ secp256k1_ge_clear(&Q);
+ return 1;
+}
+
+int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) {
+ secp256k1_ge Q;
+
+ (void)ctx;
+ return (secp256k1_pubkey_load(ctx, &Q, pubkey) &&
+ secp256k1_eckey_pubkey_serialize(&Q, output, outputlen, flags));
+}
+
+static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) {
+ (void)ctx;
+ if (sizeof(secp256k1_scalar) == 32) {
+ /* When the secp256k1_scalar type is exactly 32 bytes, use its
+ * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
+ * Note that secp256k1_ecdsa_signature_save must use the same representation. */
+ memcpy(r, &sig->data[0], 32);
+ memcpy(s, &sig->data[32], 32);
+ } else {
+ secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
+ secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
+ }
+}
+
+static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) {
+ if (sizeof(secp256k1_scalar) == 32) {
+ memcpy(&sig->data[0], r, 32);
+ memcpy(&sig->data[32], s, 32);
+ } else {
+ secp256k1_scalar_get_b32(&sig->data[0], r);
+ secp256k1_scalar_get_b32(&sig->data[32], s);
+ }
+}
+
+int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
+ secp256k1_scalar r, s;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input != NULL);
+
+ if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) {
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ return 1;
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ return 0;
+ }
+}
+
+int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) {
+ secp256k1_scalar r, s;
+
+ (void)ctx;
+ ARG_CHECK(output != NULL);
+ ARG_CHECK(outputlen != NULL);
+ ARG_CHECK(sig != NULL);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
+ return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s);
+}
+
+int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
+ secp256k1_ge q;
+ secp256k1_scalar r, s;
+ secp256k1_scalar m;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ secp256k1_scalar_set_b32(&m, msg32, NULL);
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
+ return (secp256k1_pubkey_load(ctx, &q, pubkey) &&
+ secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
+}
+
+static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ unsigned char keydata[112];
+ int keylen = 64;
+ secp256k1_rfc6979_hmac_sha256_t rng;
+ unsigned int i;
+ /* We feed a byte array to the PRNG as input, consisting of:
+ * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d.
+ * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data.
+ * - optionally 16 extra bytes with the algorithm name. When the algorithm
+ * name is provided but the extra data is not, the extra data bytes are
+ * zero-filled; when the algorithm name is absent, it is simply omitted.
+ */
+ memcpy(keydata, key32, 32);
+ memcpy(keydata + 32, msg32, 32);
+ if (data != NULL) {
+ memcpy(keydata + 64, data, 32);
+ keylen = 96;
+ }
+ if (algo16 != NULL) {
+ memset(keydata + keylen, 0, 96 - keylen);
+ memcpy(keydata + 96, algo16, 16);
+ keylen = 112;
+ }
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, keylen);
+ memset(keydata, 0, sizeof(keydata));
+ for (i = 0; i <= counter; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+ return 1;
+}
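+
+/* Editor's note (illustrative, not upstream code): callers of
+ * secp256k1_ecdsa_sign may supply their own nonce function with this same
+ * signature. A hypothetical wrapper that delegates to the built-in RFC 6979
+ * implementation while pinning a fixed 16-byte algorithm tag: */
+static int demo_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ static const unsigned char tag[16] = "demo/ecdsa"; /* zero-padded to 16 bytes */
+ (void)algo16; /* override whatever tag the caller passed */
+ return nonce_function_rfc6979(nonce32, msg32, key32, tag, data, counter);
+}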
+
+const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979;
+const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979;
+
+int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
+ secp256k1_scalar r, s;
+ secp256k1_scalar sec, non, msg;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(seckey != NULL);
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ /* Fail if the secret key is invalid. */
+ if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned int count = 0;
+ secp256k1_scalar_set_b32(&msg, msg32, NULL);
+ while (1) {
+ unsigned char nonce32[32];
+ ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ memset(nonce32, 0, 32);
+ if (!overflow && !secp256k1_scalar_is_zero(&non)) {
+ if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
+ break;
+ }
+ }
+ count++;
+ }
+ secp256k1_scalar_clear(&msg);
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ }
+ if (ret) {
+ secp256k1_ecdsa_signature_save(signature, &r, &s);
+ } else {
+ memset(signature, 0, sizeof(*signature));
+ }
+ return ret;
+}
+
+int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) {
+ secp256k1_scalar sec;
+ int ret;
+ int overflow;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+ (void)ctx;
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ ret = !overflow && !secp256k1_scalar_is_zero(&sec);
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
+ secp256k1_gej pj;
+ secp256k1_ge p;
+ secp256k1_scalar sec;
+ int overflow;
+ int ret = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(pubkey != NULL);
+ ARG_CHECK(seckey != NULL);
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec));
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
+ secp256k1_ge_set_gej(&p, &pj);
+ secp256k1_pubkey_save(pubkey, &p);
+ secp256k1_scalar_clear(&sec);
+ if (!ret) {
+ memset(pubkey, 0, sizeof(*pubkey));
+ }
+ return ret;
+}
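+
+/* Editor's note (illustrative usage sketch, not part of the library): a typical
+ * caller flow through the public API defined in this file. Error handling is
+ * minimal, seckey/msg32 are assumed to come from the caller, and the function
+ * name demo_sign_and_verify is hypothetical. */
+static int demo_sign_and_verify(const unsigned char *seckey, const unsigned char *msg32) {
+ secp256k1_context *demo_ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_pubkey pub;
+ secp256k1_ecdsa_signature sig;
+ int ok = secp256k1_ec_seckey_verify(demo_ctx, seckey)
+ && secp256k1_ec_pubkey_create(demo_ctx, &pub, seckey)
+ && secp256k1_ecdsa_sign(demo_ctx, &sig, msg32, seckey, NULL, NULL) /* default RFC 6979 nonce */
+ && secp256k1_ecdsa_verify(demo_ctx, &sig, msg32, &pub);
+ secp256k1_context_destroy(demo_ctx);
+ return ok;
+}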
+
+int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
+ secp256k1_scalar term;
+ secp256k1_scalar sec;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+ ARG_CHECK(tweak != NULL);
+ (void)ctx;
+
+ secp256k1_scalar_set_b32(&term, tweak, &overflow);
+ secp256k1_scalar_set_b32(&sec, seckey, NULL);
+
+ ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term);
+ if (ret) {
+ secp256k1_scalar_get_b32(seckey, &sec);
+ }
+
+ secp256k1_scalar_clear(&sec);
+ secp256k1_scalar_clear(&term);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
+ secp256k1_ge p;
+ secp256k1_scalar term;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(pubkey != NULL);
+ ARG_CHECK(tweak != NULL);
+
+ secp256k1_scalar_set_b32(&term, tweak, &overflow);
+ if (!overflow && secp256k1_pubkey_load(ctx, &p, pubkey)) {
+ ret = secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term);
+ if (ret) {
+ secp256k1_pubkey_save(pubkey, &p);
+ } else {
+ memset(pubkey, 0, sizeof(*pubkey));
+ }
+ }
+
+ return ret;
+}
+
+int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
+ secp256k1_scalar factor;
+ secp256k1_scalar sec;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+ ARG_CHECK(tweak != NULL);
+ (void)ctx;
+
+ secp256k1_scalar_set_b32(&factor, tweak, &overflow);
+ secp256k1_scalar_set_b32(&sec, seckey, NULL);
+ ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor);
+ if (ret) {
+ secp256k1_scalar_get_b32(seckey, &sec);
+ }
+
+ secp256k1_scalar_clear(&sec);
+ secp256k1_scalar_clear(&factor);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
+ secp256k1_ge p;
+ secp256k1_scalar factor;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(pubkey != NULL);
+ ARG_CHECK(tweak != NULL);
+
+ secp256k1_scalar_set_b32(&factor, tweak, &overflow);
+ if (!overflow && secp256k1_pubkey_load(ctx, &p, pubkey)) {
+ ret = secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor);
+ if (ret) {
+ secp256k1_pubkey_save(pubkey, &p);
+ } else {
+ memset(pubkey, 0, sizeof(*pubkey));
+ }
+ }
+
+ return ret;
+}
+
+int secp256k1_ec_privkey_export(const secp256k1_context* ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *seckey, unsigned int flags) {
+ secp256k1_scalar key;
+ int ret = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+ ARG_CHECK(privkey != NULL);
+ ARG_CHECK(privkeylen != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+
+ secp256k1_scalar_set_b32(&key, seckey, NULL);
+ ret = secp256k1_eckey_privkey_serialize(&ctx->ecmult_gen_ctx, privkey, privkeylen, &key, flags);
+ secp256k1_scalar_clear(&key);
+ return ret;
+}
+
+int secp256k1_ec_privkey_import(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *privkey, size_t privkeylen) {
+ secp256k1_scalar key;
+ int ret = 0;
+ ARG_CHECK(seckey != NULL);
+ ARG_CHECK(privkey != NULL);
+ (void)ctx;
+
+ ret = secp256k1_eckey_privkey_parse(&key, privkey, privkeylen);
+ if (ret) {
+ secp256k1_scalar_get_b32(seckey, &key);
+ }
+ secp256k1_scalar_clear(&key);
+ return ret;
+}
+
+int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
+ return 1;
+}
+
+int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, int n) {
+ int i;
+ secp256k1_gej Qj;
+ secp256k1_ge Q;
+
+ ARG_CHECK(pubnonce != NULL);
+ ARG_CHECK(n >= 1);
+ ARG_CHECK(pubnonces != NULL);
+
+ secp256k1_gej_set_infinity(&Qj);
+
+ for (i = 0; i < n; i++) {
+ secp256k1_pubkey_load(ctx, &Q, pubnonces[i]);
+ secp256k1_gej_add_ge(&Qj, &Qj, &Q);
+ }
+ if (secp256k1_gej_is_infinity(&Qj)) {
+ memset(pubnonce, 0, sizeof(*pubnonce));
+ return 0;
+ }
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(pubnonce, &Q);
+ return 1;
+}
+
+#ifdef ENABLE_MODULE_ECDH
+# include "modules/ecdh/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORR
+# include "modules/schnorr/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+# include "modules/recovery/main_impl.h"
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/testrand.h b/crypto/secp256k1/libsecp256k1/src/testrand.h
new file mode 100644
index 000000000..041bb92c4
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/testrand.h
@@ -0,0 +1,28 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_TESTRAND_H_
+#define _SECP256K1_TESTRAND_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+/* A non-cryptographic RNG used only for test infrastructure. */
+
+/** Seed the pseudorandom number generator for testing. */
+SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16);
+
+/** Generate a pseudorandom 32-bit number. */
+static uint32_t secp256k1_rand32(void);
+
+/** Generate a pseudorandom 32-byte array. */
+static void secp256k1_rand256(unsigned char *b32);
+
+/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
+static void secp256k1_rand256_test(unsigned char *b32);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/testrand_impl.h b/crypto/secp256k1/libsecp256k1/src/testrand_impl.h
new file mode 100644
index 000000000..7c3554266
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/testrand_impl.h
@@ -0,0 +1,60 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_TESTRAND_IMPL_H_
+#define _SECP256K1_TESTRAND_IMPL_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "testrand.h"
+#include "hash.h"
+
+static secp256k1_rfc6979_hmac_sha256_t secp256k1_test_rng;
+static uint32_t secp256k1_test_rng_precomputed[8];
+static int secp256k1_test_rng_precomputed_used = 8;
+
+SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) {
+ secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16);
+}
+
+SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
+ if (secp256k1_test_rng_precomputed_used == 8) {
+ secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed));
+ secp256k1_test_rng_precomputed_used = 0;
+ }
+ return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++];
+}
+
+static void secp256k1_rand256(unsigned char *b32) {
+ secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32);
+}
+
+static void secp256k1_rand256_test(unsigned char *b32) {
+ int bits = 0;
+ uint64_t ent = 0;
+ int entleft = 0;
+ memset(b32, 0, 32);
+ while (bits < 256) {
+ int now;
+ uint32_t val;
+ if (entleft < 12) {
+ ent |= ((uint64_t)secp256k1_rand32()) << entleft;
+ entleft += 32;
+ }
+ now = 1 + ((ent % 64) * ((ent >> 6) % 32) + 16) / 31;
+ val = 1 & (ent >> 11);
+ ent >>= 12;
+ entleft -= 12;
+ while (now > 0 && bits < 256) {
+ b32[bits / 8] |= val << (bits % 8);
+ now--;
+ bits++;
+ }
+ }
+}
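+
+/* Editor's note: unlike secp256k1_rand256, this variant deliberately produces
+ * long runs of identical bits (the run lengths themselves are drawn from the
+ * entropy pool), which is far more likely to exercise carry and overflow
+ * corner cases in the field and scalar code than uniform random input. */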
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/tests.c b/crypto/secp256k1/libsecp256k1/src/tests.c
new file mode 100644
index 000000000..3366d90fc
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/tests.c
@@ -0,0 +1,2357 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <time.h>
+
+#include "include/secp256k1.h"
+#include "secp256k1.c"
+#include "testrand_impl.h"
+
+#ifdef ENABLE_OPENSSL_TESTS
+#include "openssl/bn.h"
+#include "openssl/ec.h"
+#include "openssl/ecdsa.h"
+#include "openssl/obj_mac.h"
+#endif
+
+static int count = 64;
+static secp256k1_context *ctx = NULL;
+
+void random_field_element_test(secp256k1_fe *fe) {
+ do {
+ unsigned char b32[32];
+ secp256k1_rand256_test(b32);
+ if (secp256k1_fe_set_b32(fe, b32)) {
+ break;
+ }
+ } while(1);
+}
+
+void random_field_element_magnitude(secp256k1_fe *fe) {
+ secp256k1_fe zero;
+ int n = secp256k1_rand32() % 9;
+ secp256k1_fe_normalize(fe);
+ if (n == 0) {
+ return;
+ }
+ secp256k1_fe_clear(&zero);
+ secp256k1_fe_negate(&zero, &zero, 0);
+ secp256k1_fe_mul_int(&zero, n - 1);
+ secp256k1_fe_add(fe, &zero);
+ VERIFY_CHECK(fe->magnitude == n);
+}
+
+void random_group_element_test(secp256k1_ge *ge) {
+ secp256k1_fe fe;
+ do {
+ random_field_element_test(&fe);
+ if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand32() & 1)) {
+ secp256k1_fe_normalize(&ge->y);
+ break;
+ }
+ } while(1);
+}
+
+void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
+ secp256k1_fe z2, z3;
+ do {
+ random_field_element_test(&gej->z);
+ if (!secp256k1_fe_is_zero(&gej->z)) {
+ break;
+ }
+ } while(1);
+ secp256k1_fe_sqr(&z2, &gej->z);
+ secp256k1_fe_mul(&z3, &z2, &gej->z);
+ secp256k1_fe_mul(&gej->x, &ge->x, &z2);
+ secp256k1_fe_mul(&gej->y, &ge->y, &z3);
+ gej->infinity = ge->infinity;
+}
+
+void random_scalar_order_test(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ secp256k1_rand256_test(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+void random_scalar_order(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ secp256k1_rand256(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+void run_context_tests(void) {
+ secp256k1_context *none = secp256k1_context_create(0);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ secp256k1_gej pubj;
+ secp256k1_ge pub;
+ secp256k1_scalar msg, key, nonce;
+ secp256k1_scalar sigr, sigs;
+
+ /*** clone and destroy all of them to make sure cloning was complete ***/
+ {
+ secp256k1_context *ctx_tmp;
+
+ ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp);
+ }
+
+ /*** attempt to use them ***/
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key);
+ secp256k1_ge_set_gej(&pub, &pubj);
+
+ /* obtain a working nonce */
+ do {
+ random_scalar_order_test(&nonce);
+ } while(!secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+
+ /* try signing */
+ CHECK(secp256k1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+ CHECK(secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+
+ /* try verifying */
+ CHECK(secp256k1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+ CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+
+ /* cleanup */
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+/***** HASH TESTS *****/
+
+void run_sha256_tests(void) {
+ static const char *inputs[8] = {
+ "", "abc", "message digest", "secure hash algorithm", "SHA256 is considered to be safe",
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+ "For this sample, this 63-byte string will be used as input data",
+ "This is exactly 64 bytes long, not counting the terminating byte"
+ };
+ static const unsigned char outputs[8][32] = {
+ {0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
+ {0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad},
+ {0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50},
+ {0xf3, 0x0c, 0xeb, 0x2b, 0xb2, 0x82, 0x9e, 0x79, 0xe4, 0xca, 0x97, 0x53, 0xd3, 0x5a, 0x8e, 0xcc, 0x00, 0x26, 0x2d, 0x16, 0x4c, 0xc0, 0x77, 0x08, 0x02, 0x95, 0x38, 0x1c, 0xbd, 0x64, 0x3f, 0x0d},
+ {0x68, 0x19, 0xd9, 0x15, 0xc7, 0x3f, 0x4d, 0x1e, 0x77, 0xe4, 0xe1, 0xb5, 0x2d, 0x1f, 0xa0, 0xf9, 0xcf, 0x9b, 0xea, 0xea, 0xd3, 0x93, 0x9f, 0x15, 0x87, 0x4b, 0xd9, 0x88, 0xe2, 0xa2, 0x36, 0x30},
+ {0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1},
+ {0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42},
+ {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8}
+ };
+ int i;
+ for (i = 0; i < 8; i++) {
+ unsigned char out[32];
+ secp256k1_sha256_t hasher;
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
+ secp256k1_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ if (strlen(inputs[i]) > 0) {
+ int split = secp256k1_rand32() % strlen(inputs[i]);
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
+ secp256k1_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ }
+ }
+}
+
+void run_hmac_sha256_tests(void) {
+ static const char *keys[6] = {
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ "\x4a\x65\x66\x65",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ };
+ static const char *inputs[6] = {
+ "\x48\x69\x20\x54\x68\x65\x72\x65",
+ "\x77\x68\x61\x74\x20\x64\x6f\x20\x79\x61\x20\x77\x61\x6e\x74\x20\x66\x6f\x72\x20\x6e\x6f\x74\x68\x69\x6e\x67\x3f",
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
+ "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+ "\x54\x65\x73\x74\x20\x55\x73\x69\x6e\x67\x20\x4c\x61\x72\x67\x65\x72\x20\x54\x68\x61\x6e\x20\x42\x6c\x6f\x63\x6b\x2d\x53\x69\x7a\x65\x20\x4b\x65\x79\x20\x2d\x20\x48\x61\x73\x68\x20\x4b\x65\x79\x20\x46\x69\x72\x73\x74",
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x61\x20\x74\x65\x73\x74\x20\x75\x73\x69\x6e\x67\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x6b\x65\x79\x20\x61\x6e\x64\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x64\x61\x74\x61\x2e\x20\x54\x68\x65\x20\x6b\x65\x79\x20\x6e\x65\x65\x64\x73\x20\x74\x6f\x20\x62\x65\x20\x68\x61\x73\x68\x65\x64\x20\x62\x65\x66\x6f\x72\x65\x20\x62\x65\x69\x6e\x67\x20\x75\x73\x65\x64\x20\x62\x79\x20\x74\x68\x65\x20\x48\x4d\x41\x43\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x2e"
+ };
+ static const unsigned char outputs[6][32] = {
+ {0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53, 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b, 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7, 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7},
+ {0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43},
+ {0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46, 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7, 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22, 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe},
+ {0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e, 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a, 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07, 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b},
+ {0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54},
+ {0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2}
+ };
+ int i;
+ for (i = 0; i < 6; i++) {
+ secp256k1_hmac_sha256_t hasher;
+ unsigned char out[32];
+ secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
+ secp256k1_hmac_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ if (strlen(inputs[i]) > 0) {
+ int split = secp256k1_rand32() % strlen(inputs[i]);
+ secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
+ secp256k1_hmac_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ }
+ }
+}
+
+void run_rfc6979_hmac_sha256_tests(void) {
+ static const unsigned char key1[65] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x4b, 0xf5, 0x12, 0x2f, 0x34, 0x45, 0x54, 0xc5, 0x3b, 0xde, 0x2e, 0xbb, 0x8c, 0xd2, 0xb7, 0xe3, 0xd1, 0x60, 0x0a, 0xd6, 0x31, 0xc3, 0x85, 0xa5, 0xd7, 0xcc, 0xe2, 0x3c, 0x77, 0x85, 0x45, 0x9a, 0};
+ static const unsigned char out1[3][32] = {
+ {0x4f, 0xe2, 0x95, 0x25, 0xb2, 0x08, 0x68, 0x09, 0x15, 0x9a, 0xcd, 0xf0, 0x50, 0x6e, 0xfb, 0x86, 0xb0, 0xec, 0x93, 0x2c, 0x7b, 0xa4, 0x42, 0x56, 0xab, 0x32, 0x1e, 0x42, 0x1e, 0x67, 0xe9, 0xfb},
+ {0x2b, 0xf0, 0xff, 0xf1, 0xd3, 0xc3, 0x78, 0xa2, 0x2d, 0xc5, 0xde, 0x1d, 0x85, 0x65, 0x22, 0x32, 0x5c, 0x65, 0xb5, 0x04, 0x49, 0x1a, 0x0c, 0xbd, 0x01, 0xcb, 0x8f, 0x3a, 0xa6, 0x7f, 0xfd, 0x4a},
+ {0xf5, 0x28, 0xb4, 0x10, 0xcb, 0x54, 0x1f, 0x77, 0x00, 0x0d, 0x7a, 0xfb, 0x6c, 0x5b, 0x53, 0xc5, 0xc4, 0x71, 0xea, 0xb4, 0x3e, 0x46, 0x6d, 0x9a, 0xc5, 0x19, 0x0c, 0x39, 0xc8, 0x2f, 0xd8, 0x2e}
+ };
+
+ static const unsigned char key2[64] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55};
+ static const unsigned char out2[3][32] = {
+ {0x9c, 0x23, 0x6c, 0x16, 0x5b, 0x82, 0xae, 0x0c, 0xd5, 0x90, 0x65, 0x9e, 0x10, 0x0b, 0x6b, 0xab, 0x30, 0x36, 0xe7, 0xba, 0x8b, 0x06, 0x74, 0x9b, 0xaf, 0x69, 0x81, 0xe1, 0x6f, 0x1a, 0x2b, 0x95},
+ {0xdf, 0x47, 0x10, 0x61, 0x62, 0x5b, 0xc0, 0xea, 0x14, 0xb6, 0x82, 0xfe, 0xee, 0x2c, 0x9c, 0x02, 0xf2, 0x35, 0xda, 0x04, 0x20, 0x4c, 0x1d, 0x62, 0xa1, 0x53, 0x6c, 0x6e, 0x17, 0xae, 0xd7, 0xa9},
+ {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94}
+ };
+
+ secp256k1_rfc6979_hmac_sha256_t rng;
+ unsigned char out[32];
+ int i;
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out1[i], 32) == 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out1[i], 32) != 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out2[i], 32) == 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+}
+
+/***** NUM TESTS *****/
+
+#ifndef USE_NUM_NONE
+void random_num_negate(secp256k1_num *num) {
+ if (secp256k1_rand32() & 1) {
+ secp256k1_num_negate(num);
+ }
+}
+
+void random_num_order_test(secp256k1_num *num) {
+ secp256k1_scalar sc;
+ random_scalar_order_test(&sc);
+ secp256k1_scalar_get_num(num, &sc);
+}
+
+void random_num_order(secp256k1_num *num) {
+ secp256k1_scalar sc;
+ random_scalar_order(&sc);
+ secp256k1_scalar_get_num(num, &sc);
+}
+
+void test_num_negate(void) {
+ secp256k1_num n1;
+ secp256k1_num n2;
+ random_num_order_test(&n1); /* n1 = R */
+ random_num_negate(&n1);
+ secp256k1_num_copy(&n2, &n1); /* n2 = R */
+ secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */
+ CHECK(secp256k1_num_is_zero(&n1));
+ secp256k1_num_copy(&n1, &n2); /* n1 = R */
+ secp256k1_num_negate(&n1); /* n1 = -R */
+ CHECK(!secp256k1_num_is_zero(&n1));
+ secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */
+ CHECK(secp256k1_num_is_zero(&n1));
+ secp256k1_num_copy(&n1, &n2); /* n1 = R */
+ secp256k1_num_negate(&n1); /* n1 = -R */
+ CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2));
+ secp256k1_num_negate(&n1); /* n1 = R */
+ CHECK(secp256k1_num_eq(&n1, &n2));
+}
+
+void test_num_add_sub(void) {
+ secp256k1_num n1;
+ secp256k1_num n2;
+ secp256k1_num n1p2, n2p1, n1m2, n2m1;
+ int r = secp256k1_rand32();
+ random_num_order_test(&n1); /* n1 = R1 */
+ if (r & 1) {
+ random_num_negate(&n1);
+ }
+ random_num_order_test(&n2); /* n2 = R2 */
+ if (r & 2) {
+ random_num_negate(&n2);
+ }
+ secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */
+ secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */
+ secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */
+ secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */
+ CHECK(secp256k1_num_eq(&n1p2, &n2p1));
+ CHECK(!secp256k1_num_eq(&n1p2, &n1m2));
+ secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */
+ CHECK(secp256k1_num_eq(&n2m1, &n1m2));
+ CHECK(!secp256k1_num_eq(&n2m1, &n1));
+ secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */
+ CHECK(secp256k1_num_eq(&n2m1, &n1));
+ CHECK(!secp256k1_num_eq(&n2p1, &n1));
+ secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */
+ CHECK(secp256k1_num_eq(&n2p1, &n1));
+}
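+
+/* Minimal known-answer sketch (an editor's illustration, not upstream code):
+ * 1 + 1 == 2 on secp256k1_num, using only set_bin/add/eq as exercised above.
+ * It could be invoked from run_num_smalltests; the name is a hypothetical
+ * addition. */
+void test_num_add_known(void) {
+ unsigned char c1[1] = {0x01};
+ unsigned char c2[1] = {0x02};
+ secp256k1_num one, two, sum;
+ secp256k1_num_set_bin(&one, c1, 1);
+ secp256k1_num_set_bin(&two, c2, 1);
+ secp256k1_num_add(&sum, &one, &one); /* sum = 1 + 1 */
+ CHECK(secp256k1_num_eq(&sum, &two));
+}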
+
+void run_num_smalltests(void) {
+ int i;
+ for (i = 0; i < 100*count; i++) {
+ test_num_negate();
+ test_num_add_sub();
+ }
+}
+#endif
+
+/***** SCALAR TESTS *****/
+
+void scalar_test(void) {
+ secp256k1_scalar s;
+ secp256k1_scalar s1;
+ secp256k1_scalar s2;
+#ifndef USE_NUM_NONE
+ secp256k1_num snum, s1num, s2num;
+ secp256k1_num order, half_order;
+#endif
+ unsigned char c[32];
+
+ /* Set 's' to a random scalar, with value 'snum'. */
+ random_scalar_order_test(&s);
+
+ /* Set 's1' to a random scalar, with value 's1num'. */
+ random_scalar_order_test(&s1);
+
+ /* Set 's2' to a random scalar, with value 's2num', and byte array representation 'c'. */
+ random_scalar_order_test(&s2);
+ secp256k1_scalar_get_b32(c, &s2);
+
+#ifndef USE_NUM_NONE
+ secp256k1_scalar_get_num(&snum, &s);
+ secp256k1_scalar_get_num(&s1num, &s1);
+ secp256k1_scalar_get_num(&s2num, &s2);
+
+ secp256k1_scalar_order_get_num(&order);
+ half_order = order;
+ secp256k1_num_shift(&half_order, 1);
+#endif
+
+ {
+ int i;
+ /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */
+ secp256k1_scalar n;
+ secp256k1_scalar_set_int(&n, 0);
+ for (i = 0; i < 256; i += 4) {
+ secp256k1_scalar t;
+ int j;
+ secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4));
+ for (j = 0; j < 4; j++) {
+ secp256k1_scalar_add(&n, &n, &n);
+ }
+ secp256k1_scalar_add(&n, &n, &t);
+ }
+ CHECK(secp256k1_scalar_eq(&n, &s));
+ }
+
+ {
+ /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */
+ secp256k1_scalar n;
+ int i = 0;
+ secp256k1_scalar_set_int(&n, 0);
+ while (i < 256) {
+ secp256k1_scalar t;
+ int j;
+ int now = (secp256k1_rand32() % 15) + 1;
+ if (now + i > 256) {
+ now = 256 - i;
+ }
+ secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now));
+ for (j = 0; j < now; j++) {
+ secp256k1_scalar_add(&n, &n, &n);
+ }
+ secp256k1_scalar_add(&n, &n, &t);
+ i += now;
+ }
+ CHECK(secp256k1_scalar_eq(&n, &s));
+ }
+
+#ifndef USE_NUM_NONE
+ {
+ /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */
+ secp256k1_num rnum;
+ secp256k1_num r2num;
+ secp256k1_scalar r;
+ secp256k1_num_add(&rnum, &snum, &s2num);
+ secp256k1_num_mod(&rnum, &order);
+ secp256k1_scalar_add(&r, &s, &s2);
+ secp256k1_scalar_get_num(&r2num, &r);
+ CHECK(secp256k1_num_eq(&rnum, &r2num));
+ }
+
+ {
+ /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */
+ secp256k1_scalar r;
+ secp256k1_num r2num;
+ secp256k1_num rnum;
+ secp256k1_num_mul(&rnum, &snum, &s2num);
+ secp256k1_num_mod(&rnum, &order);
+ secp256k1_scalar_mul(&r, &s, &s2);
+ secp256k1_scalar_get_num(&r2num, &r);
+ CHECK(secp256k1_num_eq(&rnum, &r2num));
+ /* The result can only be zero if at least one of the factors was zero. */
+ CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2)));
+ /* The result can only be equal to one of the factors if that factor was zero, or the other factor was one. */
+ CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2)));
+ CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s)));
+ }
+
+ {
+ secp256k1_scalar neg;
+ secp256k1_num negnum;
+ secp256k1_num negnum2;
+ /* Check that comparison with zero matches comparison with zero on the number. */
+ CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s));
+ /* Check that comparison with the half order is equal to testing for high scalar. */
+ CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0));
+ secp256k1_scalar_negate(&neg, &s);
+ secp256k1_num_sub(&negnum, &order, &snum);
+ secp256k1_num_mod(&negnum, &order);
+ /* Check that comparison with the half order is equal to testing for high scalar after negation. */
+ CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0));
+ /* Negating should change the high property, unless the value was already zero. */
+ CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s));
+ secp256k1_scalar_get_num(&negnum2, &neg);
+ /* Negating a scalar should be equal to (order - n) mod order on the number. */
+ CHECK(secp256k1_num_eq(&negnum, &negnum2));
+ secp256k1_scalar_add(&neg, &neg, &s);
+ /* Adding a number to its negation should result in zero. */
+ CHECK(secp256k1_scalar_is_zero(&neg));
+ secp256k1_scalar_negate(&neg, &neg);
+ /* Negating zero should still result in zero. */
+ CHECK(secp256k1_scalar_is_zero(&neg));
+ }
+
+ {
+ /* Test secp256k1_scalar_mul_shift_var, which returns round((s1*s2) / 2^shift). The num-based reference rounds by adding 1 after shifting by (shift-1), then shifting once more; a small known-answer check follows this block. */
+ secp256k1_scalar r;
+ secp256k1_num one;
+ secp256k1_num rnum;
+ secp256k1_num rnum2;
+ unsigned char cone[1] = {0x01};
+ unsigned int shift = 256 + (secp256k1_rand32() % 257);
+ secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
+ secp256k1_num_mul(&rnum, &s1num, &s2num);
+ secp256k1_num_shift(&rnum, shift - 1);
+ secp256k1_num_set_bin(&one, cone, 1);
+ secp256k1_num_add(&rnum, &rnum, &one);
+ secp256k1_num_shift(&rnum, 1);
+ secp256k1_scalar_get_num(&rnum2, &r);
+ CHECK(secp256k1_num_eq(&rnum, &rnum2));
+ }
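+
+ {
+ /* Added known-answer sketch (not upstream code), under the same
+ * shift >= 256 contract as above: round((3*5) / 2^256) == 0, so the
+ * rounded, shifted product of tiny scalars must be zero. */
+ secp256k1_scalar r;
+ secp256k1_scalar sa, sb;
+ secp256k1_scalar_set_int(&sa, 3);
+ secp256k1_scalar_set_int(&sb, 5);
+ secp256k1_scalar_mul_shift_var(&r, &sa, &sb, 256);
+ CHECK(secp256k1_scalar_is_zero(&r));
+ }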
+
+ {
+ /* test secp256k1_scalar_shr_int */
+ secp256k1_scalar r;
+ int i;
+ random_scalar_order_test(&r);
+ for (i = 0; i < 100; ++i) {
+ int low;
+ int shift = 1 + (secp256k1_rand32() % 15);
+ int expected = r.d[0] % (1 << shift);
+ low = secp256k1_scalar_shr_int(&r, shift);
+ CHECK(expected == low);
+ }
+ }
+#endif
+
+ {
+ /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
+ if (!secp256k1_scalar_is_zero(&s)) {
+ secp256k1_scalar inv;
+#ifndef USE_NUM_NONE
+ secp256k1_num invnum;
+ secp256k1_num invnum2;
+#endif
+ secp256k1_scalar_inverse(&inv, &s);
+#ifndef USE_NUM_NONE
+ secp256k1_num_mod_inverse(&invnum, &snum, &order);
+ secp256k1_scalar_get_num(&invnum2, &inv);
+ CHECK(secp256k1_num_eq(&invnum, &invnum2));
+#endif
+ secp256k1_scalar_mul(&inv, &inv, &s);
+ /* Multiplying a scalar with its inverse must result in one. */
+ CHECK(secp256k1_scalar_is_one(&inv));
+ secp256k1_scalar_inverse(&inv, &inv);
+ /* Inverting one must result in one. */
+ CHECK(secp256k1_scalar_is_one(&inv));
+ }
+ }
+
+ {
+ /* Test commutativity of add. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_add(&r2, &s2, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar b;
+ int i;
+ /* Test add_bit. */
+ int bit = secp256k1_rand32() % 256;
+ secp256k1_scalar_set_int(&b, 1);
+ CHECK(secp256k1_scalar_is_one(&b));
+ for (i = 0; i < bit; i++) {
+ secp256k1_scalar_add(&b, &b, &b);
+ }
+ r1 = s1;
+ r2 = s1;
+ if (!secp256k1_scalar_add(&r1, &r1, &b)) {
+ /* No overflow happened. */
+ secp256k1_scalar_cadd_bit(&r2, bit, 1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ /* cadd is a noop when flag is zero */
+ secp256k1_scalar_cadd_bit(&r2, bit, 0);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+ }
+
+ {
+ /* Test commutativity of mul. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_mul(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r2, &s2, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test associativity of add. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_add(&r1, &r1, &s);
+ secp256k1_scalar_add(&r2, &s2, &s);
+ secp256k1_scalar_add(&r2, &s1, &r2);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test associativity of mul. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_mul(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r1, &r1, &s);
+ secp256k1_scalar_mul(&r2, &s2, &s);
+ secp256k1_scalar_mul(&r2, &s1, &r2);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test distributivity of mul over add. */
+ secp256k1_scalar r1, r2, t;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r1, &r1, &s);
+ secp256k1_scalar_mul(&r2, &s1, &s);
+ secp256k1_scalar_mul(&t, &s2, &s);
+ secp256k1_scalar_add(&r2, &r2, &t);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test square. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_sqr(&r1, &s1);
+ secp256k1_scalar_mul(&r2, &s1, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test multiplicative identity. */
+ secp256k1_scalar r1, v1;
+ secp256k1_scalar_set_int(&v1,1);
+ secp256k1_scalar_mul(&r1, &s1, &v1);
+ CHECK(secp256k1_scalar_eq(&r1, &s1));
+ }
+
+ {
+ /* Test additive identity. */
+ secp256k1_scalar r1, v0;
+ secp256k1_scalar_set_int(&v0,0);
+ secp256k1_scalar_add(&r1, &s1, &v0);
+ CHECK(secp256k1_scalar_eq(&r1, &s1));
+ }
+
+ {
+ /* Test zero product property. */
+ secp256k1_scalar r1, v0;
+ secp256k1_scalar_set_int(&v0,0);
+ secp256k1_scalar_mul(&r1, &s1, &v0);
+ CHECK(secp256k1_scalar_eq(&r1, &v0));
+ }
+
+}
+
+void run_scalar_tests(void) {
+ int i;
+ for (i = 0; i < 128 * count; i++) {
+ scalar_test();
+ }
+
+ {
+ /* (-1)+1 should be zero. */
+ secp256k1_scalar s, o;
+ secp256k1_scalar_set_int(&s, 1);
+ CHECK(secp256k1_scalar_is_one(&s));
+ secp256k1_scalar_negate(&o, &s);
+ secp256k1_scalar_add(&o, &o, &s);
+ CHECK(secp256k1_scalar_is_zero(&o));
+ secp256k1_scalar_negate(&o, &o);
+ CHECK(secp256k1_scalar_is_zero(&o));
+ }
+
+#ifndef USE_NUM_NONE
+ {
+ /* A scalar with value of the curve order should be 0. */
+ secp256k1_num order;
+ secp256k1_scalar zero;
+ unsigned char bin[32];
+ int overflow = 0;
+ secp256k1_scalar_order_get_num(&order);
+ secp256k1_num_get_bin(bin, 32, &order);
+ secp256k1_scalar_set_b32(&zero, bin, &overflow);
+ CHECK(overflow == 1);
+ CHECK(secp256k1_scalar_is_zero(&zero));
+ }
+#endif
+}
+
+/***** FIELD TESTS *****/
+
+void random_fe(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ secp256k1_rand256(bin);
+ if (secp256k1_fe_set_b32(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+
+void random_fe_non_zero(secp256k1_fe *nz) {
+ int tries = 10;
+ while (--tries >= 0) {
+ random_fe(nz);
+ secp256k1_fe_normalize(nz);
+ if (!secp256k1_fe_is_zero(nz)) {
+ break;
+ }
+ }
+ /* Infinitesimal probability of spurious failure here */
+ CHECK(tries >= 0);
+}
+
+void random_fe_non_square(secp256k1_fe *ns) {
+ secp256k1_fe r;
+ random_fe_non_zero(ns);
+ if (secp256k1_fe_sqrt_var(&r, ns)) {
+ secp256k1_fe_negate(ns, ns, 1);
+ }
+}
+
+int check_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
+ secp256k1_fe an = *a;
+ secp256k1_fe bn = *b;
+ secp256k1_fe_normalize_weak(&an);
+ secp256k1_fe_normalize_var(&bn);
+ return secp256k1_fe_equal_var(&an, &bn);
+}
+
+int check_fe_inverse(const secp256k1_fe *a, const secp256k1_fe *ai) {
+ secp256k1_fe x;
+ secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_fe_mul(&x, a, ai);
+ return check_fe_equal(&x, &one);
+}
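+
+/* Trivial known-answer sketch (an added illustration, not upstream code): the
+ * field inverse of 1 is 1, checked with the helpers defined above. The
+ * function name is a hypothetical addition. */
+void test_fe_inv_one(void) {
+ secp256k1_fe one, oneinv;
+ secp256k1_fe_set_int(&one, 1);
+ secp256k1_fe_inv(&oneinv, &one);
+ CHECK(check_fe_inverse(&one, &oneinv));
+ /* 1^(p-2) == 1, so the inverse must itself equal 1. */
+ CHECK(check_fe_equal(&one, &oneinv));
+}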
+
+void run_field_convert(void) {
+ static const unsigned char b32[32] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
+ 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40
+ };
+ static const secp256k1_fe_storage fes = SECP256K1_FE_STORAGE_CONST(
+ 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL,
+ 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL
+ );
+ static const secp256k1_fe fe = SECP256K1_FE_CONST(
+ 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL,
+ 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL
+ );
+ secp256k1_fe fe2;
+ unsigned char b322[32];
+ secp256k1_fe_storage fes2;
+ /* Check conversions to fe. */
+ CHECK(secp256k1_fe_set_b32(&fe2, b32));
+ CHECK(secp256k1_fe_equal_var(&fe, &fe2));
+ secp256k1_fe_from_storage(&fe2, &fes);
+ CHECK(secp256k1_fe_equal_var(&fe, &fe2));
+ /* Check conversion from fe. */
+ secp256k1_fe_get_b32(b322, &fe);
+ CHECK(memcmp(b322, b32, 32) == 0);
+ secp256k1_fe_to_storage(&fes2, &fe);
+ CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0);
+}
+
+int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) {
+ secp256k1_fe t = *b;
+#ifdef VERIFY
+ t.magnitude = a->magnitude;
+ t.normalized = a->normalized;
+#endif
+ return memcmp(a, &t, sizeof(secp256k1_fe));
+}
+
+void run_field_misc(void) {
+ secp256k1_fe x;
+ secp256k1_fe y;
+ secp256k1_fe z;
+ secp256k1_fe q;
+ secp256k1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5);
+ int i, j;
+ for (i = 0; i < 5*count; i++) {
+ secp256k1_fe_storage xs, ys, zs;
+ random_fe(&x);
+ random_fe_non_zero(&y);
+ /* Test the fe equality and comparison operations. */
+ CHECK(secp256k1_fe_cmp_var(&x, &x) == 0);
+ CHECK(secp256k1_fe_equal_var(&x, &x));
+ z = x;
+ secp256k1_fe_add(&z,&y);
+ /* Test fe conditional move; z is not normalized here. */
+ q = x;
+ secp256k1_fe_cmov(&x, &z, 0);
+ VERIFY_CHECK(!x.normalized && x.magnitude == z.magnitude);
+ secp256k1_fe_cmov(&x, &x, 1);
+ CHECK(fe_memcmp(&x, &z) != 0);
+ CHECK(fe_memcmp(&x, &q) == 0);
+ secp256k1_fe_cmov(&q, &z, 1);
+ VERIFY_CHECK(!q.normalized && q.magnitude == z.magnitude);
+ CHECK(fe_memcmp(&q, &z) == 0);
+ secp256k1_fe_normalize_var(&x);
+ secp256k1_fe_normalize_var(&z);
+ CHECK(!secp256k1_fe_equal_var(&x, &z));
+ secp256k1_fe_normalize_var(&q);
+ secp256k1_fe_cmov(&q, &z, (i&1));
+ VERIFY_CHECK(q.normalized && q.magnitude == 1);
+ for (j = 0; j < 6; j++) {
+ secp256k1_fe_negate(&z, &z, j+1);
+ secp256k1_fe_normalize_var(&q);
+ secp256k1_fe_cmov(&q, &z, (j&1));
+ VERIFY_CHECK(!q.normalized && q.magnitude == (j+2));
+ }
+ secp256k1_fe_normalize_var(&z);
+ /* Test storage conversion and conditional moves. */
+ secp256k1_fe_to_storage(&xs, &x);
+ secp256k1_fe_to_storage(&ys, &y);
+ secp256k1_fe_to_storage(&zs, &z);
+ secp256k1_fe_storage_cmov(&zs, &xs, 0);
+ secp256k1_fe_storage_cmov(&zs, &zs, 1);
+ CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0);
+ secp256k1_fe_storage_cmov(&ys, &xs, 1);
+ CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0);
+ secp256k1_fe_from_storage(&x, &xs);
+ secp256k1_fe_from_storage(&y, &ys);
+ secp256k1_fe_from_storage(&z, &zs);
+ /* Test that mul_int, mul, and add agree. */
+ secp256k1_fe_add(&y, &x);
+ secp256k1_fe_add(&y, &x);
+ z = x;
+ secp256k1_fe_mul_int(&z, 3);
+ CHECK(check_fe_equal(&y, &z));
+ secp256k1_fe_add(&y, &x);
+ secp256k1_fe_add(&z, &x);
+ CHECK(check_fe_equal(&z, &y));
+ z = x;
+ secp256k1_fe_mul_int(&z, 5);
+ secp256k1_fe_mul(&q, &x, &fe5);
+ CHECK(check_fe_equal(&z, &q));
+ secp256k1_fe_negate(&x, &x, 1);
+ secp256k1_fe_add(&z, &x);
+ secp256k1_fe_add(&q, &x);
+ CHECK(check_fe_equal(&y, &z));
+ CHECK(check_fe_equal(&q, &y));
+ }
+}
+
+void run_field_inv(void) {
+ secp256k1_fe x, xi, xii;
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ random_fe_non_zero(&x);
+ secp256k1_fe_inv(&xi, &x);
+ CHECK(check_fe_inverse(&x, &xi));
+ secp256k1_fe_inv(&xii, &xi);
+ CHECK(check_fe_equal(&x, &xii));
+ }
+}
+
+void run_field_inv_var(void) {
+ secp256k1_fe x, xi, xii;
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ random_fe_non_zero(&x);
+ secp256k1_fe_inv_var(&xi, &x);
+ CHECK(check_fe_inverse(&x, &xi));
+ secp256k1_fe_inv_var(&xii, &xi);
+ CHECK(check_fe_equal(&x, &xii));
+ }
+}
+
+void run_field_inv_all_var(void) {
+ secp256k1_fe x[16], xi[16], xii[16];
+ int i;
+ /* Check it's safe to call for 0 elements */
+ secp256k1_fe_inv_all_var(0, xi, x);
+ for (i = 0; i < count; i++) {
+ size_t j;
+ size_t len = (secp256k1_rand32() & 15) + 1;
+ for (j = 0; j < len; j++) {
+ random_fe_non_zero(&x[j]);
+ }
+ secp256k1_fe_inv_all_var(len, xi, x);
+ for (j = 0; j < len; j++) {
+ CHECK(check_fe_inverse(&x[j], &xi[j]));
+ }
+ secp256k1_fe_inv_all_var(len, xii, xi);
+ for (j = 0; j < len; j++) {
+ CHECK(check_fe_equal(&x[j], &xii[j]));
+ }
+ }
+}
+
+void run_sqr(void) {
+ secp256k1_fe x, s;
+
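+ /* Exercise squaring on x = -(2^i) mod p for i in [1,512]. There are no
+ * explicit CHECKs here; this mainly exercises secp256k1_fe_sqr for crashes
+ * and, when built with VERIFY, for magnitude violations. */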
+ {
+ int i;
+ secp256k1_fe_set_int(&x, 1);
+ secp256k1_fe_negate(&x, &x, 1);
+
+ for (i = 1; i <= 512; ++i) {
+ secp256k1_fe_mul_int(&x, 2);
+ secp256k1_fe_normalize(&x);
+ secp256k1_fe_sqr(&s, &x);
+ }
+ }
+}
+
+void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) {
+ secp256k1_fe r1, r2;
+ int v = secp256k1_fe_sqrt_var(&r1, a);
+ CHECK((v == 0) == (k == NULL));
+
+ if (k != NULL) {
+ /* Check that the returned root is +/- the given known answer */
+ secp256k1_fe_negate(&r2, &r1, 1);
+ secp256k1_fe_add(&r1, k); secp256k1_fe_add(&r2, k);
+ secp256k1_fe_normalize(&r1); secp256k1_fe_normalize(&r2);
+ CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2));
+ }
+}
+
+void run_sqrt(void) {
+ secp256k1_fe ns, x, s, t;
+ int i;
+
+ /* Check sqrt(0) is 0 */
+ secp256k1_fe_set_int(&x, 0);
+ secp256k1_fe_sqr(&s, &x);
+ test_sqrt(&s, &x);
+
+ /* Check sqrt of small squares (and their negatives) */
+ for (i = 1; i <= 100; i++) {
+ secp256k1_fe_set_int(&x, i);
+ secp256k1_fe_sqr(&s, &x);
+ test_sqrt(&s, &x);
+ secp256k1_fe_negate(&t, &s, 1);
+ test_sqrt(&t, NULL);
+ }
+
+ /* Consistency checks for large random values */
+ for (i = 0; i < 10; i++) {
+ int j;
+ random_fe_non_square(&ns);
+ for (j = 0; j < count; j++) {
+ random_fe(&x);
+ secp256k1_fe_sqr(&s, &x);
+ test_sqrt(&s, &x);
+ secp256k1_fe_negate(&t, &s, 1);
+ test_sqrt(&t, NULL);
+ secp256k1_fe_mul(&t, &s, &ns);
+ test_sqrt(&t, NULL);
+ }
+ }
+}
+
+/***** GROUP TESTS *****/
+
+void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
+ CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
+}
+
+/* This compares jacobian points including their Z, not just their geometric meaning. */
+int gej_xyz_equals_gej(const secp256k1_gej *a, const secp256k1_gej *b) {
+ secp256k1_gej a2;
+ secp256k1_gej b2;
+ int ret = 1;
+ ret &= a->infinity == b->infinity;
+ if (ret && !a->infinity) {
+ a2 = *a;
+ b2 = *b;
+ secp256k1_fe_normalize(&a2.x);
+ secp256k1_fe_normalize(&a2.y);
+ secp256k1_fe_normalize(&a2.z);
+ secp256k1_fe_normalize(&b2.x);
+ secp256k1_fe_normalize(&b2.y);
+ secp256k1_fe_normalize(&b2.z);
+ ret &= secp256k1_fe_cmp_var(&a2.x, &b2.x) == 0;
+ ret &= secp256k1_fe_cmp_var(&a2.y, &b2.y) == 0;
+ ret &= secp256k1_fe_cmp_var(&a2.z, &b2.z) == 0;
+ }
+ return ret;
+}
+
+void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
+ secp256k1_fe z2s;
+ secp256k1_fe u1, u2, s1, s2;
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
+ secp256k1_fe_sqr(&z2s, &b->z);
+ secp256k1_fe_mul(&u1, &a->x, &z2s);
+ u2 = b->x; secp256k1_fe_normalize_weak(&u2);
+ secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
+ s2 = b->y; secp256k1_fe_normalize_weak(&s2);
+ CHECK(secp256k1_fe_equal_var(&u1, &u2));
+ CHECK(secp256k1_fe_equal_var(&s1, &s2));
+}
+
+void test_ge(void) {
+ int i, i1;
+#ifdef USE_ENDOMORPHISM
+ int runs = 6;
+#else
+ int runs = 4;
+#endif
+ /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4).
+ * The second in each pair of identical points uses a random Z coordinate in the Jacobian form.
+ * All magnitudes are randomized.
+ * All (1 + 4*runs) * (1 + 4*runs) combinations of points (17*17 without the endomorphism, 25*25 with it) are added to each other, using all applicable methods.
+ *
+ * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well.
+ */
+ secp256k1_ge *ge = (secp256k1_ge *)malloc(sizeof(secp256k1_ge) * (1 + 4 * runs));
+ secp256k1_gej *gej = (secp256k1_gej *)malloc(sizeof(secp256k1_gej) * (1 + 4 * runs));
+ secp256k1_fe *zinv = (secp256k1_fe *)malloc(sizeof(secp256k1_fe) * (1 + 4 * runs));
+ secp256k1_fe zf;
+ secp256k1_fe zfi2, zfi3;
+
+ secp256k1_gej_set_infinity(&gej[0]);
+ secp256k1_ge_clear(&ge[0]);
+ secp256k1_ge_set_gej_var(&ge[0], &gej[0]);
+ for (i = 0; i < runs; i++) {
+ int j;
+ secp256k1_ge g;
+ random_group_element_test(&g);
+#ifdef USE_ENDOMORPHISM
+ if (i >= runs - 2) {
+ secp256k1_ge_mul_lambda(&g, &ge[1]);
+ }
+ if (i >= runs - 1) {
+ secp256k1_ge_mul_lambda(&g, &g);
+ }
+#endif
+ ge[1 + 4 * i] = g;
+ ge[2 + 4 * i] = g;
+ secp256k1_ge_neg(&ge[3 + 4 * i], &g);
+ secp256k1_ge_neg(&ge[4 + 4 * i], &g);
+ secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]);
+ random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]);
+ secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]);
+ random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]);
+ for (j = 0; j < 4; j++) {
+ random_field_element_magnitude(&ge[1 + j + 4 * i].x);
+ random_field_element_magnitude(&ge[1 + j + 4 * i].y);
+ random_field_element_magnitude(&gej[1 + j + 4 * i].x);
+ random_field_element_magnitude(&gej[1 + j + 4 * i].y);
+ random_field_element_magnitude(&gej[1 + j + 4 * i].z);
+ }
+ }
+
+ /* Compute z inverses. */
+ {
+ secp256k1_fe *zs = malloc(sizeof(secp256k1_fe) * (1 + 4 * runs));
+ for (i = 0; i < 4 * runs + 1; i++) {
+ if (i == 0) {
+ /* The point at infinity does not have a meaningful z inverse. Any nonzero value will do. */
+ do {
+ random_field_element_test(&zs[i]);
+ } while(secp256k1_fe_is_zero(&zs[i]));
+ } else {
+ zs[i] = gej[i].z;
+ }
+ }
+ secp256k1_fe_inv_all_var(4 * runs + 1, zinv, zs);
+ free(zs);
+ }
+
+ /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */
+ do {
+ random_field_element_test(&zf);
+ } while(secp256k1_fe_is_zero(&zf));
+ random_field_element_magnitude(&zf);
+ secp256k1_fe_inv_var(&zfi3, &zf);
+ secp256k1_fe_sqr(&zfi2, &zfi3);
+ secp256k1_fe_mul(&zfi3, &zfi3, &zfi2);
+
+ for (i1 = 0; i1 < 1 + 4 * runs; i1++) {
+ int i2;
+ for (i2 = 0; i2 < 1 + 4 * runs; i2++) {
+ /* Compute reference result using gej + gej (var). */
+ secp256k1_gej refj, resj;
+ secp256k1_ge ref;
+ secp256k1_fe zr;
+ secp256k1_gej_add_var(&refj, &gej[i1], &gej[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr);
+ /* Check Z ratio. */
+ if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&refj)) {
+ secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z);
+ CHECK(secp256k1_fe_equal_var(&zrz, &refj.z));
+ }
+ secp256k1_ge_set_gej_var(&ref, &refj);
+
+ /* Test gej + ge with Z ratio result (var). */
+ secp256k1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr);
+ ge_equals_gej(&ref, &resj);
+ if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&resj)) {
+ secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z);
+ CHECK(secp256k1_fe_equal_var(&zrz, &resj.z));
+ }
+
+ /* Test gej + ge (var, with additional Z factor). */
+ {
+ secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */
+ secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2);
+ secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3);
+ random_field_element_magnitude(&ge2_zfi.x);
+ random_field_element_magnitude(&ge2_zfi.y);
+ secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf);
+ ge_equals_gej(&ref, &resj);
+ }
+
+ /* Test gej + ge (const). */
+ if (i2 != 0) {
+ /* secp256k1_gej_add_ge does not support its second argument being infinity. */
+ secp256k1_gej_add_ge(&resj, &gej[i1], &ge[i2]);
+ ge_equals_gej(&ref, &resj);
+ }
+
+ /* Test doubling (var). */
+ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) {
+ secp256k1_fe zr2;
+ /* Normal doubling with Z ratio result. */
+ secp256k1_gej_double_var(&resj, &gej[i1], &zr2);
+ ge_equals_gej(&ref, &resj);
+ /* Check Z ratio. */
+ secp256k1_fe_mul(&zr2, &zr2, &gej[i1].z);
+ CHECK(secp256k1_fe_equal_var(&zr2, &resj.z));
+ /* Normal doubling. */
+ secp256k1_gej_double_var(&resj, &gej[i2], NULL);
+ ge_equals_gej(&ref, &resj);
+ }
+
+ /* Test adding opposites. */
+ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) {
+ CHECK(secp256k1_ge_is_infinity(&ref));
+ }
+
+ /* Test adding infinity. */
+ if (i1 == 0) {
+ CHECK(secp256k1_ge_is_infinity(&ge[i1]));
+ CHECK(secp256k1_gej_is_infinity(&gej[i1]));
+ ge_equals_gej(&ref, &gej[i2]);
+ }
+ if (i2 == 0) {
+ CHECK(secp256k1_ge_is_infinity(&ge[i2]));
+ CHECK(secp256k1_gej_is_infinity(&gej[i2]));
+ ge_equals_gej(&ref, &gej[i1]);
+ }
+ }
+ }
+
+ /* Test adding all points together in random order equals infinity. */
+ {
+ secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY;
+ secp256k1_gej *gej_shuffled = (secp256k1_gej *)malloc((4 * runs + 1) * sizeof(secp256k1_gej));
+ for (i = 0; i < 4 * runs + 1; i++) {
+ gej_shuffled[i] = gej[i];
+ }
+ for (i = 0; i < 4 * runs + 1; i++) {
+ int swap = i + secp256k1_rand32() % (4 * runs + 1 - i);
+ if (swap != i) {
+ secp256k1_gej t = gej_shuffled[i];
+ gej_shuffled[i] = gej_shuffled[swap];
+ gej_shuffled[swap] = t;
+ }
+ }
+ for (i = 0; i < 4 * runs + 1; i++) {
+ secp256k1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL);
+ }
+ CHECK(secp256k1_gej_is_infinity(&sum));
+ free(gej_shuffled);
+ }
+
+ /* Test batch gej -> ge conversion with and without known z ratios. */
+ {
+ secp256k1_fe *zr = (secp256k1_fe *)malloc((4 * runs + 1) * sizeof(secp256k1_fe));
+ secp256k1_ge *ge_set_table = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge));
+ secp256k1_ge *ge_set_all = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge));
+ for (i = 0; i < 4 * runs + 1; i++) {
+ /* Compute gej[i + 1].z / gej[i].z (with gej[n].z taken to be 1). */
+ if (i < 4 * runs) {
+ secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z);
+ }
+ }
+ secp256k1_ge_set_table_gej_var(4 * runs + 1, ge_set_table, gej, zr);
+ secp256k1_ge_set_all_gej_var(4 * runs + 1, ge_set_all, gej, &ctx->error_callback);
+ for (i = 0; i < 4 * runs + 1; i++) {
+ secp256k1_fe s;
+ random_fe_non_zero(&s);
+ secp256k1_gej_rescale(&gej[i], &s);
+ ge_equals_gej(&ge_set_table[i], &gej[i]);
+ ge_equals_gej(&ge_set_all[i], &gej[i]);
+ }
+ free(ge_set_table);
+ free(ge_set_all);
+ free(zr);
+ }
+
+ free(ge);
+ free(gej);
+ free(zinv);
+}
+
+void test_add_neg_y_diff_x(void) {
+ /* The point of this test is to check that we can add two points
+ * whose y-coordinates are negatives of each other but whose x
+ * coordinates differ. If the x-coordinates were the same, these
+ * points would be negatives of each other and their sum would be
+ * infinity. This is useful because it "covers up" any degeneracy
+ * in the addition algorithm that would cause the xy coordinates
+ * of the sum to be wrong (since infinity has no xy coordinates).
+ * HOWEVER, if the x-coordinates are different, infinity is the
+ * wrong answer, and such degeneracies are exposed. This is the
+ * root of https://github.com/bitcoin/secp256k1/issues/257 which
+ * this test is a regression test for.
+ *
+ * These points were generated in sage as
+ * # secp256k1 params
+ * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
+ * C = EllipticCurve ([F (0), F (7)])
+ * G = C.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)
+ * N = FiniteField(G.order())
+ *
+ * # endomorphism values (lambda is 1^{1/3} in N, beta is 1^{1/3} in F)
+ * x = polygen(N)
+ * lam = (1 - x^3).roots()[1][0]
+ *
+ * # random "bad pair"
+ * P = C.random_element()
+ * Q = -int(lam) * P
+ * print " P: %x %x" % P.xy()
+ * print " Q: %x %x" % Q.xy()
+ * print "P + Q: %x %x" % (P + Q).xy()
+ */
+ secp256k1_gej aj = SECP256K1_GEJ_CONST(
+ 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30,
+ 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb,
+ 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8,
+ 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d
+ );
+ secp256k1_gej bj = SECP256K1_GEJ_CONST(
+ 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86,
+ 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7,
+ 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57,
+ 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2
+ );
+ secp256k1_gej sumj = SECP256K1_GEJ_CONST(
+ 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027,
+ 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a,
+ 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08,
+ 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe
+ );
+ secp256k1_ge b;
+ secp256k1_gej resj;
+ secp256k1_ge res;
+ secp256k1_ge_set_gej(&b, &bj);
+
+ secp256k1_gej_add_var(&resj, &aj, &bj, NULL);
+ secp256k1_ge_set_gej(&res, &resj);
+ ge_equals_gej(&res, &sumj);
+
+ secp256k1_gej_add_ge(&resj, &aj, &b);
+ secp256k1_ge_set_gej(&res, &resj);
+ ge_equals_gej(&res, &sumj);
+
+ secp256k1_gej_add_ge_var(&resj, &aj, &b, NULL);
+ secp256k1_ge_set_gej(&res, &resj);
+ ge_equals_gej(&res, &sumj);
+}
+
+void run_ge(void) {
+ int i;
+ for (i = 0; i < count * 32; i++) {
+ test_ge();
+ }
+ test_add_neg_y_diff_x();
+}
+
+void test_ec_combine(void) {
+ secp256k1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ secp256k1_pubkey data[6];
+ const secp256k1_pubkey* d[6];
+ secp256k1_pubkey sd;
+ secp256k1_pubkey sd2;
+ secp256k1_gej Qj;
+ secp256k1_ge Q;
+ int i;
+ for (i = 1; i <= 6; i++) {
+ secp256k1_scalar s;
+ random_scalar_order_test(&s);
+ secp256k1_scalar_add(&sum, &sum, &s);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s);
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(&data[i - 1], &Q);
+ d[i - 1] = &data[i - 1];
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum);
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(&sd, &Q);
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1);
+ CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0);
+ }
+}
+
+void run_ec_combine(void) {
+ int i;
+ for (i = 0; i < count * 8; i++) {
+ test_ec_combine();
+ }
+}
+
+/***** ECMULT TESTS *****/
+
+void run_ecmult_chain(void) {
+ /* random starting point A (on the curve) */
+ secp256k1_gej a = SECP256K1_GEJ_CONST(
+ 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3,
+ 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004,
+ 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f,
+ 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f
+ );
+ /* two random initial factors xn and gn */
+ secp256k1_scalar xn = SECP256K1_SCALAR_CONST(
+ 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c,
+ 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407
+ );
+ secp256k1_scalar gn = SECP256K1_SCALAR_CONST(
+ 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9,
+ 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de
+ );
+ /* two small multipliers to be applied to xn and gn in every iteration: */
+ static const secp256k1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337);
+ static const secp256k1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113);
+ /* accumulators with the resulting coefficients to A and G */
+ secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ /* actual points */
+ secp256k1_gej x;
+ secp256k1_gej x2;
+ int i;
+
+ /* the point being computed */
+ x = a;
+ for (i = 0; i < 200*count; i++) {
+ /* in each iteration, compute X = xn*X + gn*G; */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn);
+ /* also compute ae and ge: the actual accumulated factors for A and G */
+ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */
+ secp256k1_scalar_mul(&ae, &ae, &xn);
+ secp256k1_scalar_mul(&ge, &ge, &xn);
+ secp256k1_scalar_add(&ge, &ge, &gn);
+ /* modify xn and gn */
+ secp256k1_scalar_mul(&xn, &xn, &xf);
+ secp256k1_scalar_mul(&gn, &gn, &gf);
+
+ /* verify */
+ if (i == 19999) {
+ /* expected result after 19999 iterations (this branch is only reached when count >= 100) */
+ secp256k1_gej rp = SECP256K1_GEJ_CONST(
+ 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE,
+ 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830,
+ 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D,
+ 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88
+ );
+
+ secp256k1_gej_neg(&rp, &rp);
+ secp256k1_gej_add_var(&rp, &rp, &x, NULL);
+ CHECK(secp256k1_gej_is_infinity(&rp));
+ }
+ }
+ /* redo the computation, but directly with the resulting ae and ge coefficients: */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge);
+ secp256k1_gej_neg(&x2, &x2);
+ secp256k1_gej_add_var(&x2, &x2, &x, NULL);
+ CHECK(secp256k1_gej_is_infinity(&x2));
+}
+
+void test_point_times_order(const secp256k1_gej *point) {
+ /* X * (point + G) + (order-X) * (point + G) = 0 */
+ secp256k1_scalar x;
+ secp256k1_scalar nx;
+ secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_gej res1, res2;
+ secp256k1_ge res3;
+ unsigned char pub[65];
+ size_t psize = 65;
+ random_scalar_order_test(&x);
+ secp256k1_scalar_negate(&nx, &x);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
+ secp256k1_gej_add_var(&res1, &res1, &res2, NULL);
+ CHECK(secp256k1_gej_is_infinity(&res1));
+ CHECK(secp256k1_gej_is_valid_var(&res1) == 0);
+ secp256k1_ge_set_gej(&res3, &res1);
+ CHECK(secp256k1_ge_is_infinity(&res3));
+ CHECK(secp256k1_ge_is_valid_var(&res3) == 0);
+ CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0);
+ psize = 65;
+ CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0);
+ /* check zero/one edge cases */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero);
+ secp256k1_ge_set_gej(&res3, &res1);
+ CHECK(secp256k1_ge_is_infinity(&res3));
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero);
+ secp256k1_ge_set_gej(&res3, &res1);
+ ge_equals_gej(&res3, point);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one);
+ secp256k1_ge_set_gej(&res3, &res1);
+ ge_equals_ge(&res3, &secp256k1_ge_const_g);
+}
+
+void run_point_times_order(void) {
+ int i;
+ secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2);
+ static const secp256k1_fe xr = SECP256K1_FE_CONST(
+ 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C,
+ 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45
+ );
+ for (i = 0; i < 500; i++) {
+ secp256k1_ge p;
+ if (secp256k1_ge_set_xo_var(&p, &x, 1)) {
+ secp256k1_gej j;
+ CHECK(secp256k1_ge_is_valid_var(&p));
+ secp256k1_gej_set_ge(&j, &p);
+ CHECK(secp256k1_gej_is_valid_var(&j));
+ test_point_times_order(&j);
+ }
+ secp256k1_fe_sqr(&x, &x);
+ }
+ secp256k1_fe_normalize_var(&x);
+ CHECK(secp256k1_fe_equal_var(&x, &xr));
+}
+
+void ecmult_const_random_mult(void) {
+ /* random starting point A (on the curve) */
+ secp256k1_ge a = SECP256K1_GE_CONST(
+ 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b,
+ 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a,
+ 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c,
+ 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d
+ );
+ /* random initial factor xn */
+ secp256k1_scalar xn = SECP256K1_SCALAR_CONST(
+ 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327,
+ 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b
+ );
+ /* expected xn * A (from sage) */
+ secp256k1_ge expected_b = SECP256K1_GE_CONST(
+ 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd,
+ 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786,
+ 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f,
+ 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956
+ );
+ secp256k1_gej b;
+ secp256k1_ecmult_const(&b, &a, &xn);
+
+ CHECK(secp256k1_ge_is_valid_var(&a));
+ ge_equals_gej(&expected_b, &b);
+}
+
+void ecmult_const_commutativity(void) {
+ secp256k1_scalar a;
+ secp256k1_scalar b;
+ secp256k1_gej res1;
+ secp256k1_gej res2;
+ secp256k1_ge mid1;
+ secp256k1_ge mid2;
+ random_scalar_order_test(&a);
+ random_scalar_order_test(&b);
+
+ secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a);
+ secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b);
+ secp256k1_ge_set_gej(&mid1, &res1);
+ secp256k1_ge_set_gej(&mid2, &res2);
+ secp256k1_ecmult_const(&res1, &mid1, &b);
+ secp256k1_ecmult_const(&res2, &mid2, &a);
+ secp256k1_ge_set_gej(&mid1, &res1);
+ secp256k1_ge_set_gej(&mid2, &res2);
+ ge_equals_ge(&mid1, &mid2);
+}
+
+void ecmult_const_mult_zero_one(void) {
+ secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_scalar negone;
+ secp256k1_gej res1;
+ secp256k1_ge res2;
+ secp256k1_ge point;
+ secp256k1_scalar_negate(&negone, &one);
+
+ random_group_element_test(&point);
+ secp256k1_ecmult_const(&res1, &point, &zero);
+ secp256k1_ge_set_gej(&res2, &res1);
+ CHECK(secp256k1_ge_is_infinity(&res2));
+ secp256k1_ecmult_const(&res1, &point, &one);
+ secp256k1_ge_set_gej(&res2, &res1);
+ ge_equals_ge(&res2, &point);
+ secp256k1_ecmult_const(&res1, &point, &negone);
+ secp256k1_gej_neg(&res1, &res1);
+ secp256k1_ge_set_gej(&res2, &res1);
+ ge_equals_ge(&res2, &point);
+}
+
+void ecmult_const_chain_multiply(void) {
+ /* Check known result (randomly generated test problem from sage) */
+ const secp256k1_scalar scalar = SECP256K1_SCALAR_CONST(
+ 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d,
+ 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b
+ );
+ const secp256k1_gej expected_point = SECP256K1_GEJ_CONST(
+ 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd,
+ 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f,
+ 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196,
+ 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435
+ );
+ secp256k1_gej point;
+ secp256k1_ge res;
+ int i;
+
+ secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g);
+ for (i = 0; i < 100; ++i) {
+ secp256k1_ge tmp;
+ secp256k1_ge_set_gej(&tmp, &point);
+ secp256k1_ecmult_const(&point, &tmp, &scalar);
+ }
+ secp256k1_ge_set_gej(&res, &point);
+ ge_equals_gej(&res, &expected_point);
+}
+
+void run_ecmult_const_tests(void) {
+ ecmult_const_mult_zero_one();
+ ecmult_const_random_mult();
+ ecmult_const_commutativity();
+ ecmult_const_chain_multiply();
+}
+
+void test_wnaf(const secp256k1_scalar *number, int w) {
+ secp256k1_scalar x, two, t;
+ int wnaf[256];
+ int zeroes = -1;
+ int i;
+ int bits;
+ secp256k1_scalar_set_int(&x, 0);
+ secp256k1_scalar_set_int(&two, 2);
+ bits = secp256k1_ecmult_wnaf(wnaf, 256, number, w);
+ CHECK(bits <= 256);
+ for (i = bits-1; i >= 0; i--) {
+ int v = wnaf[i];
+ secp256k1_scalar_mul(&x, &x, &two);
+ if (v) {
+ CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */
+ zeroes=0;
+ CHECK((v & 1) == 1); /* check non-zero elements are odd */
+ CHECK(v <= (1 << (w-1)) - 1); /* check upper bound */
+ CHECK(v >= -(1 << (w-1)) - 1); /* check lower bound */
+ } else {
+ CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */
+ zeroes++;
+ }
+ if (v >= 0) {
+ secp256k1_scalar_set_int(&t, v);
+ } else {
+ secp256k1_scalar_set_int(&t, -v);
+ secp256k1_scalar_negate(&t, &t);
+ }
+ secp256k1_scalar_add(&x, &x, &t);
+ }
+ CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */
+}
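+
+/* Known-answer sketch (an added illustration, not from upstream): with window
+ * width 4, the scalar 5 is odd and below 2^(w-1) = 8, so its wNAF is the
+ * single digit 5 at position 0. The function name is a hypothetical addition. */
+void test_wnaf_small(void) {
+ int wnaf[256];
+ int bits;
+ secp256k1_scalar five;
+ secp256k1_scalar_set_int(&five, 5);
+ bits = secp256k1_ecmult_wnaf(wnaf, 256, &five, 4);
+ /* The highest nonzero digit is at position 0, so one digit suffices. */
+ CHECK(bits == 1);
+ CHECK(wnaf[0] == 5);
+}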
+
+void test_constant_wnaf_negate(const secp256k1_scalar *number) {
+ secp256k1_scalar neg1 = *number;
+ secp256k1_scalar neg2 = *number;
+ int sign1 = 1;
+ int sign2 = 1;
+
+ if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) {
+ secp256k1_scalar_negate(&neg1, &neg1);
+ sign1 = -1;
+ }
+ sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2));
+ CHECK(sign1 == sign2);
+ CHECK(secp256k1_scalar_eq(&neg1, &neg2));
+}
+
+void test_constant_wnaf(const secp256k1_scalar *number, int w) {
+ secp256k1_scalar x, shift;
+ int wnaf[256] = {0};
+ int i;
+#ifdef USE_ENDOMORPHISM
+ int skew;
+#endif
+ secp256k1_scalar num = *number;
+
+ secp256k1_scalar_set_int(&x, 0);
+ secp256k1_scalar_set_int(&shift, 1 << w);
+ /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
+#ifdef USE_ENDOMORPHISM
+ for (i = 0; i < 16; ++i) {
+ secp256k1_scalar_shr_int(&num, 8);
+ }
+ skew = secp256k1_wnaf_const(wnaf, num, w);
+#else
+ secp256k1_wnaf_const(wnaf, num, w);
+#endif
+
+ for (i = WNAF_SIZE(w); i >= 0; --i) {
+ secp256k1_scalar t;
+ int v = wnaf[i];
+ CHECK(v != 0); /* check nonzero */
+ CHECK(v & 1); /* check parity */
+ CHECK(v > -(1 << w)); /* check lower bound */
+ CHECK(v < (1 << w)); /* check upper bound */
+
+ secp256k1_scalar_mul(&x, &x, &shift);
+ if (v >= 0) {
+ secp256k1_scalar_set_int(&t, v);
+ } else {
+ secp256k1_scalar_set_int(&t, -v);
+ secp256k1_scalar_negate(&t, &t);
+ }
+ secp256k1_scalar_add(&x, &x, &t);
+ }
+#ifdef USE_ENDOMORPHISM
+ /* Skew num because when encoding 128-bit numbers as odd we use an offset */
+ secp256k1_scalar_cadd_bit(&num, skew == 2, 1);
+#endif
+ CHECK(secp256k1_scalar_eq(&x, &num));
+}
+
+void run_wnaf(void) {
+ int i;
+ secp256k1_scalar n = {{0}};
+
+ /* Sanity check: 1 and 2 are the smallest odd and even numbers and should
+ * have easier-to-diagnose failure modes */
+ n.d[0] = 1;
+ test_constant_wnaf(&n, 4);
+ n.d[0] = 2;
+ test_constant_wnaf(&n, 4);
+ /* Random tests */
+ for (i = 0; i < count; i++) {
+ random_scalar_order(&n);
+ test_wnaf(&n, 4+(i%10));
+ test_constant_wnaf_negate(&n);
+ test_constant_wnaf(&n, 4 + (i % 10));
+ }
+}
+
+void test_ecmult_constants(void) {
+ /* Test ecmult_gen() for [0..36) and [order-36..order). */
+ secp256k1_scalar x;
+ secp256k1_gej r;
+ secp256k1_ge ng;
+ int i;
+ int j;
+ secp256k1_ge_neg(&ng, &secp256k1_ge_const_g);
+ for (i = 0; i < 36; i++ ) {
+ secp256k1_scalar_set_int(&x, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
+ for (j = 0; j < i; j++) {
+ if (j == i - 1) {
+ ge_equals_gej(&secp256k1_ge_const_g, &r);
+ }
+ secp256k1_gej_add_ge(&r, &r, &ng);
+ }
+ CHECK(secp256k1_gej_is_infinity(&r));
+ }
+ for (i = 1; i <= 36; i++ ) {
+ secp256k1_scalar_set_int(&x, i);
+ secp256k1_scalar_negate(&x, &x);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
+ for (j = 0; j < i; j++) {
+ if (j == i - 1) {
+ ge_equals_gej(&ng, &r);
+ }
+ secp256k1_gej_add_ge(&r, &r, &secp256k1_ge_const_g);
+ }
+ CHECK(secp256k1_gej_is_infinity(&r));
+ }
+}
+
+void run_ecmult_constants(void) {
+ test_ecmult_constants();
+}
+
+void test_ecmult_gen_blind(void) {
+ /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */
+ secp256k1_scalar key;
+ secp256k1_scalar b;
+ unsigned char seed32[32];
+ secp256k1_gej pgej;
+ secp256k1_gej pgej2;
+ secp256k1_gej i;
+ secp256k1_ge pge;
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key);
+ secp256k1_rand256(seed32);
+ b = ctx->ecmult_gen_ctx.blind;
+ i = ctx->ecmult_gen_ctx.initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
+ CHECK(!secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind));
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key);
+ CHECK(!gej_xyz_equals_gej(&pgej, &pgej2));
+ CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial));
+ secp256k1_ge_set_gej(&pge, &pgej);
+ ge_equals_gej(&pge, &pgej2);
+}
+
+void test_ecmult_gen_blind_reset(void) {
+ /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */
+ secp256k1_scalar b;
+ secp256k1_gej initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0);
+ b = ctx->ecmult_gen_ctx.blind;
+ initial = ctx->ecmult_gen_ctx.initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0);
+ CHECK(secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind));
+ CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial));
+}
+
+void run_ecmult_gen_blind(void) {
+ int i;
+ test_ecmult_gen_blind_reset();
+ for (i = 0; i < 10; i++) {
+ test_ecmult_gen_blind();
+ }
+}
+
+#ifdef USE_ENDOMORPHISM
+/***** ENDOMORPHISM TESTS *****/
+void test_scalar_split(void) {
+ secp256k1_scalar full;
+ secp256k1_scalar s1, slam;
+ const unsigned char zero[32] = {0};
+ unsigned char tmp[32];
+
+ random_scalar_order_test(&full);
+ secp256k1_scalar_split_lambda(&s1, &slam, &full);
+
+ /* check that both are <= 128 bits in size */
+ if (secp256k1_scalar_is_high(&s1)) {
+ secp256k1_scalar_negate(&s1, &s1);
+ }
+ if (secp256k1_scalar_is_high(&slam)) {
+ secp256k1_scalar_negate(&slam, &slam);
+ }
+
+ secp256k1_scalar_get_b32(tmp, &s1);
+ CHECK(memcmp(zero, tmp, 16) == 0);
+ secp256k1_scalar_get_b32(tmp, &slam);
+ CHECK(memcmp(zero, tmp, 16) == 0);
+}
+
+void run_endomorphism_tests(void) {
+ test_scalar_split();
+}
+#endif
+
+void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) {
+ secp256k1_scalar nonce;
+ do {
+ random_scalar_order_test(&nonce);
+ } while(!secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid));
+}
+
+void test_ecdsa_sign_verify(void) {
+ secp256k1_gej pubj;
+ secp256k1_ge pub;
+ secp256k1_scalar one;
+ secp256k1_scalar msg, key;
+ secp256k1_scalar sigr, sigs;
+ int recid;
+ int getrec;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key);
+ secp256k1_ge_set_gej(&pub, &pubj);
+ getrec = secp256k1_rand32()&1;
+ random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL);
+ if (getrec) {
+ CHECK(recid >= 0 && recid < 4);
+ }
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_scalar_add(&msg, &msg, &one);
+ CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+}
+
+void run_ecdsa_sign_verify(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_ecdsa_sign_verify();
+ }
+}
+
+/** Dummy nonce generation function that just uses a precomputed nonce, and fails if it is not accepted. Use only for testing. */
+static int precomputed_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ (void)msg32;
+ (void)key32;
+ (void)algo16;
+ memcpy(nonce32, data, 32);
+ return (counter == 0);
+}
+
+static int nonce_function_test_fail(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ /* Dummy nonce generator that has a fatal error on the first counter value. */
+ if (counter == 0) {
+ return 0;
+ }
+ return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 1);
+}
+
+static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ /* Dummy nonce generator that produces unacceptable nonces for the first several counter values. */
+ if (counter < 3) {
+ memset(nonce32, counter==0 ? 0 : 255, 32);
+ if (counter == 2) {
+ nonce32[31]--;
+ }
+ return 1;
+ }
+ if (counter < 5) {
+ static const unsigned char order[] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
+ };
+ memcpy(nonce32, order, 32);
+ if (counter == 4) {
+ nonce32[31]++;
+ }
+ return 1;
+ }
+ /* The retry rate of RFC 6979 is negligible, especially as we only call this in deterministic tests. */
+ /* If someone does find a case where it retries for secp256k1, we'd like to know. */
+ if (counter > 5) {
+ return 0;
+ }
+ return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5);
+}
+
+int is_empty_signature(const secp256k1_ecdsa_signature *sig) {
+ static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0};
+ return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
+}
+
+void test_ecdsa_end_to_end(void) {
+ unsigned char extra[32] = {0x00};
+ unsigned char privkey[32];
+ unsigned char message[32];
+ unsigned char privkey2[32];
+ secp256k1_ecdsa_signature signature[5];
+ unsigned char sig[74];
+ size_t siglen = 74;
+ unsigned char pubkeyc[65];
+ size_t pubkeyclen = 65;
+ secp256k1_pubkey pubkey;
+ unsigned char seckey[300];
+ size_t seckeylen = 300;
+
+ /* Generate a random key and message. */
+ {
+ secp256k1_scalar msg, key;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(privkey, &key);
+ secp256k1_scalar_get_b32(message, &msg);
+ }
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Verify exporting and importing public key. */
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand32() % 2) == 1);
+ memset(&pubkey, 0, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);
+
+ /* Verify private key import and export. */
+ CHECK(secp256k1_ec_privkey_export(ctx, seckey, &seckeylen, privkey, (secp256k1_rand32() % 2) == 1 ? SECP256K1_EC_COMPRESSED : 0) == 1);
+ CHECK(secp256k1_ec_privkey_import(ctx, privkey2, seckey, seckeylen) == 1);
+ CHECK(memcmp(privkey, privkey2, 32) == 0);
+
+ /* Optionally tweak the keys using addition. */
+ if (secp256k1_rand32() % 3 == 0) {
+ int ret1;
+ int ret2;
+ unsigned char rnd[32];
+ secp256k1_pubkey pubkey2;
+ secp256k1_rand256_test(rnd);
+ ret1 = secp256k1_ec_privkey_tweak_add(ctx, privkey, rnd);
+ ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd);
+ CHECK(ret1 == ret2);
+ if (ret1 == 0) {
+ return;
+ }
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ }
+
+ /* Optionally tweak the keys using multiplication. */
+ if (secp256k1_rand32() % 3 == 0) {
+ int ret1;
+ int ret2;
+ unsigned char rnd[32];
+ secp256k1_pubkey pubkey2;
+ secp256k1_rand256_test(rnd);
+ ret1 = secp256k1_ec_privkey_tweak_mul(ctx, privkey, rnd);
+ ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd);
+ CHECK(ret1 == ret2);
+ if (ret1 == 0) {
+ return;
+ }
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ }
+
+ /* Sign. */
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1);
+ extra[31] = 1;
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1);
+ extra[31] = 0;
+ extra[0] = 1;
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1);
+ CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0);
+ CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0);
+ /* Verify. */
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1);
+
+ /* Serialize/parse DER and verify again */
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
+ memset(&signature[0], 0, sizeof(signature[0]));
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
+ /* Serialize/destroy/parse DER and verify again. */
+ siglen = 74;
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
+ sig[secp256k1_rand32() % siglen] += 1 + (secp256k1_rand32() % 255);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 ||
+ secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0);
+}
+
+void test_random_pubkeys(void) {
+ secp256k1_ge elem;
+ secp256k1_ge elem2;
+ unsigned char in[65];
+ /* Generate some randomly sized pubkeys. */
+ uint32_t r = secp256k1_rand32();
+ size_t len = (r & 3) == 0 ? 65 : 33;
+ r>>=2;
+ if ((r & 3) == 0) {
+ len = (r & 252) >> 3;
+ }
+ r>>=8;
+ if (len == 65) {
+ in[0] = (r & 2) ? 4 : ((r & 1)? 6 : 7);
+ } else {
+ in[0] = (r & 1) ? 2 : 3;
+ }
+ r>>=2;
+ if ((r & 7) == 0) {
+ in[0] = (r & 2040) >> 3;
+ }
+ r>>=11;
+ if (len > 1) {
+ secp256k1_rand256(&in[1]);
+ }
+ if (len > 33) {
+ secp256k1_rand256(&in[33]);
+ }
+ if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
+ unsigned char out[65];
+ unsigned char firstb;
+ int res;
+ size_t size = len;
+ firstb = in[0];
+ /* If the pubkey can be parsed, it should round-trip... */
+ CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, (len == 33) ? SECP256K1_EC_COMPRESSED : 0));
+ CHECK(size == len);
+ CHECK(memcmp(&in[1], &out[1], len-1) == 0);
+ /* ... except for the type of hybrid inputs. */
+ if ((in[0] != 6) && (in[0] != 7)) {
+ CHECK(in[0] == out[0]);
+ }
+ size = 65;
+ CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0));
+ CHECK(size == 65);
+ CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size));
+ ge_equals_ge(&elem,&elem2);
+ /* Check that the X9.62 hybrid type is accepted only when its parity byte matches the y coordinate. */
+ in[0] = (r & 1) ? 6 : 7;
+ res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
+ if (firstb == 2 || firstb == 3) {
+ if (in[0] == firstb + 4) {
+ CHECK(res);
+ } else {
+ CHECK(!res);
+ }
+ }
+ if (res) {
+ ge_equals_ge(&elem,&elem2);
+ CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0));
+ CHECK(memcmp(&in[1], &out[1], 64) == 0);
+ }
+ }
+}
+
+void run_random_pubkeys(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_random_pubkeys();
+ }
+}
+
+void run_ecdsa_end_to_end(void) {
+ int i;
+ for (i = 0; i < 64*count; i++) {
+ test_ecdsa_end_to_end();
+ }
+}
+
+/* Tests several edge cases. */
+void test_ecdsa_edge_cases(void) {
+ int t;
+ secp256k1_ecdsa_signature sig;
+
+ /* Test the case where ECDSA recomputes a point that is infinity. */
+ {
+ secp256k1_gej keyj;
+ secp256k1_ge key;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 1);
+ secp256k1_scalar_negate(&ss, &ss);
+ secp256k1_scalar_inverse(&ss, &ss);
+ secp256k1_scalar_set_int(&sr, 1);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr);
+ secp256k1_ge_set_gej(&key, &keyj);
+ msg = ss;
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ }
+
+ /* Signature where s would be zero. */
+ {
+ unsigned char signature[72];
+ size_t siglen;
+ const unsigned char nonce[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ static const unsigned char nonce2[32] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x40
+ };
+ const unsigned char key[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ unsigned char msg[32] = {
+ 0x86, 0x41, 0x99, 0x81, 0x06, 0x23, 0x44, 0x53,
+ 0xaa, 0x5f, 0x9d, 0x6a, 0x31, 0x78, 0xf4, 0xf7,
+ 0xb8, 0x12, 0xe0, 0x0b, 0x81, 0x7a, 0x77, 0x62,
+ 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9,
+ };
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0);
+ msg[31] = 0xaa;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1);
+ siglen = 72;
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1);
+ siglen = 10;
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0);
+ }
+
+ /* Nonce function corner cases. */
+ for (t = 0; t < 2; t++) {
+ static const unsigned char zero[32] = {0x00};
+ int i;
+ unsigned char key[32];
+ unsigned char msg[32];
+ secp256k1_ecdsa_signature sig2;
+ secp256k1_scalar sr[512], ss;
+ const unsigned char *extra;
+ extra = t == 0 ? NULL : zero;
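+ /* Run once with no extra entropy (NULL) and once with 32 zero bytes of
+ * extra entropy handed to the nonce function. */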
+ memset(msg, 0, 32);
+ msg[31] = 1;
+ /* High key results in signature failure. */
+ memset(key, 0xFF, 32);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0);
+ CHECK(is_empty_signature(&sig));
+ /* Zero key results in signature failure. */
+ memset(key, 0, 32);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0);
+ CHECK(is_empty_signature(&sig));
+ /* Nonce function failure results in signature failure. */
+ key[31] = 1;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0);
+ CHECK(is_empty_signature(&sig));
+ /* The retry loop successfully makes its way to the first good value. */
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1);
+ CHECK(!is_empty_signature(&sig));
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ /* The default nonce function is deterministic. */
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ /* The default nonce function changes output with different messages. */
+ for (i = 0; i < 256; i++) {
+ int j;
+ msg[0] = i;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2);
+ for (j = 0; j < i; j++) {
+ CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j]));
+ }
+ }
+ msg[0] = 0;
+ msg[31] = 2;
+ /* The default nonce function changes output with different keys. */
+ for (i = 256; i < 512; i++) {
+ int j;
+ key[0] = i - 256;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2);
+ for (j = 0; j < i; j++) {
+ CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j]));
+ }
+ }
+ key[0] = 0;
+ }
+
+ /* Privkey export where pubkey is the point at infinity. */
+ {
+ unsigned char privkey[300];
+ unsigned char seckey[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41,
+ };
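+ /* seckey equals the group order n, i.e. 0 mod n, so the corresponding
+ * public key is the point at infinity and export must fail. */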
+ size_t outlen = 300;
+ CHECK(!secp256k1_ec_privkey_export(ctx, privkey, &outlen, seckey, 0));
+ outlen = 300;
+ CHECK(!secp256k1_ec_privkey_export(ctx, privkey, &outlen, seckey, SECP256K1_EC_COMPRESSED));
+ }
+}
+
+void run_ecdsa_edge_cases(void) {
+ test_ecdsa_edge_cases();
+}
+
+#ifdef ENABLE_OPENSSL_TESTS
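+/* Construct an OpenSSL EC_KEY equivalent to the given secp256k1 scalar by
+ * serializing the private key to DER (randomly compressed or uncompressed)
+ * and importing it via d2i_ECPrivateKey. */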
+EC_KEY *get_openssl_key(const secp256k1_scalar *key) {
+ unsigned char privkey[300];
+ size_t privkeylen;
+ const unsigned char* pbegin = privkey;
+ int compr = secp256k1_rand32() & 1;
+ EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1);
+ CHECK(secp256k1_eckey_privkey_serialize(&ctx->ecmult_gen_ctx, privkey, &privkeylen, key, compr ? SECP256K1_EC_COMPRESSED : 0));
+ CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen));
+ CHECK(EC_KEY_check_key(ec_key));
+ return ec_key;
+}
+
+void test_ecdsa_openssl(void) {
+ secp256k1_gej qj;
+ secp256k1_ge q;
+ secp256k1_scalar sigr, sigs;
+ secp256k1_scalar one;
+ secp256k1_scalar msg2;
+ secp256k1_scalar key, msg;
+ EC_KEY *ec_key;
+ unsigned int sigsize = 80;
+ size_t secp_sigsize = 80;
+ unsigned char message[32];
+ unsigned char signature[80];
+ secp256k1_rand256_test(message);
+ secp256k1_scalar_set_b32(&msg, message, NULL);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key);
+ secp256k1_ge_set_gej(&q, &qj);
+ ec_key = get_openssl_key(&key);
+ CHECK(ec_key != NULL);
+ CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key));
+ CHECK(secp256k1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg));
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_scalar_add(&msg2, &msg, &one);
+ CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2));
+
+ random_sign(&sigr, &sigs, &key, &msg, NULL);
+ CHECK(secp256k1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs));
+ CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1);
+
+ EC_KEY_free(ec_key);
+}
+
+void run_ecdsa_openssl(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_ecdsa_openssl();
+ }
+}
+#endif
+
+#ifdef ENABLE_MODULE_ECDH
+# include "modules/ecdh/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORR
+# include "modules/schnorr/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+# include "modules/recovery/tests_impl.h"
+#endif
+
+int main(int argc, char **argv) {
+ unsigned char seed16[16] = {0};
+ unsigned char run32[32] = {0};
+ /* find iteration count */
+ if (argc > 1) {
+ count = strtol(argv[1], NULL, 0);
+ }
+
+ /* find random seed */
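+ /* (argv[2] supplies up to 16 hex-encoded seed bytes; otherwise the seed
+ * comes from /dev/urandom, with a time-based fallback.) */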
+ if (argc > 2) {
+ int pos = 0;
+ const char* ch = argv[2];
+ while (pos < 16 && ch[0] != 0 && ch[1] != 0) {
+ unsigned short sh;
+ if (sscanf(ch, "%2hx", &sh)) {
+ seed16[pos] = sh;
+ } else {
+ break;
+ }
+ ch += 2;
+ pos++;
+ }
+ } else {
+ FILE *frand = fopen("/dev/urandom", "r");
+ if ((frand == NULL) || !fread(&seed16, sizeof(seed16), 1, frand)) {
+ uint64_t t = time(NULL) * (uint64_t)1337;
+ seed16[0] ^= t;
+ seed16[1] ^= t >> 8;
+ seed16[2] ^= t >> 16;
+ seed16[3] ^= t >> 24;
+ seed16[4] ^= t >> 32;
+ seed16[5] ^= t >> 40;
+ seed16[6] ^= t >> 48;
+ seed16[7] ^= t >> 56;
+ }
+ if (frand != NULL) {
+ fclose(frand);
+ }
+ }
+ secp256k1_rand_seed(seed16);
+
+ printf("test count = %i\n", count);
+ printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+
+ /* initialize */
+ run_context_tests();
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ if (secp256k1_rand32() & 1) {
+ secp256k1_rand256(run32);
+ CHECK(secp256k1_context_randomize(ctx, (secp256k1_rand32() & 1) ? run32 : NULL));
+ }
+
+ run_sha256_tests();
+ run_hmac_sha256_tests();
+ run_rfc6979_hmac_sha256_tests();
+
+#ifndef USE_NUM_NONE
+ /* num tests */
+ run_num_smalltests();
+#endif
+
+ /* scalar tests */
+ run_scalar_tests();
+
+ /* field tests */
+ run_field_inv();
+ run_field_inv_var();
+ run_field_inv_all_var();
+ run_field_misc();
+ run_field_convert();
+ run_sqr();
+ run_sqrt();
+
+ /* group tests */
+ run_ge();
+
+ /* ecmult tests */
+ run_wnaf();
+ run_point_times_order();
+ run_ecmult_chain();
+ run_ecmult_constants();
+ run_ecmult_gen_blind();
+ run_ecmult_const_tests();
+ run_ec_combine();
+
+ /* endomorphism tests */
+#ifdef USE_ENDOMORPHISM
+ run_endomorphism_tests();
+#endif
+
+#ifdef ENABLE_MODULE_ECDH
+ /* ecdh tests */
+ run_ecdh_tests();
+#endif
+
+ /* ecdsa tests */
+ run_random_pubkeys();
+ run_ecdsa_sign_verify();
+ run_ecdsa_end_to_end();
+ run_ecdsa_edge_cases();
+#ifdef ENABLE_OPENSSL_TESTS
+ run_ecdsa_openssl();
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORR
+ /* Schnorr tests */
+ run_schnorr_tests();
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+ /* ECDSA pubkey recovery tests */
+ run_recovery_tests();
+#endif
+
+ secp256k1_rand256(run32);
+ printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
+
+ /* shutdown */
+ secp256k1_context_destroy(ctx);
+
+ printf("no problems found\n");
+ return 0;
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/util.h b/crypto/secp256k1/libsecp256k1/src/util.h
new file mode 100644
index 000000000..4eef4ded4
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/util.h
@@ -0,0 +1,110 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_UTIL_H_
+#define _SECP256K1_UTIL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef struct {
+ void (*fn)(const char *text, void* data);
+ const void* data;
+} secp256k1_callback;
+
+static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * const cb, const char * const text) {
+ cb->fn(text, (void*)cb->data);
+}
+
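+/* A minimal usage sketch (die_cb is hypothetical, not part of the API):
+ *
+ * static void die_cb(const char *text, void *data) {
+ * (void)data;
+ * fprintf(stderr, "%s\n", text);
+ * abort();
+ * }
+ * static const secp256k1_callback cb = { die_cb, NULL };
+ * ...
+ * secp256k1_callback_call(&cb, "Out of memory");
+ */
+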
+#ifdef DETERMINISTIC
+#define TEST_FAILURE(msg) do { \
+ fprintf(stderr, "%s\n", msg); \
+ abort(); \
+} while(0)
+#else
+#define TEST_FAILURE(msg) do { \
+ fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \
+ abort(); \
+} while(0)
+#endif
+
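+/* EXPECT(x,c) hints the compiler that expression x most likely evaluates to
+ * c, so CHECK's failure branch stays off the hot path; without
+ * __builtin_expect it degrades to plain (x). */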
+#ifdef HAVE_BUILTIN_EXPECT
+#define EXPECT(x,c) __builtin_expect((x),(c))
+#else
+#define EXPECT(x,c) (x)
+#endif
+
+#ifdef DETERMINISTIC
+#define CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ TEST_FAILURE("test condition failed"); \
+ } \
+} while(0)
+#else
+#define CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ TEST_FAILURE("test condition failed: " #cond); \
+ } \
+} while(0)
+#endif
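+/* (Under DETERMINISTIC, TEST_FAILURE omits file and line and CHECK omits
+ * the stringified condition, keeping failure output reproducible.) */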
+
+/* Like assert(), but enabled only when VERIFY is defined. The condition is
+ * still evaluated in non-VERIFY builds, so it is safe to use on expressions
+ * with side effects. */
+#ifdef VERIFY
+#define VERIFY_CHECK CHECK
+#define VERIFY_SETUP(stmt) do { stmt; } while(0)
+#else
+#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
+#define VERIFY_SETUP(stmt)
+#endif
+
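+/* malloc() wrapper that reports failure through the given callback; note
+ * that if the callback returns, the NULL pointer is still passed on to the
+ * caller. */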
+static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
+ void *ret = malloc(size);
+ if (ret == NULL) {
+ secp256k1_callback_call(cb, "Out of memory");
+ }
+ return ret;
+}
+
+/* SECP256K1_RESTRICT expands to the C99 restrict qualifier (or a
+ * compiler-specific equivalent) when available, and to nothing in a
+ * VERIFY build. */
+#if defined(SECP256K1_BUILD) && defined(VERIFY)
+# define SECP256K1_RESTRICT
+#else
+# if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
+# if SECP256K1_GNUC_PREREQ(3,0)
+# define SECP256K1_RESTRICT __restrict__
+# elif (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define SECP256K1_RESTRICT __restrict
+# else
+# define SECP256K1_RESTRICT
+# endif
+# else
+# define SECP256K1_RESTRICT restrict
+# endif
+#endif
+
+#if defined(_WIN32)
+# define I64FORMAT "I64d"
+# define I64uFORMAT "I64u"
+#else
+# define I64FORMAT "lld"
+# define I64uFORMAT "llu"
+#endif
+
+#if defined(HAVE___INT128)
+# if defined(__GNUC__)
+# define SECP256K1_GNUC_EXT __extension__
+# else
+# define SECP256K1_GNUC_EXT
+# endif
+SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
+#endif
+
+#endif