Bitcoin Core 28.0.0
scalar_4x64_impl.h
/***********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "checkmem.h"
#include "int128.h"
#include "modinv64_impl.h"
#include "util.h"

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
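/* Editorial note: SECP256K1_N_C = 2^256 - n fits in 129 bits, so only three
 * limbs are needed (C_2 = 1, C_3 = 0). The reduction code below relies on the
 * identity 2^256 == SECP256K1_N_C (mod n) to fold high limbs back in. */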

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
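/* Editorial note: SECP256K1_N_H = n >> 1 = (n-1)/2. It is the comparison
 * threshold in secp256k1_scalar_is_high and supplies the n//2 constant used
 * by secp256k1_scalar_half. */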

SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
    r->d[0] = 0;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;

    SECP256K1_SCALAR_VERIFY(r);
}

SECP256K1_INLINE static uint32_t secp256k1_scalar_get_bits_limb32(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    SECP256K1_SCALAR_VERIFY(a);
    VERIFY_CHECK(count > 0 && count <= 32);
    VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);

    return (a->d[offset >> 6] >> (offset & 0x3F)) & (0xFFFFFFFF >> (32 - count));
}

SECP256K1_INLINE static uint32_t secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    SECP256K1_SCALAR_VERIFY(a);
    VERIFY_CHECK(count > 0 && count <= 32);
    VERIFY_CHECK(offset + count <= 256);

    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits_limb32(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 6) + 1 < 4);
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & (0xFFFFFFFF >> (32 - count));
    }
}
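
/* Example: offset = 62, count = 4 straddles the d[0]/d[1] boundary, so the
 * second branch combines the top two bits of d[0] (d[0] >> 62) with the
 * bottom two bits of d[1] (d[1] << 2) before masking with 0xF. */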

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
    no |= (a->d[2] < SECP256K1_N_2);
    yes |= (a->d[2] > SECP256K1_N_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_1);
    yes |= (a->d[1] > SECP256K1_N_1) & ~no;
    yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
    return yes;
}
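
/* The function above is a branch-free lexicographic compare against n, from
 * the most significant limb down: `no` latches as soon as some limb is
 * strictly below the corresponding limb of n, and `yes` latches when a limb
 * is strictly above it with no higher limb having decided first. No `>` check
 * is needed for d[3] because SECP256K1_N_3 is all ones. */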

SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    secp256k1_uint128 t;
    VERIFY_CHECK(overflow <= 1);

    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_1);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_2);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t);

    SECP256K1_SCALAR_VERIFY(r);
    return overflow;
}

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    secp256k1_u128_from_u64(&t, a->d[0]);
    secp256k1_u128_accum_u64(&t, b->d[0]);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[1]);
    secp256k1_u128_accum_u64(&t, b->d[1]);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[2]);
    secp256k1_u128_accum_u64(&t, b->d[2]);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[3]);
    secp256k1_u128_accum_u64(&t, b->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);

    SECP256K1_SCALAR_VERIFY(r);
    return overflow;
}
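
/* Worked example: for a = n-1 and b = 2 the limb sum is n+1, with no carry
 * out of bit 255; secp256k1_scalar_check_overflow then reports 1, and the
 * reduce step subtracts n (by adding SECP256K1_N_C modulo 2^256), leaving
 * r = 1 and a return value of 1. */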

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    secp256k1_uint128 t;
    volatile int vflag = flag;
    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(bit < 256);

    bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = secp256k1_u128_to_u64(&t);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
}

static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = secp256k1_read_be64(&b32[24]);
    r->d[1] = secp256k1_read_be64(&b32[16]);
    r->d[2] = secp256k1_read_be64(&b32[8]);
    r->d[3] = secp256k1_read_be64(&b32[0]);
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    SECP256K1_SCALAR_VERIFY(a);

    secp256k1_write_be64(&bin[0], a->d[3]);
    secp256k1_write_be64(&bin[8], a->d[2]);
    secp256k1_write_be64(&bin[16], a->d[1]);
    secp256k1_write_be64(&bin[24], a->d[0]);
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(a);

    secp256k1_u128_from_u64(&t, ~a->d[0]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[1]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[2]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[3]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
    /* Writing `/` for field division and `//` for integer division, we compute
     *
     *   a/2 = (a - (a&1))/2 + (a&1)/2
     *       = (a >> 1) + (a&1 ?    1/2 : 0)
     *       = (a >> 1) + (a&1 ? n//2+1 : 0),
     *
     * where n is the group order and in the last equality we have used 1/2 = n//2+1 (mod n).
     * For n//2, we have the constants SECP256K1_N_H_0, ...
     *
     * This sum does not overflow. The most extreme case is a = -2, the largest odd scalar. Here:
     * - the left summand is:  a >> 1 = (a - a&1)/2 = (n-2-1)//2 = (n-3)//2
     * - the right summand is: a&1 ? n//2+1 : 0 = n//2+1 = (n-1)//2 + 2//2 = (n+1)//2
     * Together they sum to (n-3)//2 + (n+1)//2 = (2n-2)//2 = n - 1, which is less than n.
     */
    uint64_t mask = -(uint64_t)(a->d[0] & 1U);
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(a);

    secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, (a->d[1] >> 1) | (a->d[2] << 63));
    secp256k1_u128_accum_u64(&t, SECP256K1_N_H_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, (a->d[2] >> 1) | (a->d[3] << 63));
    secp256k1_u128_accum_u64(&t, SECP256K1_N_H_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    r->d[3] = secp256k1_u128_to_u64(&t) + (a->d[3] >> 1) + (SECP256K1_N_H_3 & mask);
#ifdef VERIFY
    /* The line above only computed the bottom 64 bits of r->d[3]; redo the computation
     * in full 128 bits to make sure the top 64 bits are indeed zero. */
    secp256k1_u128_accum_u64(&t, a->d[3] >> 1);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_H_3 & mask);
    secp256k1_u128_rshift(&t, 64);
    VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);

    SECP256K1_SCALAR_VERIFY(r);
#endif
}
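
/* Example: a = 1 gives a >> 1 = 0 and mask = ~0, so the result is n//2 + 1 =
 * (n+1)/2; doubling that yields n+1 == 1 (mod n), as required. */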

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    SECP256K1_SCALAR_VERIFY(a);

    no |= (a->d[3] < SECP256K1_N_H_3);
    yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
    no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
    yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
    return yes;
}

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    volatile int vflag = flag;
    uint64_t mask = -vflag;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    secp256k1_uint128 t;
    SECP256K1_SCALAR_VERIFY(r);

    secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;

    SECP256K1_SCALAR_VERIFY(r);
    return 2 * (mask == 0) - 1;
}

/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* overflow is handled on the next line */ \
    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t);  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                /* overflow is handled on the next line */ \
    over = (c0 < (a)); \
    c1 += over;               /* overflow is handled on the next line */ \
    c2 += (c1 < over);        /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);                /* overflow is handled on the next line */ \
    c1 += (c0 < (a));         /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
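
/* Taken together, these macros maintain a little-endian 192-bit accumulator
 * (c0,c1,c2) across the column sums below: muladd/sumadd add into it with
 * full carry propagation, and extract pops the low 64 bits while shifting
 * the accumulator right by one limb. */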

static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    /* Preload. */
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    /* Initialize r8,r9,r10 */
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += n0 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract m0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += l1 */
    "addq 8(%%rsi), %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += n1 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n0 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += l2 */
    "addq 16(%%rsi), %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n2 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n1 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n0 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract m2 */
    "movq %%r10, %q2\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += l3 */
    "addq 24(%%rsi), %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n3 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n2 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n1 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* extract m3 */
    "movq %%r8, %q3\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += n3 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n2 */
    "addq %%r13, %%r9\n"
    "adcq $0, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m4 */
    "movq %%r9, %q4\n"
    /* (r10,r8) += n3 */
    "addq %%r14, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m5 */
    "movq %%r10, %q5\n"
    /* extract m6 */
    "movq %%r8, %q6\n"
    : "=&g"(m0), "=&g"(m1), "=&g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");

    SECP256K1_CHECKMEM_MSAN_DEFINE(&m0, sizeof(m0));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m1, sizeof(m1));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m2, sizeof(m2));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m3, sizeof(m3));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m4, sizeof(m4));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m5, sizeof(m5));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&m6, sizeof(m6));

    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q9, %%r11\n"
    "movq %q10, %%r12\n"
    "movq %q11, %%r13\n"
    /* Initialize (r8,r9,r10) */
    "movq %q5, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += m4 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract p0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += m1 */
    "addq %q6, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += m5 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += m4 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract p1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += m2 */
    "addq %q7, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m6 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m5 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m4 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p2 */
    "movq %%r10, %q2\n"
    /* (r8,r9) += m3 */
    "addq %q8, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += m6 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* (r8,r9) += m5 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p3 */
    "movq %%r8, %q3\n"
    /* (r9) += m6 */
    "addq %%r13, %%r9\n"
    /* extract p4 */
    "movq %%r9, %q4\n"
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");

    SECP256K1_CHECKMEM_MSAN_DEFINE(&p0, sizeof(p0));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p1, sizeof(p1));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p2, sizeof(p2));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p3, sizeof(p3));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&p4, sizeof(p4));

    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q5, %%r10\n"
    /* (rax,rdx) = p4 * c0 */
    "movq %7, %%rax\n"
    "mulq %%r10\n"
    /* (rax,rdx) += p0 */
    "addq %q1, %%rax\n"
    "adcq $0, %%rdx\n"
    /* extract r0 */
    "movq %%rax, 0(%q6)\n"
    /* Move to (r8,r9) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p1 */
    "addq %q2, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += p4 * c1 */
    "movq %8, %%rax\n"
    "mulq %%r10\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* Extract r1 */
    "movq %%r8, 8(%q6)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r8) += p4 */
    "addq %%r10, %%r9\n"
    "adcq $0, %%r8\n"
    /* (r9,r8) += p2 */
    "addq %q3, %%r9\n"
    "adcq $0, %%r8\n"
    /* Extract r2 */
    "movq %%r9, 16(%q6)\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p3 */
    "addq %q4, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract r3 */
    "movq %%r8, 24(%q6)\n"
    /* Extract c */
    "movq %%r9, %q0\n"
    : "=g"(c)
    : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");

    SECP256K1_CHECKMEM_MSAN_DEFINE(r, sizeof(*r));
    SECP256K1_CHECKMEM_MSAN_DEFINE(&c, sizeof(c));

#else
    secp256k1_uint128 c128;
    uint64_t c, c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385. */
    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    sumadd(n0);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    sumadd(n1);
    extract(m3);
    muladd(n3, SECP256K1_N_C_1);
    sumadd(n2);
    extract(m4);
    sumadd_fast(n3);
    extract_fast(m5);
    VERIFY_CHECK(c0 <= 1);
    m6 = c0;

    /* Reduce 385 bits into 258. */
    /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m5, SECP256K1_N_C_0);
    muladd(m4, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m6, SECP256K1_N_C_0);
    muladd(m5, SECP256K1_N_C_1);
    sumadd(m4);
    extract(p2);
    sumadd_fast(m3);
    muladd_fast(m6, SECP256K1_N_C_1);
    sumadd_fast(m5);
    extract_fast(p3);
    p4 = c0 + m6;
    VERIFY_CHECK(p4 <= 2);

    /* Reduce 258 bits into 256. */
    /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    secp256k1_u128_from_u64(&c128, p0);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_0, p4);
    r->d[0] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p1);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_1, p4);
    r->d[1] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p2);
    secp256k1_u128_accum_u64(&c128, p4);
    r->d[2] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p3);
    r->d[3] = secp256k1_u128_to_u64(&c128);
    c = secp256k1_u128_hi_u64(&c128);
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
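
/* Sketch of why three passes suffice: each pass replaces the part of the value
 * above 2^256 with (high part) * SECP256K1_N_C, and since N_C < 2^129 the
 * magnitude drops from 512 to at most 385 bits, then to 258 bits, and finally
 * to 256 bits plus a small carry that secp256k1_scalar_reduce folds in. */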

static void secp256k1_scalar_mul_512(uint64_t *l8, const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    /* Preload */
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    /* (rax,rdx) = a0 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    /* Extract l8[0] */
    "movq %%rax, 0(%%rsi)\n"
    /* (r8,r9,r10) = (rdx) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a0 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a1 * b0 */
    "movq %%rbx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l8[1] */
    "movq %%r8, 8(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a0 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a1 * b1 */
    "movq %%rbx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a2 * b0 */
    "movq %%rcx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l8[2] */
    "movq %%r9, 16(%%rsi)\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += a0 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Preload a3 */
    "movq 24(%%rdi), %%r15\n"
    /* (r10,r8,r9) += a1 * b2 */
    "movq %%rbx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a2 * b1 */
    "movq %%rcx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a3 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract l8[3] */
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a1 * b3 */
    "movq %%rbx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a2 * b2 */
    "movq %%rcx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a3 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l8[4] */
    "movq %%r8, 32(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a2 * b3 */
    "movq %%rcx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a3 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l8[5] */
    "movq %%r9, 40(%%rsi)\n"
    /* (r10,r8) += a3 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    /* Extract l8[6] */
    "movq %%r10, 48(%%rsi)\n"
    /* Extract l8[7] */
    "movq %%r8, 56(%%rsi)\n"
    : "+d"(pb)
    : "S"(l8), "D"(a->d)
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");

    SECP256K1_CHECKMEM_MSAN_DEFINE(l8, sizeof(*l8) * 8);

#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l8[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l8[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l8[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l8[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l8[3]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    extract(l8[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    extract(l8[5]);
    muladd_fast(a->d[3], b->d[3]);
    extract_fast(l8[6]);
    VERIFY_CHECK(c1 == 0);
    l8[7] = c0;
#endif
}
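
/* Both paths above compute the same full 512-bit product: the C path is a
 * column-by-column (comba-style) schoolbook multiplication, accumulating all
 * partial products a->d[i]*b->d[j] with i+j == k in (c0,c1,c2) before
 * extracting output limb k. */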

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef extract
#undef extract_fast

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    SECP256K1_SCALAR_VERIFY(k);

    r1->d[0] = k->d[0];
    r1->d[1] = k->d[1];
    r1->d[2] = 0;
    r1->d[3] = 0;
    r2->d[0] = k->d[2];
    r2->d[1] = k->d[3];
    r2->d[2] = 0;
    r2->d[3] = 0;

    SECP256K1_SCALAR_VERIFY(r1);
    SECP256K1_SCALAR_VERIFY(r2);
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);
    VERIFY_CHECK(shift >= 256);

    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);

    SECP256K1_SCALAR_VERIFY(r);
}

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag;
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));

    mask0 = vflag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];

    /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
     * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
     */
    VERIFY_CHECK(a0 >> 62 == 0);
    VERIFY_CHECK(a1 >> 62 == 0);
    VERIFY_CHECK(a2 >> 62 == 0);
    VERIFY_CHECK(a3 >> 62 == 0);
    VERIFY_CHECK(a4 >> 8 == 0);

    r->d[0] = a0      | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
    SECP256K1_SCALAR_VERIFY(a);

    r->v[0] =  a0                   & M62;
    r->v[1] = (a0 >> 62 | a1 <<  2) & M62;
    r->v[2] = (a1 >> 60 | a2 <<  4) & M62;
    r->v[3] = (a2 >> 58 | a3 <<  6) & M62;
    r->v[4] =  a3 >> 56;
}
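
/* Note: the signed62 form uses five 62-bit limbs (310 bits) so that a 256-bit
 * scalar leaves headroom for the signed arithmetic in modinv64; only 8 bits of
 * the top limb are ever used, which the checks in
 * secp256k1_scalar_from_signed62 above enforce. */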

static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
    0x34F20099AA774EC1LL
};
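
/* The first field is the group order n in signed62 notation; the second
 * appears to be a precomputed multiplicative inverse of n modulo 2^62, which
 * the modinv64 code uses in its update steps. */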

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    SECP256K1_SCALAR_VERIFY(x);

    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    SECP256K1_SCALAR_VERIFY(x);

    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */