Let us walk on the 3-isogeny graph
mont.c File Reference
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "steps.h"
#include "mont.h"
#include "../common/fp/mulx/fp.h"
#include "poly.h"
#include "int64mask.h"
Include dependency graph for mont.c:

Go to the source code of this file.

Functions

void xA24 (proj *A24, const proj *A)
void xDBLADD (proj *R, proj *S, proj const *P, proj const *Q, proj const *PQ, proj const *A24, int Aaffine)
void xDBL (proj *Q, proj const *P, const proj *A24, int Aaffine)
void xADD (proj *S, proj const *P, proj const *Q, proj const *PQ)
void xMUL_dac (proj *Q, proj const *A24, int Aaffine, proj const *P, int64_t dac, int64_t daclen, int64_t maxdaclen)
void xMUL (proj *Q, proj const *A, int Aaffine, proj const *P, uintbig const *k, int64_t kbits)
void xMUL_vartime (proj *Q, proj const *A, int Aaffine, proj const *P, uintbig const *k)
void xISOG_matryoshka (proj *A, proj *P, int64_t Plen, proj const *K, int64_t k, int64_t klower, int64_t kupper)
void xISOG (proj *A, proj *P, int64_t Plen, proj const *K, int64_t k)
void xISOG_old (proj *A, proj *P, proj const *K, int64_t k)

Function Documentation

◆ xA24()

void xA24 (proj *A24, const proj *A)

Definition at line 20 of file mont.c.

21{
22 fp_double2(&A24->x, &A->z);
23 fp_double2(&A24->z, (const fp *)&A24->x);
24 fp_add2(&A24->x, &A->x);
25}

References proj::x, and proj::z.
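
xA24 converts the projective Montgomery coefficient (A : C), stored as A->x = A and A->z = C, into the doubling constant A24 = (A + 2C : 4C) expected by xDBL and xDBLADD. The three field operations above realize the integer identities checked below; the sketch is illustrative only (plain int64_t in place of the library's fp type, no reduction mod p).

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t Ax = 6, Az = 1;   /* hypothetical coefficient (A : C) */
    int64_t a24x, a24z;
    a24x = 2 * Az;            /* fp_double2(&A24->x, &A->z)   */
    a24z = 2 * a24x;          /* fp_double2(&A24->z, &A24->x) */
    a24x = a24x + Ax;         /* fp_add2(&A24->x, &A->x)      */
    assert(a24x == Ax + 2 * Az);
    assert(a24z == 4 * Az);
    return 0;
}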

◆ xADD()

void xADD (proj *S, proj const *P, proj const *Q, proj const *PQ)

Definition at line 85 of file mont.c.

86{
87 fp a, b, c, d;
88 fp_add3(&a, &P->x, &P->z);
89 fp_sub3(&b, &P->x, &P->z);
90 fp_add3(&c, &Q->x, &Q->z);
91 fp_sub3(&d, &Q->x, &Q->z);
92 fp_mul2(&a, (const fp *)&d);
93 fp_mul2(&b, (const fp *)&c);
94 fp_add3(&c, (const fp *)&a, (const fp *)&b);
95 fp_sub3(&d, (const fp *)&a, (const fp *)&b);
96 fp_sq1(&c);
97 fp_sq1(&d);
98 fp_mul3(&S->x, &PQ->z, (const fp *)&c);
99 fp_mul3(&S->z, &PQ->x, (const fp *)&d);
100}

References proj::x, and proj::z.
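
This is x-only differential addition: given x(P), x(Q) and x(P − Q), it returns x(P + Q). Reading the temporaries a, b, c, d off the code above gives the usual Montgomery formulas:

X(P+Q) = Z(P−Q) · [ (XP + ZP)(XQ − ZQ) + (XP − ZP)(XQ + ZQ) ]^2
Z(P+Q) = X(P−Q) · [ (XP + ZP)(XQ − ZQ) − (XP − ZP)(XQ + ZQ) ]^2

Note that S may alias P or Q, since both inputs are fully consumed into a, b, c, d before S is written, but as written S should not alias PQ: PQ->x is still read after S->x has been stored.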

◆ xDBL()

void xDBL (proj *Q, proj const *P, const proj *A24, int Aaffine)

Definition at line 78 of file mont.c.

79{
80 proj P2;
81 x2(&P2, P);
82 x2DBL(Q, &P2, A24, Aaffine);
83}
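
xDBL delegates to the helpers x2 and x2DBL, which are defined earlier in mont.c and not documented on this page. A minimal usage sketch, assuming this repository's headers and an already initialized coefficient (A : C); the include path and the helper name xDBL_iter are assumptions:

#include "mont.h"   /* assumed include path within this code base */

/* Hypothetical helper: overwrite R with x([2^n]P) on the curve (A : C). */
static void xDBL_iter(proj *R, proj const *P, proj const *A, int n)
{
    proj A24;
    xA24(&A24, A);            /* A24 = (A + 2C : 4C), see xA24() above */
    *R = *P;
    for (int i = 0; i < n; ++i)
        xDBL(R, R, &A24, 0);  /* Aaffine = 0: A24 is projective */
}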

◆ xDBLADD()

void xDBLADD (proj *R, proj *S, proj const *P, proj const *Q, proj const *PQ, proj const *A24, int Aaffine)

Definition at line 49 of file mont.c.

50{
51 fp tmp0, tmp1, tmp2;
52
53 fp_add3(&tmp0, &P->x, &P->z);
54 fp_sub3(&tmp1, &P->x, &P->z);
55 fp_sq2(&R->x, (const fp *)&tmp0);
56 fp_sub3(&tmp2, &Q->x, &Q->z);
57 fp_add3(&S->x, &Q->x, &Q->z);
58 fp_mul2(&tmp0, (const fp *)&tmp2);
59 fp_sq2(&R->z, (const fp *)&tmp1);
60 fp_mul2(&tmp1, (const fp *)&S->x);
61 fp_sub3(&tmp2, (const fp *)&R->x, (const fp *)&R->z);
62 if (Aaffine)
63 fp_quadruple1(&R->z);
64 else
65 fp_mul2(&R->z, &A24->z);
66 fp_mul2(&R->x, (const fp *)&R->z);
67 fp_mul3(&S->x, &A24->x, (const fp *)&tmp2);
68 fp_sub3(&S->z, (const fp *)&tmp0, (const fp *)&tmp1);
69 fp_add2(&R->z, (const fp *)&S->x);
70 fp_add3(&S->x, (const fp *)&tmp0, (const fp *)&tmp1);
71 fp_mul2(&R->z, (const fp *)&tmp2);
72 fp_sq1(&S->z);
73 fp_sq1(&S->x);
74 fp_mul2(&S->z, &PQ->x);
75 fp_mul2(&S->x, &PQ->z);
76}

References proj::x, and proj::z.
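
xDBLADD fuses one doubling and one differential addition, sharing (XP + ZP) and (XP − ZP) between the two halves. The addition half reproduces the xADD formulas above, with PQ playing the role of P − Q. Tracing the remaining temporaries, the doubling half computes, with A24 = (A + 2C : 4C) and 4C replaced by the constant 4 when Aaffine is nonzero:

X([2]P) = 4C · (XP + ZP)^2 · (XP − ZP)^2
Z([2]P) = [ (XP + ZP)^2 − (XP − ZP)^2 ] · [ 4C · (XP − ZP)^2 + (A + 2C) · ( (XP + ZP)^2 − (XP − ZP)^2 ) ]

Here (XP + ZP)^2 − (XP − ZP)^2 = 4·XP·ZP, which is the quantity held in tmp2.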

◆ xISOG()

void xISOG (proj *A, proj *P, int64_t Plen, proj const *K, int64_t k)

Definition at line 789 of file mont.c.

790{
791 xISOG_matryoshka(A, P, Plen, K, k, k, k);
792}

References xISOG_matryoshka.
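
xISOG is the fixed-degree special case of xISOG_matryoshka with klower = kupper = k, so nothing is hidden and exactly the work for degree k is done. Per the asserts in xISOG_matryoshka, k must be odd and at least 3, and K is expected to generate a kernel of order k on the curve (A : C). A hedged usage sketch; the include path and the helper name are assumptions:

#include "mont.h"   /* assumed include path within this code base */

/* Hypothetical helper: replace (A : C) by the image curve of the degree-k
 * isogeny with kernel <K>, and push the n points P[0..n-1] through it.
 * Both A and P[] are updated in place, as documented above. */
static void apply_isogeny(proj *A, proj *P, int64_t n, proj const *K, int64_t k)
{
    xISOG(A, P, n, K, k);   /* requires k odd, k >= 3, K of order k */
}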

◆ xISOG_matryoshka()

void xISOG_matryoshka (proj *A, proj *P, int64_t Plen, proj const *K, int64_t k, int64_t klower, int64_t kupper)

Definition at line 429 of file mont.c.

430{
431 assert(3 <= klower);
432 assert(klower & 1);
433 assert(kupper & 1);
434
435 int64_t sqrtvelu = 0;
436 int64_t bs = 0;
437 int64_t gs = 0;
438
439 steps(&bs, &gs, klower);
440 if (bs)
441 {
442 sqrtvelu = 1;
443 assert(bs > 0);
444 assert(gs > 0);
445 assert(!(bs & 1));
446 }
447
448 fp tmp0, tmp1, tmp2, tmp3, tmp4;
449
450 proj A24;
451 proj Aed; // twisted Edwards curve coefficients
452
453 // A -> (A : C)
454 // Aed.z = 2C
455 fp_double2(&Aed.z, (const fp *)&A->z);
456
457 // Aed.x = A + 2C
458 fp_add3(&Aed.x, (const fp *)&A->x, (const fp *)&Aed.z);
459
460 // A24.x = A + 2C
461 fp_copy(A24.x, Aed.x);
462
463 // A24.z = 4C
464 fp_double2(&A24.z, (const fp *)&Aed.z);
465
466 // Aed.z = A - 2C
467 fp_sub3(&Aed.z, (const fp *)&A->x, (const fp *)&Aed.z);
468
469 proj M[kupper]; /* XXX: use less space */
470#ifndef NDEBUG
471 int Minit[kupper];
472
473 for (int64_t s = 0; s < kupper; ++s)
474 Minit[s] = 0;
475
476 Minit[1] = 1;
477#endif
478 M[1] = *K;
479 xDBL(&M[2], K, &A24, 0);
480#ifndef NDEBUG
481 Minit[2] = 1;
482#endif
483
484 if (sqrtvelu)
485 {
486 for (int64_t s = 3; s < kupper; ++s)
487 {
488 if (s & 1)
489 {
490 int64_t i = s / 2;
491 assert(s == 2 * i + 1);
492 if (i < bs)
493 {
494 if (s == 3)
495 {
496 assert(Minit[1]);
497 assert(Minit[2]);
498 xADD(&M[s], &M[2], &M[1], &M[1]);
499#ifndef NDEBUG
500 Minit[s] = 1;
501#endif
502 continue;
503 }
504 assert(Minit[s - 2]);
505 assert(Minit[s - 4]);
506 assert(Minit[2]);
507 xADD(&M[s], &M[s - 2], &M[2], &M[s - 4]);
508#ifndef NDEBUG
509 Minit[s] = 1;
510#endif
511 continue;
512 }
513 }
514 else
515 {
516 int64_t i = s / 2 - 1;
517 assert(s == 2 * i + 2);
518 if (i < (kupper - 1) / 2 - 2 * bs * gs)
519 {
520 if (s == 4)
521 {
522 assert(Minit[2]);
523 xDBL(&M[s], &M[2], &A24, 0);
524#ifndef NDEBUG
525 Minit[s] = 1;
526#endif
527 continue;
528 }
529 assert(Minit[s - 2]);
530 assert(Minit[s - 4]);
531 assert(Minit[2]);
532 xADD(&M[s], &M[s - 2], &M[2], &M[s - 4]);
533#ifndef NDEBUG
534 Minit[s] = 1;
535#endif
536 continue;
537 }
538 }
539
540 if (bs > 0)
541 {
542 if (s == 2 * bs)
543 {
544 assert(Minit[bs - 1]);
545 assert(Minit[bs + 1]);
546 assert(Minit[2]);
547 xADD(&M[s], &M[bs + 1], &M[bs - 1], &M[2]);
548#ifndef NDEBUG
549 Minit[s] = 1;
550#endif
551 continue;
552 }
553 else if (s == 4 * bs)
554 {
555 assert(Minit[2 * bs]);
556 xDBL(&M[s], &M[2 * bs], &A24, 0);
557#ifndef NDEBUG
558 Minit[s] = 1;
559#endif
560 continue;
561 }
562 else if (s == 6 * bs)
563 {
564 assert(Minit[2 * bs]);
565 assert(Minit[4 * bs]);
566 xADD(&M[s], &M[4 * bs], &M[2 * bs], &M[2 * bs]);
567#ifndef NDEBUG
568 Minit[s] = 1;
569#endif
570 continue;
571 }
572 else if (s % (4 * bs) == 2 * bs)
573 {
574 int64_t j = s / (4 * bs);
575 assert(s == 2 * bs * (2 * j + 1));
576 if (j < gs)
577 {
578 assert(Minit[s - 4 * bs]);
579 assert(Minit[s - 8 * bs]);
580 assert(Minit[4 * bs]);
581 xADD(&M[s], &M[s - 4 * bs], &M[4 * bs], &M[s - 8 * bs]);
582#ifndef NDEBUG
583 Minit[s] = 1;
584#endif
585 continue;
586 }
587 }
588 }
589 }
590 }
591 else
592 {
593 for (int64_t i = 3; i <= (kupper - 1) / 2; ++i)
594 {
595#ifndef NDEBUG
596 Minit[i] = 1;
597#endif
598 xADD(&M[i], &M[i - 1], K, &M[i - 2]);
599 }
600 }
601
602 proj Abatch;
603 fp_copy(Abatch.x, fp_1);
604 fp_copy(Abatch.z, fp_1);
605 int64_t fixPlen = 0;
606 if(Plen>0) {
607 fixPlen = Plen;
608 } else {
609 fixPlen = 1;
610 }
611 proj Qbatch[fixPlen];
612 for (int64_t h = 0; h < Plen; ++h)
613 {
614 fp_copy(Qbatch[h].x, fp_1);
615 fp_copy(Qbatch[h].z, fp_1);
616 }
617 fp Psum[fixPlen], Pdif[fixPlen];
618 for (int64_t h = 0; h < Plen; ++h)
619 {
620 //precomputations
621 // ( X + Z )
622 fp_add3(&Psum[h], (const fp *)&P[h].x, (const fp *)&P[h].z);
623 // ( X - Z )
624 fp_sub3(&Pdif[h], (const fp *)&P[h].x, (const fp *)&P[h].z);
625 }
626
627 if (sqrtvelu)
628 {
629 int64_t TIlen = 2 * bs + poly_tree1size(bs);
630 fp TI[TIlen];
631
632 for (int64_t i = 0; i < bs; ++i)
633 {
634 assert(Minit[2 * i + 1]);
635 fp_neg2(&TI[2 * i], (const fp *)&M[2 * i + 1].x);
636 fp_copy(TI[2 * i + 1], M[2 * i + 1].z);
637 }
638
639 poly_tree1(TI + 2 * bs, (const fp *)TI, bs);
640
641 fp Aprecomp[gs][8];
642 fp T1[3 * gs];
643 fp Tminus1[3 * gs];
644
645 for (int64_t j = 0; j < gs; ++j)
646 {
647 assert(Minit[2 * bs * (2 * j + 1)]);
648 biquad_precompute_curve(Aprecomp[j], &M[2 * bs * (2 * j + 1)], A);
649 biquad_postcompute_curve(T1 + 3 * j, Tminus1 + 3 * j, (const fp *)Aprecomp[j]);
650 }
651 poly_multiprod2_selfreciprocal(T1, gs);
652 poly_multiprod2_selfreciprocal(Tminus1, gs);
653
654 int64_t precompsize = poly_multieval_precomputesize(bs, 2 * gs + 1);
655 fp precomp[precompsize];
656 poly_multieval_precompute(precomp, bs, 2 * gs + 1, (const fp *)TI, (const fp *)TI + 2 * bs);
657
658 fp v[bs];
659
660 poly_multieval_postcompute(v, bs, (const fp *)T1, 2 * gs + 1, (const fp *)TI, (const fp *)TI + 2 * bs, (const fp *)precomp);
661 fp_copy(Abatch.x, v[0]);
662 for (int64_t i = 1; i < bs; ++i)
663 fp_mul2(&Abatch.x, (const fp *)&v[i]);
664
665 poly_multieval_postcompute(v, bs, (const fp *)Tminus1, 2 * gs + 1, (const fp *)TI, (const fp *)TI + 2 * bs, (const fp *)precomp);
666 fp_copy(Abatch.z, v[0]);
667 for (int64_t i = 1; i < bs; ++i)
668 fp_mul2(&Abatch.z, (const fp *)&v[i]);
669
670 for (int64_t h = 0; h < Plen; ++h)
671 {
672 fp Pprecomp[6];
673 biquad_precompute_point(Pprecomp, &P[h]);
674
675 fp TP[3 * gs];
676 for (int64_t j = 0; j < gs; ++j)
677 biquad_postcompute_point(TP + 3 * j, (const fp *)Pprecomp, (const fp *)Aprecomp[j]);
678 poly_multiprod2(TP, gs);
679
680 fp TPinv[2 * gs + 1];
681 for (int64_t j = 0; j < 2 * gs + 1; ++j)
682 fp_copy(TPinv[j], TP[2 * gs - j]);
683
684 poly_multieval_postcompute(v, bs, (const fp *)TP, 2 * gs + 1, (const fp *)TI, (const fp *)TI + 2 * bs, (const fp *)precomp);
685 fp_copy(Qbatch[h].z, v[0]);
686 for (int64_t i = 1; i < bs; ++i)
687 fp_mul2(&Qbatch[h].z, (const fp *)&v[i]);
688
689 poly_multieval_postcompute(v, bs, (const fp *)TPinv, 2 * gs + 1, (const fp *)TI, (const fp *)TI + 2 * bs, (const fp *)precomp);
690 fp_copy(Qbatch[h].x, v[0]);
691 for (int64_t i = 1; i < bs; ++i)
692 fp_mul2(&Qbatch[h].x, (const fp *)&v[i]);
693 }
694
695 int64_t ignore = (k - 1) / 2 - 2 * bs * gs; /* skip i >= ignore */
696 for (int64_t i = 0; i < (kupper - 1) / 2 - 2 * bs * gs; ++i)
697 {
698 int64_t want = -((i - ignore) >> 61); /* 61 to reduce risk of compiler doing something silly */
699 assert(Minit[2 * i + 2]);
700 fp_sub3(&tmp4, (const fp *)&M[2 * i + 2].x, (const fp *)&M[2 * i + 2].z);
701 fp_add3(&tmp3, (const fp *)&M[2 * i + 2].x, (const fp *)&M[2 * i + 2].z);
702 fp_mul3(&tmp2, (const fp *)&Abatch.x, (const fp *)&tmp4);
703 fp_cmov_ctidh(&Abatch.x, (const fp *)&tmp2, want);
704 fp_mul3(&tmp2, (const fp *)&Abatch.z, (const fp *)&tmp3);
705 fp_cmov_ctidh(&Abatch.z, (const fp *)&tmp2, want);
706 for (int64_t h = 0; h < Plen; ++h)
707 {
708 fp_mul3(&tmp1, (const fp *)&tmp4, (const fp *)&Psum[h]);
709 fp_mul3(&tmp0, (const fp *)&tmp3, (const fp *)&Pdif[h]);
710 fp_add3(&tmp2, (const fp *)&tmp0, (const fp *)&tmp1);
711 fp_mul2(&tmp2, (const fp *)&Qbatch[h].x);
712 fp_cmov_ctidh(&Qbatch[h].x, (const fp *)&tmp2, want);
713 fp_sub3(&tmp2, (const fp *)&tmp0, (const fp *)&tmp1);
714 fp_mul2(&tmp2, (const fp *)&Qbatch[h].z);
715 fp_cmov_ctidh(&Qbatch[h].z, (const fp *)&tmp2, want);
716 }
717 }
718 }
719 else
720 {
721 // no sqrtvelu
722 int64_t ignore = (k + 1) / 2; /* skip i >= ignore */
723
724 assert(Minit[1]);
725 fp_sub3(&tmp4, (const fp *)&M[1].x, (const fp *)&M[1].z);
726 fp_add3(&tmp3, (const fp *)&M[1].x, (const fp *)&M[1].z);
727 fp_copy(Abatch.x, tmp4);
728 fp_copy(Abatch.z, tmp3);
729
730 for (int64_t h = 0; h < Plen; ++h)
731 {
732 fp_mul3(&tmp1, (const fp *)&tmp4, (const fp *)&Psum[h]);
733 fp_mul3(&tmp0, (const fp *)&tmp3, (const fp *)&Pdif[h]);
734 fp_add3(&Qbatch[h].x, (const fp *)&tmp0, (const fp *)&tmp1);
735 fp_sub3(&Qbatch[h].z, (const fp *)&tmp0, (const fp *)&tmp1);
736 }
737
738 for (int64_t i = 2; i <= (kupper - 1) / 2; ++i)
739 {
740 int64_t want = -((i - ignore) >> 61);
741 // 2 < i < (k-1)/2
742 assert(Minit[i]);
743 fp_sub3(&tmp4, (const fp *)&M[i].x, (const fp *)&M[i].z);
744 fp_add3(&tmp3, (const fp *)&M[i].x, (const fp *)&M[i].z);
745 fp_mul3(&tmp2, (const fp *)&Abatch.x, (const fp *)&tmp4);
746 fp_cmov_ctidh(&Abatch.x, (const fp *)&tmp2, want);
747 fp_mul3(&tmp2, (const fp *)&Abatch.z, (const fp *)&tmp3);
748 fp_cmov_ctidh(&Abatch.z, (const fp *)&tmp2, want);
749 for (int64_t h = 0; h < Plen; ++h)
750 {
751 // point evaluation
752 fp_mul3(&tmp1, (const fp *)&tmp4, (const fp *)&Psum[h]);
753 fp_mul3(&tmp0, (const fp *)&tmp3, (const fp *)&Pdif[h]);
754 fp_add3(&tmp2, (const fp *)&tmp0, (const fp *)&tmp1);
755 fp_mul2(&tmp2, (const fp *)&Qbatch[h].x);
756 fp_cmov_ctidh(&Qbatch[h].x, (const fp *)&tmp2, want);
757 fp_sub3(&tmp2, (const fp *)&tmp0, (const fp *)&tmp1);
758 fp_mul2(&tmp2, (const fp *)&Qbatch[h].z);
759 fp_cmov_ctidh(&Qbatch[h].z, (const fp *)&tmp2, want);
760 }
761 }
762 }
763
764 for (int64_t h = 0; h < Plen; ++h)
765 {
766 // point evaluation
767 // [∏( X − Z )( X i + Z i ) + ( X + Z )( X i − Z i )]^2
768 fp_sq1(&Qbatch[h].x);
769 fp_sq1(&Qbatch[h].z);
770
771 // X' = X * Qbatch[h].x
772 // X' = X * [∏( X − Z )( X i + Z i ) + ( X + Z )( X i − Z i )]^2
773 fp_mul2(&P[h].x, (const fp *)&Qbatch[h].x);
774 fp_mul2(&P[h].z, (const fp *)&Qbatch[h].z);
775 }
776
777 // Aed.x = Aed.x^k * Abatch.z^8
778 powpow8mod(&Aed.x, (const fp *)&Abatch.z, k, kupper);
779 // Aed.z = Aed.z^k * Abatch.x^8
780 powpow8mod(&Aed.z, (const fp *)&Abatch.x, k, kupper);
781
782 //compute Montgomery params
783 // ( A' : C' ) = ( 2 ( a'_E + d'_E ) : a'_E − d'_E )
784 fp_add3(&A->x, (const fp *)&Aed.x, (const fp *)&Aed.z);
785 fp_sub3(&A->z, (const fp *)&Aed.x, (const fp *)&Aed.z);
786 fp_double1(&A->x);
787}

References assert(), fp_1, fp_copy, poly_multieval_postcompute, poly_multieval_precompute, poly_multieval_precomputesize, poly_multiprod2, poly_multiprod2_selfreciprocal, poly_tree1, poly_tree1size, steps, proj::x, xADD, xDBL, and proj::z.

Here is the call graph for this function:
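
The "matryoshka" structure makes the operation sequence depend only on the public interval [klower, kupper], not on the secret degree k: the table of kernel multiples and the evaluation loops always run to bounds derived from kupper, while the mask want = -((i - ignore) >> 61), fed to fp_cmov_ctidh, discards the contributions of indices at or beyond the threshold computed from the actual k. When steps() reports a baby-step/giant-step split (bs, gs) for klower, the square-root Vélu path through the poly_*/biquad_* machinery is taken; otherwise the plain linear pass over the (kupper − 1)/2 multiples is used. The standalone sketch below only illustrates the mask; it assumes the usual arithmetic right shift for negative int64_t (the code's own comment about the shift amount 61 hints at the same concern):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t ignore = 5;   /* stand-in for the secret-dependent threshold */
    for (int64_t i = 0; i < 10; ++i)
    {
        int64_t want = -((i - ignore) >> 61);
        /* want is 1 while i < ignore and 0 afterwards, so
         * fp_cmov_ctidh(dst, tmp, want) keeps an update only for the
         * indices that really belong to the degree-k isogeny. */
        assert(want == (i < ignore ? 1 : 0));
    }
    return 0;
}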

◆ xISOG_old()

void xISOG_old (proj *A, proj *P, proj const *K, int64_t k)

Definition at line 794 of file mont.c.

795{
796 assert(k >= 3);
797 assert(k % 2 == 1);
798
799 fp tmp0, tmp1, tmp2, Psum, Pdif;
800 proj Q, Aed, prod;
801
802 fp_add3(&Aed.z, (const fp *)&A->z, (const fp *)&A->z); //compute twisted Edwards curve coefficients
803 fp_add3(&Aed.x, (const fp *)&A->x, (const fp *)&Aed.z);
804 fp_sub3(&Aed.z, (const fp *)&A->x, (const fp *)&Aed.z);
805
806 fp_add3(&Psum, (const fp *)&P->x, (const fp *)&P->z); //precomputations
807 fp_sub3(&Pdif, (const fp *)&P->x, (const fp *)&P->z);
808
809 fp_sub3(&prod.x, &K->x, &K->z);
810 fp_add3(&prod.z, &K->x, &K->z);
811
812 fp_mul3(&tmp1, (const fp *)&prod.x, (const fp *)&Psum);
813 fp_mul3(&tmp0, (const fp *)&prod.z, (const fp *)&Pdif);
814 fp_add3(&Q.x, (const fp *)&tmp0, (const fp *)&tmp1);
815 fp_sub3(&Q.z, (const fp *)&tmp0, (const fp *)&tmp1);
816
817 proj M[k];
818 proj A24;
819 xA24(&A24, A);
820
821 M[0] = *K;
822 xDBL(&M[1], K, &A24, 0);
823 for (int64_t i = 2; i < k / 2; ++i)
824 {
825 xADD(&M[i], &M[(i - 1)], K, &M[(i - 2)]);
826 }
827
828 for (int64_t i = 1; i < k / 2; ++i)
829 {
830 fp_sub3(&tmp1, (const fp *)&M[i].x, (const fp *)&M[i].z);
831 fp_add3(&tmp0, (const fp *)&M[i].x, (const fp *)&M[i].z);
832 fp_mul2(&prod.x, (const fp *)&tmp1);
833 fp_mul2(&prod.z, (const fp *)&tmp0);
834 fp_mul2(&tmp1, (const fp *)&Psum);
835 fp_mul2(&tmp0, (const fp *)&Pdif);
836 fp_add3(&tmp2, (const fp *)&tmp0, (const fp *)&tmp1);
837 fp_mul2(&Q.x, (const fp *)&tmp2);
838 fp_sub3(&tmp2, (const fp *)&tmp0, (const fp *)&tmp1);
839 fp_mul2(&Q.z, (const fp *)&tmp2);
840 }
841
842 // point evaluation
843 fp_sq1(&Q.x);
844 fp_sq1(&Q.z);
845 fp_mul2(&P->x, (const fp *)&Q.x);
846 fp_mul2(&P->z, (const fp *)&Q.z);
847
848 //compute Aed.x^k, Aed.z^k
849 exp_by_squaring_(&Aed.x, &Aed.z, k);
850
851 //compute prod.x^8, prod.z^8
852 fp_sq1(&prod.x);
853 fp_sq1(&prod.x);
854 fp_sq1(&prod.x);
855 fp_sq1(&prod.z);
856 fp_sq1(&prod.z);
857 fp_sq1(&prod.z);
858
859 //compute image curve parameters
860 fp_mul2(&Aed.z, (const fp *)&prod.x);
861 fp_mul2(&Aed.x, (const fp *)&prod.z);
862
863 //compute Montgomery params
864 fp_add3(&A->x, (const fp *)&Aed.x, (const fp *)&Aed.z);
865 fp_sub3(&A->z, (const fp *)&Aed.x, (const fp *)&Aed.z);
866 fp_add2(&A->x, (const fp *)&A->x);
867}

References assert(), proj::x, xA24, xADD, xDBL, and proj::z.

Here is the call graph for this function:
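
xISOG_old is the straightforward Vélu variant: it walks all (k − 1)/2 kernel multiples, with no constant-time masking and no square-root Vélu batching, and it computes the image curve through its twisted Edwards coefficients, just as the comments in xISOG_matryoshka describe. With a_E = A + 2C and d_E = A − 2C (up to the common projective scaling) and the kernel multiples (X_i : Z_i), the code above produces

a_E' = a_E^k · ( ∏ (X_i + Z_i) )^8
d_E' = d_E^k · ( ∏ (X_i − Z_i) )^8
A'   = 2 · (a_E' + d_E')
C'   = a_E' − d_E'

xISOG_matryoshka computes the same quantities via Abatch and powpow8mod, which, judging from its k and kupper arguments, performs the exponentiation by k padded up to the public bound kupper.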

◆ xMUL()

void xMUL (proj *Q, proj const *A, int Aaffine, proj const *P, uintbig const *k, int64_t kbits)

Definition at line 156 of file mont.c.

157{
158 proj R = *P;
159 proj A24;
160 const proj Pcopy = *P; /* in case Q = P */
161
162 fp_copy(Q->x, fp_1);
163 fp_copy(Q->z, fp_0);
164
165 xA24(&A24, A);
166
167 int64_t prevbit = 0;
168 while (kbits > 0)
169 {
170 --kbits;
171
172 int64_t bit = uintbig_bit(k, kbits);
173 int64_t bitxor = bit ^ prevbit;
174
175 proj_cswap(Q, &R, bitxor);
176
177 xDBLADD(Q, &R, Q, &R, &Pcopy, &A24, Aaffine);
178
179 prevbit = bit;
180 }
181 proj_cswap(Q, &R, prevbit);
182}

References fp_0, fp_1, fp_copy, uintbig_bit, proj::x, xA24, xDBLADD, and proj::z.
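
xMUL is a standard Montgomery ladder with lazy conditional swaps: Q starts at the point at infinity (1 : 0), the pair (Q, R) holds ([m]P, [m+1]P) up to a pending swap, a swap happens only when the current bit differs from the previous one, and xDBLADD produces (2·Q, Q + R) in one call with Pcopy as the fixed difference R − Q = P. The integer toy below mirrors the exact control flow and compiles on its own; it is illustrative only, with integer addition standing in for the x-only group operations:

#include <assert.h>
#include <stdint.h>

static uint64_t ladder(uint64_t k, int64_t kbits)
{
    uint64_t q = 0, r = 1;   /* stand-ins for Q = [0]P and R = [1]P */
    int64_t prevbit = 0;
    while (kbits > 0)
    {
        --kbits;
        int64_t bit = (k >> kbits) & 1;                 /* uintbig_bit(k, kbits) */
        int64_t bitxor = bit ^ prevbit;
        if (bitxor) { uint64_t t = q; q = r; r = t; }   /* proj_cswap */
        r = q + r;   /* the xADD half of xDBLADD: R = Q + R */
        q = 2 * q;   /* the xDBL half of xDBLADD: Q = [2]Q  */
        prevbit = bit;
    }
    if (prevbit) { uint64_t t = q; q = r; r = t; }      /* final proj_cswap */
    return q;        /* equals k whenever kbits covers all bits of k */
}

int main(void)
{
    for (uint64_t k = 0; k < 1000; ++k)
        assert(ladder(k, 10) == k);
    return 0;
}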

◆ xMUL_dac()

void xMUL_dac (proj *Q, proj const *A24, int Aaffine, proj const *P, int64_t dac, int64_t daclen, int64_t maxdaclen)

Definition at line 110 of file mont.c.

111{
112 proj Pinput = *P; // keeping copy since Q can overlap P
113 proj P1 = Pinput;
114 proj P2;
115 xDBL(&P2, &P1, A24, Aaffine);
116 proj P3;
117 xADD(&P3, &P2, &P1, &P1);
118 // int64_t collision = fp_iszero(&Pinput.z);
119 int64_t collision = fp_iszero(Pinput.z);
120
121 for (;;)
122 {
123 int64_t want = 1 + int64mask_negative(daclen);
124 proj_cmov(Q, &P3, want);
125 if (maxdaclen <= 0)
126 break;
127
128 // invariant: P1+P2 = P3
129 // odd dac: want to replace P1,P2,P3 with P1,P3,P1+P3
130 // even dac: want to replace P1,P2,P3 with P2,P3,P2+P3
131
132 proj_cswap(&P1, &P2, 1 - (dac & 1));
133 // invariant: P1+P2 = P3
134 // all cases: want to replace P1,P2,P3 with P1,P3,P1+P3
135
136 // collision |= want&fp_iszero(&P2.z);
137 collision |= want & fp_iszero(P2.z);
138
139 proj next;
140 xADD(&next, &P3, &P1, &P2);
141 P2 = P3;
142 P3 = next;
143
144 maxdaclen -= 1;
145 daclen -= 1;
146 dac >>= 1;
147 }
148
149 proj_cmov(Q, &Pinput, collision);
150 // in case of collision, input has the right order
151}

References xADD, xDBL, and proj::z.
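
xMUL_dac walks a differential addition chain: the triple (P1, P2, P3) always satisfies P1 + P2 = P3, each dac bit decides via proj_cswap(&P1, &P2, 1 - (dac & 1)) whether P1 or P2 survives, and the new P3 is P3 + P1 with known difference P2. Assuming int64mask_negative returns an all-ones mask for negative inputs, want is 1 while daclen is still nonnegative, so proj_cmov keeps refreshing Q with the current P3; the last refresh happens after exactly daclen chain steps, and the loop then idles on to maxdaclen so the operation count depends only on the public maxdaclen. The collision flag records whether a Z-coordinate vanished along the way, in which case the final proj_cmov hands back the untouched input, which (per the comment) already has the right order. The integer toy below tracks the scalar multiples held by the triple and is illustrative only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Returns n such that, under the reading above, xMUL_dac would compute
 * Q = [n]P for this (dac, daclen), ignoring the constant-time padding to
 * maxdaclen and the collision handling. */
static int64_t dac_multiple(int64_t dac, int64_t daclen)
{
    int64_t p1 = 1, p2 = 2, p3 = 3;   /* multiples held by P1, P2, P3 */
    int64_t q = 0;
    while (daclen >= 0)
    {
        q = p3;                       /* proj_cmov(Q, &P3, want) with want = 1 */
        assert(p1 + p2 == p3);        /* the "P1+P2 = P3" invariant */
        if (!(dac & 1)) { int64_t t = p1; p1 = p2; p2 = t; }   /* proj_cswap */
        int64_t next = p3 + p1;       /* xADD(&next, &P3, &P1, &P2) */
        p2 = p3;
        p3 = next;
        daclen -= 1;
        dac >>= 1;
    }
    return q;
}

int main(void)
{
    printf("%lld\n", (long long)dac_multiple(0, 0));   /* 3: empty chain, Q = [3]P */
    printf("%lld\n", (long long)dac_multiple(1, 1));   /* 4: one odd step, Q = [4]P */
    return 0;
}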

◆ xMUL_vartime()

void xMUL_vartime (proj *Q, proj const *A, int Aaffine, proj const *P, uintbig const *k)

Definition at line 184 of file mont.c.

185{
186 int64_t kbits = UBITS;
187 while (kbits && !uintbig_bit(k, kbits - 1))
188 --kbits;
189 xMUL(Q, A, Aaffine, P, k, kbits);
190}

References UBITS, uintbig_bit, and xMUL.
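
xMUL_vartime just strips the leading zero bits of k before handing off to xMUL, so its running time depends on the bit length of k (hence "vartime"); constant-time callers presumably pass a fixed, public kbits to xMUL directly. The same scan over a plain uint64_t, as a standalone illustration:

#include <assert.h>
#include <stdint.h>

static int64_t bitlen(uint64_t k)
{
    int64_t kbits = 64;                          /* UBITS for this 64-bit toy */
    while (kbits && !((k >> (kbits - 1)) & 1))   /* uintbig_bit(k, kbits - 1) */
        --kbits;
    return kbits;
}

int main(void)
{
    assert(bitlen(0) == 0);
    assert(bitlen(1) == 1);
    assert(bitlen(6) == 3);   /* 6 = 0b110 */
    return 0;
}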