author | Adam Ierymenko <adam.ierymenko@gmail.com> | 2018-03-13 06:51:17 -0700
committer | Adam Ierymenko <adam.ierymenko@gmail.com> | 2018-03-13 06:51:17 -0700
commit | beb170e4fb4a42cf3770a731f222a63f0bdfe0f3 (patch)
tree | 11cde6c7374affaaf88a3eed48bd0018d6268fc4 /ext/ed25519-amd64-asm
parent | a59912f3afa7627f3da79804a9be693a7d314ebc (diff)
download | infinitytier-beb170e4fb4a42cf3770a731f222a63f0bdfe0f3.tar.gz, infinitytier-beb170e4fb4a42cf3770a731f222a63f0bdfe0f3.zip
Use X64 ASM ed25519 signatures on Linux/x64, which are about 10X faster. Will matter a lot for network controllers, not so much for other things.
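The build glue that actually selects the assembly backend on Linux/x64 is not part of this diff, so the sketch below is only an illustration of how such a compile-time switch is commonly wired up in C. The macro `ZT_ED25519_USE_AMD64_ASM` and the two backend function names are hypothetical placeholders, not identifiers from this repository; only the general pattern (prefer the amd64 assembly path when targeting Linux/x86-64, otherwise fall back to a portable implementation) reflects the commit message above.

```c
/* Illustrative selection shim only -- the macro and function names are
 * placeholders, not taken from this repository. The two backends are assumed
 * to be provided elsewhere with identical contracts. */
#if defined(__linux__) && defined(__x86_64__)
#define ZT_ED25519_USE_AMD64_ASM 1
#endif

void ed25519_sign_amd64_asm(unsigned char *sig, const unsigned char *msg,
                            unsigned long long mlen, const unsigned char *sk);
void ed25519_sign_portable(unsigned char *sig, const unsigned char *msg,
                           unsigned long long mlen, const unsigned char *sk);

static void ed25519_sign_dispatch(unsigned char *sig, const unsigned char *msg,
                                  unsigned long long mlen, const unsigned char *sk)
{
#ifdef ZT_ED25519_USE_AMD64_ASM
	ed25519_sign_amd64_asm(sig, msg, mlen, sk);  /* ~10x faster per the commit message */
#else
	ed25519_sign_portable(sig, msg, mlen, sk);   /* portable fallback on other platforms */
#endif
}
```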
Diffstat (limited to 'ext/ed25519-amd64-asm')
65 files changed, 37046 insertions, 0 deletions
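The first file in the diff below, batch.c, adds crypto_sign_open_batch(), which verifies many Ed25519 signatures together: groups of up to 64 signed messages are checked with one randomized multi-scalar multiplication, and any group that is too small (fewer than 3) or that fails the combined check falls back to verifying each signature individually with crypto_sign_open(). A minimal caller sketch follows; it assumes crypto_sign_open_batch() is declared in the package's crypto_sign.h (which batch.c includes) and leaves buffer allocation to the caller.

```c
#include "crypto_sign.h"  /* assumed to declare crypto_sign_open_batch(), as used in batch.c */

/* Verify `num` signed messages in one call.
 * sm[i]    : signed message i (64-byte signature followed by the message)
 * smlen[i] : its length in bytes
 * pk[i]    : the corresponding 32-byte public key
 * A return value of 0 means every signature verified and each m[i]/mlen[i]
 * holds the recovered message; non-zero means at least one signature failed. */
int verify_batch(unsigned char *const m[], unsigned long long mlen[],
                 unsigned char *const sm[], const unsigned long long smlen[],
                 unsigned char *const pk[], unsigned long long num)
{
	/* batch.c processes the inputs in chunks of up to 64 and falls back to
	 * per-signature crypto_sign_open() whenever the combined check fails. */
	return crypto_sign_open_batch(m, mlen, sm, smlen, pk, num);
}
```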
diff --git a/ext/ed25519-amd64-asm/batch.c b/ext/ed25519-amd64-asm/batch.c new file mode 100644 index 00000000..955392ea --- /dev/null +++ b/ext/ed25519-amd64-asm/batch.c @@ -0,0 +1,94 @@ +#include "crypto_sign.h" + +#include "crypto_verify_32.h" +#include "crypto_hash_sha512.h" +#include "randombytes.h" + +#include "ge25519.h" +#include "hram.h" + +#define MAXBATCH 64 + +int crypto_sign_open_batch( + unsigned char* const m[],unsigned long long mlen[], + unsigned char* const sm[],const unsigned long long smlen[], + unsigned char* const pk[], + unsigned long long num + ) +{ + int ret = 0; + unsigned long long i, j; + shortsc25519 r[MAXBATCH]; + sc25519 scalars[2*MAXBATCH+1]; + ge25519 points[2*MAXBATCH+1]; + unsigned char hram[crypto_hash_sha512_BYTES]; + unsigned long long batchsize; + + for (i = 0;i < num;++i) mlen[i] = -1; + + while (num >= 3) { + batchsize = num; + if (batchsize > MAXBATCH) batchsize = MAXBATCH; + + for (i = 0;i < batchsize;++i) + if (smlen[i] < 64) goto fallback; + + randombytes((unsigned char*)r,sizeof(shortsc25519) * batchsize); + + /* Computing scalars[0] = ((r1s1 + r2s2 + ...)) */ + for(i=0;i<batchsize;i++) + { + sc25519_from32bytes(&scalars[i], sm[i]+32); + sc25519_mul_shortsc(&scalars[i], &scalars[i], &r[i]); + } + for(i=1;i<batchsize;i++) + sc25519_add(&scalars[0], &scalars[0], &scalars[i]); + + /* Computing scalars[1] ... scalars[batchsize] as r[i]*H(R[i],A[i],m[i]) */ + for(i=0;i<batchsize;i++) + { + get_hram(hram, sm[i], pk[i], m[i], smlen[i]); + sc25519_from64bytes(&scalars[i+1],hram); + sc25519_mul_shortsc(&scalars[i+1],&scalars[i+1],&r[i]); + } + /* Setting scalars[batchsize+1] ... scalars[2*batchsize] to r[i] */ + for(i=0;i<batchsize;i++) + sc25519_from_shortsc(&scalars[batchsize+i+1],&r[i]); + + /* Computing points */ + points[0] = ge25519_base; + + for(i=0;i<batchsize;i++) + if (ge25519_unpackneg_vartime(&points[i+1], pk[i])) goto fallback; + for(i=0;i<batchsize;i++) + if (ge25519_unpackneg_vartime(&points[batchsize+i+1], sm[i])) goto fallback; + + ge25519_multi_scalarmult_vartime(points, points, scalars, 2*batchsize+1); + + if (ge25519_isneutral_vartime(points)) { + for(i=0;i<batchsize;i++) + { + for(j=0;j<smlen[i]-64;j++) + m[i][j] = sm[i][j + 64]; + mlen[i] = smlen[i]-64; + } + } else { + fallback: + + for (i = 0;i < batchsize;++i) + ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]); + } + + m += batchsize; + mlen += batchsize; + sm += batchsize; + smlen += batchsize; + pk += batchsize; + num -= batchsize; + } + + for (i = 0;i < num;++i) + ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]); + + return ret; +} diff --git a/ext/ed25519-amd64-asm/choose_t.s b/ext/ed25519-amd64-asm/choose_t.s new file mode 100644 index 00000000..f10d8b8b --- /dev/null +++ b/ext/ed25519-amd64-asm/choose_t.s @@ -0,0 +1,1565 @@ + +# qhasm: int64 tp + +# qhasm: int64 pos + +# qhasm: int64 b + +# qhasm: int64 basep + +# qhasm: input tp + +# qhasm: input pos + +# qhasm: input b + +# qhasm: input basep + +# qhasm: int64 mask + +# qhasm: int64 u + +# qhasm: int64 tysubx0 + +# qhasm: int64 tysubx1 + +# qhasm: int64 tysubx2 + +# qhasm: int64 tysubx3 + +# qhasm: int64 txaddy0 + +# qhasm: int64 txaddy1 + +# qhasm: int64 txaddy2 + +# qhasm: int64 txaddy3 + +# qhasm: int64 tt2d0 + +# qhasm: int64 tt2d1 + +# qhasm: int64 tt2d2 + +# qhasm: int64 tt2d3 + +# qhasm: int64 tt0 + +# qhasm: int64 tt1 + +# qhasm: int64 tt2 + +# qhasm: int64 tt3 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: int64 t + +# qhasm: stack64 tp_stack + +# qhasm: int64 caller1 + 
+# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_choose_t +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_choose_t +.globl crypto_sign_ed25519_amd64_64_choose_t +_crypto_sign_ed25519_amd64_64_choose_t: +crypto_sign_ed25519_amd64_64_choose_t: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: tp_stack = tp +# asm 1: movq <tp=int64#1,>tp_stack=stack64#8 +# asm 2: movq <tp=%rdi,>tp_stack=56(%rsp) +movq %rdi,56(%rsp) + +# qhasm: pos *= 768 +# asm 1: imulq $768,<pos=int64#2,>pos=int64#1 +# asm 2: imulq $768,<pos=%rsi,>pos=%rdi +imulq $768,%rsi,%rdi + +# qhasm: mask = b +# asm 1: mov <b=int64#3,>mask=int64#2 +# asm 2: mov <b=%rdx,>mask=%rsi +mov %rdx,%rsi + +# qhasm: (int64) mask >>= 7 +# asm 1: sar $7,<mask=int64#2 +# asm 2: sar $7,<mask=%rsi +sar $7,%rsi + +# qhasm: u = b +# asm 1: mov <b=int64#3,>u=int64#5 +# asm 2: mov <b=%rdx,>u=%r8 +mov %rdx,%r8 + +# qhasm: u += mask +# asm 1: add <mask=int64#2,<u=int64#5 +# asm 2: add <mask=%rsi,<u=%r8 +add %rsi,%r8 + +# qhasm: u ^= mask +# asm 1: xor <mask=int64#2,<u=int64#5 +# asm 2: xor <mask=%rsi,<u=%r8 +xor %rsi,%r8 + +# qhasm: tysubx0 = 1 +# asm 1: mov $1,>tysubx0=int64#2 +# asm 2: mov $1,>tysubx0=%rsi +mov $1,%rsi + +# qhasm: tysubx1 = 0 +# asm 1: mov $0,>tysubx1=int64#6 +# asm 2: mov $0,>tysubx1=%r9 +mov $0,%r9 + +# qhasm: tysubx2 = 0 +# asm 1: mov $0,>tysubx2=int64#7 +# asm 2: mov $0,>tysubx2=%rax +mov $0,%rax + +# qhasm: tysubx3 = 0 +# asm 1: mov $0,>tysubx3=int64#8 +# asm 2: mov $0,>tysubx3=%r10 +mov $0,%r10 + +# qhasm: txaddy0 = 1 +# asm 1: mov $1,>txaddy0=int64#9 +# asm 2: mov $1,>txaddy0=%r11 +mov $1,%r11 + +# qhasm: txaddy1 = 0 +# asm 1: mov $0,>txaddy1=int64#10 +# asm 2: mov $0,>txaddy1=%r12 +mov $0,%r12 + +# qhasm: txaddy2 = 0 +# asm 1: mov $0,>txaddy2=int64#11 +# asm 2: mov $0,>txaddy2=%r13 +mov 
$0,%r13 + +# qhasm: txaddy3 = 0 +# asm 1: mov $0,>txaddy3=int64#12 +# asm 2: mov $0,>txaddy3=%r14 +mov $0,%r14 + +# qhasm: =? u - 1 +# asm 1: cmp $1,<u=int64#5 +# asm 2: cmp $1,<u=%r8 +cmp $1,%r8 + +# qhasm: t = *(uint64 *)(basep + 0 + pos) +# asm 1: movq 0(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 0(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 0(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 8 + pos) +# asm 1: movq 8(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 8(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 8(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 16 + pos) +# asm 1: movq 16(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 16(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 16(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 24 + pos) +# asm 1: movq 24(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 24(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 24(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 32 + pos) +# asm 1: movq 32(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 32(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 32(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 40 + pos) +# asm 1: movq 40(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 40(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 40(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 48 + pos) +# asm 1: movq 48(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 48(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 48(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 56 + pos) +# asm 1: movq 56(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 56(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 56(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 2 +# asm 1: cmp $2,<u=int64#5 +# asm 2: cmp $2,<u=%r8 +cmp $2,%r8 + +# qhasm: t = *(uint64 *)(basep + 96 + pos) +# asm 1: movq 96(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 96(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 96(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 104 + pos) +# asm 1: movq 104(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 104(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 104(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 112 + pos) +# asm 1: movq 112(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 112(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 112(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 120 + pos) +# asm 1: movq 120(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 120(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 120(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 128 + pos) +# asm 1: movq 128(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 128(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 128(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 136 + pos) +# asm 1: movq 136(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 136(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 136(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 144 + pos) +# asm 1: movq 144(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 144(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 144(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 152 + pos) +# asm 1: movq 152(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 152(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 152(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 3 +# asm 1: cmp $3,<u=int64#5 +# asm 2: cmp $3,<u=%r8 +cmp $3,%r8 + +# qhasm: t = *(uint64 *)(basep + 192 + pos) +# asm 1: movq 192(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 192(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 192(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 200 + pos) +# asm 1: movq 200(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 200(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 200(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 208 + pos) +# asm 1: movq 208(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 208(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 208(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 216 + pos) +# asm 1: movq 216(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 216(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 216(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 224 + pos) +# asm 1: movq 224(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 224(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 224(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 232 + pos) +# asm 1: movq 232(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 232(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 232(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 240 + pos) +# asm 1: movq 240(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 240(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 240(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 248 + pos) +# asm 1: movq 248(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 248(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 248(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 4 +# asm 1: cmp $4,<u=int64#5 +# asm 2: cmp $4,<u=%r8 +cmp $4,%r8 + +# qhasm: t = *(uint64 *)(basep + 288 + pos) +# asm 1: movq 288(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 288(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 288(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 296 + pos) +# asm 1: movq 296(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 296(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 296(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 304 + pos) +# asm 1: movq 304(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 304(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 304(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 312 + pos) +# asm 1: movq 312(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 312(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 312(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 320 + pos) +# asm 1: movq 320(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 320(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 320(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 328 + pos) +# asm 1: movq 328(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 328(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 328(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 336 + pos) +# asm 1: movq 336(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 336(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 336(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 344 + pos) +# asm 1: movq 344(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 344(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 344(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 5 +# asm 1: cmp $5,<u=int64#5 +# asm 2: cmp $5,<u=%r8 +cmp $5,%r8 + +# qhasm: t = *(uint64 *)(basep + 384 + pos) +# asm 1: movq 384(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 384(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 384(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 392 + pos) +# asm 1: movq 392(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 392(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 392(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 400 + pos) +# asm 1: movq 400(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 400(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 400(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 408 + pos) +# asm 1: movq 408(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 408(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 408(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 416 + pos) +# asm 1: movq 416(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 416(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 416(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 424 + pos) +# asm 1: movq 424(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 424(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 424(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 432 + pos) +# asm 1: movq 432(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 432(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 432(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 440 + pos) +# asm 1: movq 440(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 440(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 440(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 6 +# asm 1: cmp $6,<u=int64#5 +# asm 2: cmp $6,<u=%r8 +cmp $6,%r8 + +# qhasm: t = *(uint64 *)(basep + 480 + pos) +# asm 1: movq 480(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 480(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 480(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 488 + pos) +# asm 1: movq 488(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 488(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 488(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 496 + pos) +# asm 1: movq 496(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 496(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 496(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 504 + pos) +# asm 1: movq 504(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 504(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 504(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 512 + pos) +# asm 1: movq 512(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 512(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 512(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 520 + pos) +# asm 1: movq 520(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 520(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 520(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 528 + pos) +# asm 1: movq 528(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 528(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 528(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 536 + pos) +# asm 1: movq 536(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 536(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 536(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 7 +# asm 1: cmp $7,<u=int64#5 +# asm 2: cmp $7,<u=%r8 +cmp $7,%r8 + +# qhasm: t = *(uint64 *)(basep + 576 + pos) +# asm 1: movq 576(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 576(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 576(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 584 + pos) +# asm 1: movq 584(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 584(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 584(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 592 + pos) +# asm 1: movq 592(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 592(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 592(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 600 + pos) +# asm 1: movq 600(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 600(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 600(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 608 + pos) +# asm 1: movq 608(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 608(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 608(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 616 + pos) +# asm 1: movq 616(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 616(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 616(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 624 + pos) +# asm 1: movq 624(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 624(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 624(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 632 + pos) +# asm 1: movq 632(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 632(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 632(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: =? 
u - 8 +# asm 1: cmp $8,<u=int64#5 +# asm 2: cmp $8,<u=%r8 +cmp $8,%r8 + +# qhasm: t = *(uint64 *)(basep + 672 + pos) +# asm 1: movq 672(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 672(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 672(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove <t=int64#13,<tysubx0=int64#2 +# asm 2: cmove <t=%r15,<tysubx0=%rsi +cmove %r15,%rsi + +# qhasm: t = *(uint64 *)(basep + 680 + pos) +# asm 1: movq 680(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 680(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 680(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove <t=int64#13,<tysubx1=int64#6 +# asm 2: cmove <t=%r15,<tysubx1=%r9 +cmove %r15,%r9 + +# qhasm: t = *(uint64 *)(basep + 688 + pos) +# asm 1: movq 688(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 688(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 688(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove <t=int64#13,<tysubx2=int64#7 +# asm 2: cmove <t=%r15,<tysubx2=%rax +cmove %r15,%rax + +# qhasm: t = *(uint64 *)(basep + 696 + pos) +# asm 1: movq 696(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 696(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 696(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove <t=int64#13,<tysubx3=int64#8 +# asm 2: cmove <t=%r15,<tysubx3=%r10 +cmove %r15,%r10 + +# qhasm: t = *(uint64 *)(basep + 704 + pos) +# asm 1: movq 704(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 704(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 704(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove <t=int64#13,<txaddy0=int64#9 +# asm 2: cmove <t=%r15,<txaddy0=%r11 +cmove %r15,%r11 + +# qhasm: t = *(uint64 *)(basep + 712 + pos) +# asm 1: movq 712(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 712(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 712(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove <t=int64#13,<txaddy1=int64#10 +# asm 2: cmove <t=%r15,<txaddy1=%r12 +cmove %r15,%r12 + +# qhasm: t = *(uint64 *)(basep + 720 + pos) +# asm 1: movq 720(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 720(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 720(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove <t=int64#13,<txaddy2=int64#11 +# asm 2: cmove <t=%r15,<txaddy2=%r13 +cmove %r15,%r13 + +# qhasm: t = *(uint64 *)(basep + 728 + pos) +# asm 1: movq 728(<basep=int64#4,<pos=int64#1),>t=int64#13 +# asm 2: movq 728(<basep=%rcx,<pos=%rdi),>t=%r15 +movq 728(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove <t=int64#13,<txaddy3=int64#12 +# asm 2: cmove <t=%r15,<txaddy3=%r14 +cmove %r15,%r14 + +# qhasm: signed<? 
b - 0 +# asm 1: cmp $0,<b=int64#3 +# asm 2: cmp $0,<b=%rdx +cmp $0,%rdx + +# qhasm: t = tysubx0 +# asm 1: mov <tysubx0=int64#2,>t=int64#13 +# asm 2: mov <tysubx0=%rsi,>t=%r15 +mov %rsi,%r15 + +# qhasm: tysubx0 = txaddy0 if signed< +# asm 1: cmovl <txaddy0=int64#9,<tysubx0=int64#2 +# asm 2: cmovl <txaddy0=%r11,<tysubx0=%rsi +cmovl %r11,%rsi + +# qhasm: txaddy0 = t if signed< +# asm 1: cmovl <t=int64#13,<txaddy0=int64#9 +# asm 2: cmovl <t=%r15,<txaddy0=%r11 +cmovl %r15,%r11 + +# qhasm: t = tysubx1 +# asm 1: mov <tysubx1=int64#6,>t=int64#13 +# asm 2: mov <tysubx1=%r9,>t=%r15 +mov %r9,%r15 + +# qhasm: tysubx1 = txaddy1 if signed< +# asm 1: cmovl <txaddy1=int64#10,<tysubx1=int64#6 +# asm 2: cmovl <txaddy1=%r12,<tysubx1=%r9 +cmovl %r12,%r9 + +# qhasm: txaddy1 = t if signed< +# asm 1: cmovl <t=int64#13,<txaddy1=int64#10 +# asm 2: cmovl <t=%r15,<txaddy1=%r12 +cmovl %r15,%r12 + +# qhasm: t = tysubx2 +# asm 1: mov <tysubx2=int64#7,>t=int64#13 +# asm 2: mov <tysubx2=%rax,>t=%r15 +mov %rax,%r15 + +# qhasm: tysubx2 = txaddy2 if signed< +# asm 1: cmovl <txaddy2=int64#11,<tysubx2=int64#7 +# asm 2: cmovl <txaddy2=%r13,<tysubx2=%rax +cmovl %r13,%rax + +# qhasm: txaddy2 = t if signed< +# asm 1: cmovl <t=int64#13,<txaddy2=int64#11 +# asm 2: cmovl <t=%r15,<txaddy2=%r13 +cmovl %r15,%r13 + +# qhasm: t = tysubx3 +# asm 1: mov <tysubx3=int64#8,>t=int64#13 +# asm 2: mov <tysubx3=%r10,>t=%r15 +mov %r10,%r15 + +# qhasm: tysubx3 = txaddy3 if signed< +# asm 1: cmovl <txaddy3=int64#12,<tysubx3=int64#8 +# asm 2: cmovl <txaddy3=%r14,<tysubx3=%r10 +cmovl %r14,%r10 + +# qhasm: txaddy3 = t if signed< +# asm 1: cmovl <t=int64#13,<txaddy3=int64#12 +# asm 2: cmovl <t=%r15,<txaddy3=%r14 +cmovl %r15,%r14 + +# qhasm: tp = tp_stack +# asm 1: movq <tp_stack=stack64#8,>tp=int64#13 +# asm 2: movq <tp_stack=56(%rsp),>tp=%r15 +movq 56(%rsp),%r15 + +# qhasm: *(uint64 *)(tp + 0) = tysubx0 +# asm 1: movq <tysubx0=int64#2,0(<tp=int64#13) +# asm 2: movq <tysubx0=%rsi,0(<tp=%r15) +movq %rsi,0(%r15) + +# qhasm: *(uint64 *)(tp + 8) = tysubx1 +# asm 1: movq <tysubx1=int64#6,8(<tp=int64#13) +# asm 2: movq <tysubx1=%r9,8(<tp=%r15) +movq %r9,8(%r15) + +# qhasm: *(uint64 *)(tp + 16) = tysubx2 +# asm 1: movq <tysubx2=int64#7,16(<tp=int64#13) +# asm 2: movq <tysubx2=%rax,16(<tp=%r15) +movq %rax,16(%r15) + +# qhasm: *(uint64 *)(tp + 24) = tysubx3 +# asm 1: movq <tysubx3=int64#8,24(<tp=int64#13) +# asm 2: movq <tysubx3=%r10,24(<tp=%r15) +movq %r10,24(%r15) + +# qhasm: *(uint64 *)(tp + 32) = txaddy0 +# asm 1: movq <txaddy0=int64#9,32(<tp=int64#13) +# asm 2: movq <txaddy0=%r11,32(<tp=%r15) +movq %r11,32(%r15) + +# qhasm: *(uint64 *)(tp + 40) = txaddy1 +# asm 1: movq <txaddy1=int64#10,40(<tp=int64#13) +# asm 2: movq <txaddy1=%r12,40(<tp=%r15) +movq %r12,40(%r15) + +# qhasm: *(uint64 *)(tp + 48) = txaddy2 +# asm 1: movq <txaddy2=int64#11,48(<tp=int64#13) +# asm 2: movq <txaddy2=%r13,48(<tp=%r15) +movq %r13,48(%r15) + +# qhasm: *(uint64 *)(tp + 56) = txaddy3 +# asm 1: movq <txaddy3=int64#12,56(<tp=int64#13) +# asm 2: movq <txaddy3=%r14,56(<tp=%r15) +movq %r14,56(%r15) + +# qhasm: tt2d0 = 0 +# asm 1: mov $0,>tt2d0=int64#2 +# asm 2: mov $0,>tt2d0=%rsi +mov $0,%rsi + +# qhasm: tt2d1 = 0 +# asm 1: mov $0,>tt2d1=int64#6 +# asm 2: mov $0,>tt2d1=%r9 +mov $0,%r9 + +# qhasm: tt2d2 = 0 +# asm 1: mov $0,>tt2d2=int64#7 +# asm 2: mov $0,>tt2d2=%rax +mov $0,%rax + +# qhasm: tt2d3 = 0 +# asm 1: mov $0,>tt2d3=int64#8 +# asm 2: mov $0,>tt2d3=%r10 +mov $0,%r10 + +# qhasm: =? 
u - 1 +# asm 1: cmp $1,<u=int64#5 +# asm 2: cmp $1,<u=%r8 +cmp $1,%r8 + +# qhasm: t = *(uint64 *)(basep + 64 + pos) +# asm 1: movq 64(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 64(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 64(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 72 + pos) +# asm 1: movq 72(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 72(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 72(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 80 + pos) +# asm 1: movq 80(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 80(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 80(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 88 + pos) +# asm 1: movq 88(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 88(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 88(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? u - 2 +# asm 1: cmp $2,<u=int64#5 +# asm 2: cmp $2,<u=%r8 +cmp $2,%r8 + +# qhasm: t = *(uint64 *)(basep + 160 + pos) +# asm 1: movq 160(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 160(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 160(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 168 + pos) +# asm 1: movq 168(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 168(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 168(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 176 + pos) +# asm 1: movq 176(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 176(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 176(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 184 + pos) +# asm 1: movq 184(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 184(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 184(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? 
u - 3 +# asm 1: cmp $3,<u=int64#5 +# asm 2: cmp $3,<u=%r8 +cmp $3,%r8 + +# qhasm: t = *(uint64 *)(basep + 256 + pos) +# asm 1: movq 256(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 256(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 256(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 264 + pos) +# asm 1: movq 264(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 264(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 264(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 272 + pos) +# asm 1: movq 272(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 272(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 272(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 280 + pos) +# asm 1: movq 280(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 280(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 280(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? u - 4 +# asm 1: cmp $4,<u=int64#5 +# asm 2: cmp $4,<u=%r8 +cmp $4,%r8 + +# qhasm: t = *(uint64 *)(basep + 352 + pos) +# asm 1: movq 352(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 352(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 352(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 360 + pos) +# asm 1: movq 360(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 360(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 360(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 368 + pos) +# asm 1: movq 368(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 368(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 368(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 376 + pos) +# asm 1: movq 376(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 376(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 376(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? 
u - 5 +# asm 1: cmp $5,<u=int64#5 +# asm 2: cmp $5,<u=%r8 +cmp $5,%r8 + +# qhasm: t = *(uint64 *)(basep + 448 + pos) +# asm 1: movq 448(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 448(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 448(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 456 + pos) +# asm 1: movq 456(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 456(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 456(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 464 + pos) +# asm 1: movq 464(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 464(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 464(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 472 + pos) +# asm 1: movq 472(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 472(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 472(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? u - 6 +# asm 1: cmp $6,<u=int64#5 +# asm 2: cmp $6,<u=%r8 +cmp $6,%r8 + +# qhasm: t = *(uint64 *)(basep + 544 + pos) +# asm 1: movq 544(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 544(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 544(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 552 + pos) +# asm 1: movq 552(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 552(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 552(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 560 + pos) +# asm 1: movq 560(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 560(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 560(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 568 + pos) +# asm 1: movq 568(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 568(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 568(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? 
u - 7 +# asm 1: cmp $7,<u=int64#5 +# asm 2: cmp $7,<u=%r8 +cmp $7,%r8 + +# qhasm: t = *(uint64 *)(basep + 640 + pos) +# asm 1: movq 640(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 640(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 640(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#9,<tt2d0=int64#2 +# asm 2: cmove <t=%r11,<tt2d0=%rsi +cmove %r11,%rsi + +# qhasm: t = *(uint64 *)(basep + 648 + pos) +# asm 1: movq 648(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 648(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 648(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#9,<tt2d1=int64#6 +# asm 2: cmove <t=%r11,<tt2d1=%r9 +cmove %r11,%r9 + +# qhasm: t = *(uint64 *)(basep + 656 + pos) +# asm 1: movq 656(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 656(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 656(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#9,<tt2d2=int64#7 +# asm 2: cmove <t=%r11,<tt2d2=%rax +cmove %r11,%rax + +# qhasm: t = *(uint64 *)(basep + 664 + pos) +# asm 1: movq 664(<basep=int64#4,<pos=int64#1),>t=int64#9 +# asm 2: movq 664(<basep=%rcx,<pos=%rdi),>t=%r11 +movq 664(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#9,<tt2d3=int64#8 +# asm 2: cmove <t=%r11,<tt2d3=%r10 +cmove %r11,%r10 + +# qhasm: =? u - 8 +# asm 1: cmp $8,<u=int64#5 +# asm 2: cmp $8,<u=%r8 +cmp $8,%r8 + +# qhasm: t = *(uint64 *)(basep + 736 + pos) +# asm 1: movq 736(<basep=int64#4,<pos=int64#1),>t=int64#5 +# asm 2: movq 736(<basep=%rcx,<pos=%rdi),>t=%r8 +movq 736(%rcx,%rdi),%r8 + +# qhasm: tt2d0 = t if = +# asm 1: cmove <t=int64#5,<tt2d0=int64#2 +# asm 2: cmove <t=%r8,<tt2d0=%rsi +cmove %r8,%rsi + +# qhasm: t = *(uint64 *)(basep + 744 + pos) +# asm 1: movq 744(<basep=int64#4,<pos=int64#1),>t=int64#5 +# asm 2: movq 744(<basep=%rcx,<pos=%rdi),>t=%r8 +movq 744(%rcx,%rdi),%r8 + +# qhasm: tt2d1 = t if = +# asm 1: cmove <t=int64#5,<tt2d1=int64#6 +# asm 2: cmove <t=%r8,<tt2d1=%r9 +cmove %r8,%r9 + +# qhasm: t = *(uint64 *)(basep + 752 + pos) +# asm 1: movq 752(<basep=int64#4,<pos=int64#1),>t=int64#5 +# asm 2: movq 752(<basep=%rcx,<pos=%rdi),>t=%r8 +movq 752(%rcx,%rdi),%r8 + +# qhasm: tt2d2 = t if = +# asm 1: cmove <t=int64#5,<tt2d2=int64#7 +# asm 2: cmove <t=%r8,<tt2d2=%rax +cmove %r8,%rax + +# qhasm: t = *(uint64 *)(basep + 760 + pos) +# asm 1: movq 760(<basep=int64#4,<pos=int64#1),>t=int64#1 +# asm 2: movq 760(<basep=%rcx,<pos=%rdi),>t=%rdi +movq 760(%rcx,%rdi),%rdi + +# qhasm: tt2d3 = t if = +# asm 1: cmove <t=int64#1,<tt2d3=int64#8 +# asm 2: cmove <t=%rdi,<tt2d3=%r10 +cmove %rdi,%r10 + +# qhasm: tt0 = 0 +# asm 1: mov $0,>tt0=int64#1 +# asm 2: mov $0,>tt0=%rdi +mov $0,%rdi + +# qhasm: tt1 = 0 +# asm 1: mov $0,>tt1=int64#4 +# asm 2: mov $0,>tt1=%rcx +mov $0,%rcx + +# qhasm: tt2 = 0 +# asm 1: mov $0,>tt2=int64#5 +# asm 2: mov $0,>tt2=%r8 +mov $0,%r8 + +# qhasm: tt3 = 0 +# asm 1: mov $0,>tt3=int64#9 +# asm 2: mov $0,>tt3=%r11 +mov $0,%r11 + +# qhasm: carry? tt0 -= tt2d0 +# asm 1: sub <tt2d0=int64#2,<tt0=int64#1 +# asm 2: sub <tt2d0=%rsi,<tt0=%rdi +sub %rsi,%rdi + +# qhasm: carry? tt1 -= tt2d1 - carry +# asm 1: sbb <tt2d1=int64#6,<tt1=int64#4 +# asm 2: sbb <tt2d1=%r9,<tt1=%rcx +sbb %r9,%rcx + +# qhasm: carry? tt2 -= tt2d2 - carry +# asm 1: sbb <tt2d2=int64#7,<tt2=int64#5 +# asm 2: sbb <tt2d2=%rax,<tt2=%r8 +sbb %rax,%r8 + +# qhasm: carry? 
tt3 -= tt2d3 - carry +# asm 1: sbb <tt2d3=int64#8,<tt3=int64#9 +# asm 2: sbb <tt2d3=%r10,<tt3=%r11 +sbb %r10,%r11 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#10 +# asm 2: mov $0,>subt0=%r12 +mov $0,%r12 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#11 +# asm 2: mov $38,>subt1=%r13 +mov $38,%r13 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#10,<subt1=int64#11 +# asm 2: cmovae <subt0=%r12,<subt1=%r13 +cmovae %r12,%r13 + +# qhasm: carry? tt0 -= subt1 +# asm 1: sub <subt1=int64#11,<tt0=int64#1 +# asm 2: sub <subt1=%r13,<tt0=%rdi +sub %r13,%rdi + +# qhasm: carry? tt1 -= subt0 - carry +# asm 1: sbb <subt0=int64#10,<tt1=int64#4 +# asm 2: sbb <subt0=%r12,<tt1=%rcx +sbb %r12,%rcx + +# qhasm: carry? tt2 -= subt0 - carry +# asm 1: sbb <subt0=int64#10,<tt2=int64#5 +# asm 2: sbb <subt0=%r12,<tt2=%r8 +sbb %r12,%r8 + +# qhasm: carry? tt3 -= subt0 - carry +# asm 1: sbb <subt0=int64#10,<tt3=int64#9 +# asm 2: sbb <subt0=%r12,<tt3=%r11 +sbb %r12,%r11 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#11,<subt0=int64#10 +# asm 2: cmovc <subt1=%r13,<subt0=%r12 +cmovc %r13,%r12 + +# qhasm: tt0 -= subt0 +# asm 1: sub <subt0=int64#10,<tt0=int64#1 +# asm 2: sub <subt0=%r12,<tt0=%rdi +sub %r12,%rdi + +# qhasm: signed<? b - 0 +# asm 1: cmp $0,<b=int64#3 +# asm 2: cmp $0,<b=%rdx +cmp $0,%rdx + +# qhasm: tt2d0 = tt0 if signed< +# asm 1: cmovl <tt0=int64#1,<tt2d0=int64#2 +# asm 2: cmovl <tt0=%rdi,<tt2d0=%rsi +cmovl %rdi,%rsi + +# qhasm: tt2d1 = tt1 if signed< +# asm 1: cmovl <tt1=int64#4,<tt2d1=int64#6 +# asm 2: cmovl <tt1=%rcx,<tt2d1=%r9 +cmovl %rcx,%r9 + +# qhasm: tt2d2 = tt2 if signed< +# asm 1: cmovl <tt2=int64#5,<tt2d2=int64#7 +# asm 2: cmovl <tt2=%r8,<tt2d2=%rax +cmovl %r8,%rax + +# qhasm: tt2d3 = tt3 if signed< +# asm 1: cmovl <tt3=int64#9,<tt2d3=int64#8 +# asm 2: cmovl <tt3=%r11,<tt2d3=%r10 +cmovl %r11,%r10 + +# qhasm: *(uint64 *)(tp + 64) = tt2d0 +# asm 1: movq <tt2d0=int64#2,64(<tp=int64#13) +# asm 2: movq <tt2d0=%rsi,64(<tp=%r15) +movq %rsi,64(%r15) + +# qhasm: *(uint64 *)(tp + 72) = tt2d1 +# asm 1: movq <tt2d1=int64#6,72(<tp=int64#13) +# asm 2: movq <tt2d1=%r9,72(<tp=%r15) +movq %r9,72(%r15) + +# qhasm: *(uint64 *)(tp + 80) = tt2d2 +# asm 1: movq <tt2d2=int64#7,80(<tp=int64#13) +# asm 2: movq <tt2d2=%rax,80(<tp=%r15) +movq %rax,80(%r15) + +# qhasm: *(uint64 *)(tp + 88) = tt2d3 +# asm 1: movq <tt2d3=int64#8,88(<tp=int64#13) +# asm 2: movq <tt2d3=%r10,88(<tp=%r15) +movq %r10,88(%r15) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 
2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/consts.s b/ext/ed25519-amd64-asm/consts.s new file mode 100644 index 00000000..c272383f --- /dev/null +++ b/ext/ed25519-amd64-asm/consts.s @@ -0,0 +1,39 @@ +.data + +.globl crypto_sign_ed25519_amd64_64_121666 +.globl crypto_sign_ed25519_amd64_64_MU0 +.globl crypto_sign_ed25519_amd64_64_MU1 +.globl crypto_sign_ed25519_amd64_64_MU2 +.globl crypto_sign_ed25519_amd64_64_MU3 +.globl crypto_sign_ed25519_amd64_64_MU4 +.globl crypto_sign_ed25519_amd64_64_ORDER0 +.globl crypto_sign_ed25519_amd64_64_ORDER1 +.globl crypto_sign_ed25519_amd64_64_ORDER2 +.globl crypto_sign_ed25519_amd64_64_ORDER3 +.globl crypto_sign_ed25519_amd64_64_EC2D0 +.globl crypto_sign_ed25519_amd64_64_EC2D1 +.globl crypto_sign_ed25519_amd64_64_EC2D2 +.globl crypto_sign_ed25519_amd64_64_EC2D3 +.globl crypto_sign_ed25519_amd64_64_38 + +.p2align 4 + +crypto_sign_ed25519_amd64_64_121666: .quad 121666 + +crypto_sign_ed25519_amd64_64_MU0: .quad 0xED9CE5A30A2C131B +crypto_sign_ed25519_amd64_64_MU1: .quad 0x2106215D086329A7 +crypto_sign_ed25519_amd64_64_MU2: .quad 0xFFFFFFFFFFFFFFEB +crypto_sign_ed25519_amd64_64_MU3: .quad 0xFFFFFFFFFFFFFFFF +crypto_sign_ed25519_amd64_64_MU4: .quad 0x000000000000000F + +crypto_sign_ed25519_amd64_64_ORDER0: .quad 0x5812631A5CF5D3ED +crypto_sign_ed25519_amd64_64_ORDER1: .quad 0x14DEF9DEA2F79CD6 +crypto_sign_ed25519_amd64_64_ORDER2: .quad 0x0000000000000000 +crypto_sign_ed25519_amd64_64_ORDER3: .quad 0x1000000000000000 + +crypto_sign_ed25519_amd64_64_EC2D0: .quad 0xEBD69B9426B2F146 +crypto_sign_ed25519_amd64_64_EC2D1: .quad 0x00E0149A8283B156 +crypto_sign_ed25519_amd64_64_EC2D2: .quad 0x198E80F2EEF3D130 +crypto_sign_ed25519_amd64_64_EC2D3: .quad 0xA406D9DC56DFFCE7 + +crypto_sign_ed25519_amd64_64_38: .quad 38 diff --git a/ext/ed25519-amd64-asm/fe25519.h b/ext/ed25519-amd64-asm/fe25519.h new file mode 100644 index 00000000..33ffabbe --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519.h @@ -0,0 +1,64 @@ +#ifndef FE25519_H +#define FE25519_H + +#define fe25519 crypto_sign_ed25519_amd64_64_fe25519 +#define fe25519_freeze crypto_sign_ed25519_amd64_64_fe25519_freeze +#define fe25519_unpack crypto_sign_ed25519_amd64_64_fe25519_unpack +#define fe25519_pack crypto_sign_ed25519_amd64_64_fe25519_pack +#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_64_fe25519_iszero_vartime +#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_64_fe25519_iseq_vartime +#define fe25519_cmov crypto_sign_ed25519_amd64_64_fe25519_cmov +#define fe25519_setint crypto_sign_ed25519_amd64_64_fe25519_setint +#define fe25519_neg crypto_sign_ed25519_amd64_64_fe25519_neg +#define fe25519_getparity crypto_sign_ed25519_amd64_64_fe25519_getparity +#define fe25519_add crypto_sign_ed25519_amd64_64_fe25519_add +#define fe25519_sub crypto_sign_ed25519_amd64_64_fe25519_sub +#define fe25519_mul crypto_sign_ed25519_amd64_64_fe25519_mul +#define fe25519_mul121666 crypto_sign_ed25519_amd64_64_fe25519_mul121666 +#define fe25519_square crypto_sign_ed25519_amd64_64_fe25519_square +#define fe25519_invert crypto_sign_ed25519_amd64_64_fe25519_invert +#define fe25519_pow2523 crypto_sign_ed25519_amd64_64_fe25519_pow2523 + +typedef struct +{ + unsigned long long v[4]; +} +fe25519; + +void fe25519_freeze(fe25519 *r); + +void fe25519_unpack(fe25519 *r, const unsigned char x[32]); + +void fe25519_pack(unsigned char r[32], const fe25519 *x); + +void fe25519_cmov(fe25519 *r, const fe25519 *x, 
unsigned char b); + +void fe25519_cswap(fe25519 *r, fe25519 *x, unsigned char b); + +void fe25519_setint(fe25519 *r, unsigned int v); + +void fe25519_neg(fe25519 *r, const fe25519 *x); + +unsigned char fe25519_getparity(const fe25519 *x); + +int fe25519_iszero_vartime(const fe25519 *x); + +int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y); + +void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_mul121666(fe25519 *r, const fe25519 *x); + +void fe25519_square(fe25519 *r, const fe25519 *x); + +void fe25519_pow(fe25519 *r, const fe25519 *x, const unsigned char *e); + +void fe25519_invert(fe25519 *r, const fe25519 *x); + +void fe25519_pow2523(fe25519 *r, const fe25519 *x); + +#endif diff --git a/ext/ed25519-amd64-asm/fe25519_add.s b/ext/ed25519-amd64-asm/fe25519_add.s new file mode 100644 index 00000000..b2e56252 --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_add.s @@ -0,0 +1,189 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_add +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_fe25519_add +.globl crypto_sign_ed25519_amd64_64_fe25519_add +_crypto_sign_ed25519_amd64_64_fe25519_add: +crypto_sign_ed25519_amd64_64_fe25519_add: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>r0=int64#4 +# asm 2: movq 0(<xp=%rsi),>r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>r1=int64#5 +# asm 2: movq 8(<xp=%rsi),>r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>r2=int64#6 +# asm 2: movq 16(<xp=%rsi),>r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>r3=int64#2 +# asm 2: movq 24(<xp=%rsi),>r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 += *(uint64 *)(yp + 0) +# asm 1: addq 0(<yp=int64#3),<r0=int64#4 +# asm 2: addq 0(<yp=%rdx),<r0=%rcx +addq 0(%rdx),%rcx + +# qhasm: carry? r1 += *(uint64 *)(yp + 8) + carry +# asm 1: adcq 8(<yp=int64#3),<r1=int64#5 +# asm 2: adcq 8(<yp=%rdx),<r1=%r8 +adcq 8(%rdx),%r8 + +# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry +# asm 1: adcq 16(<yp=int64#3),<r2=int64#6 +# asm 2: adcq 16(<yp=%rdx),<r2=%r9 +adcq 16(%rdx),%r9 + +# qhasm: carry? 
r3 += *(uint64 *)(yp + 24) + carry +# asm 1: adcq 24(<yp=int64#3),<r3=int64#2 +# asm 2: adcq 24(<yp=%rdx),<r3=%rsi +adcq 24(%rdx),%rsi + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#3 +# asm 2: mov $0,>addt0=%rdx +mov $0,%rdx + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#3,<addt1=int64#7 +# asm 2: cmovae <addt0=%rdx,<addt1=%rax +cmovae %rdx,%rax + +# qhasm: carry? r0 += addt1 +# asm 1: add <addt1=int64#7,<r0=int64#4 +# asm 2: add <addt1=%rax,<r0=%rcx +add %rax,%rcx + +# qhasm: carry? r1 += addt0 + carry +# asm 1: adc <addt0=int64#3,<r1=int64#5 +# asm 2: adc <addt0=%rdx,<r1=%r8 +adc %rdx,%r8 + +# qhasm: carry? r2 += addt0 + carry +# asm 1: adc <addt0=int64#3,<r2=int64#6 +# asm 2: adc <addt0=%rdx,<r2=%r9 +adc %rdx,%r9 + +# qhasm: carry? r3 += addt0 + carry +# asm 1: adc <addt0=int64#3,<r3=int64#2 +# asm 2: adc <addt0=%rdx,<r3=%rsi +adc %rdx,%rsi + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#7,<addt0=int64#3 +# asm 2: cmovc <addt1=%rax,<addt0=%rdx +cmovc %rax,%rdx + +# qhasm: r0 += addt0 +# asm 1: add <addt0=int64#3,<r0=int64#4 +# asm 2: add <addt0=%rdx,<r0=%rcx +add %rdx,%rcx + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#4,0(<rp=int64#1) +# asm 2: movq <r0=%rcx,0(<rp=%rdi) +movq %rcx,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#5,8(<rp=int64#1) +# asm 2: movq <r1=%r8,8(<rp=%rdi) +movq %r8,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#6,16(<rp=int64#1) +# asm 2: movq <r2=%r9,16(<rp=%rdi) +movq %r9,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#2,24(<rp=int64#1) +# asm 2: movq <r3=%rsi,24(<rp=%rdi) +movq %rsi,24(%rdi) + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/fe25519_freeze.s b/ext/ed25519-amd64-asm/fe25519_freeze.s new file mode 100644 index 00000000..dea29021 --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_freeze.s @@ -0,0 +1,322 @@ + +# qhasm: int64 rp + +# qhasm: input rp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 two63 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_freeze +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_fe25519_freeze +.globl crypto_sign_ed25519_amd64_64_fe25519_freeze +_crypto_sign_ed25519_amd64_64_fe25519_freeze: +crypto_sign_ed25519_amd64_64_fe25519_freeze: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq 
%r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: r0 = *(uint64 *) (rp + 0) +# asm 1: movq 0(<rp=int64#1),>r0=int64#2 +# asm 2: movq 0(<rp=%rdi),>r0=%rsi +movq 0(%rdi),%rsi + +# qhasm: r1 = *(uint64 *) (rp + 8) +# asm 1: movq 8(<rp=int64#1),>r1=int64#3 +# asm 2: movq 8(<rp=%rdi),>r1=%rdx +movq 8(%rdi),%rdx + +# qhasm: r2 = *(uint64 *) (rp + 16) +# asm 1: movq 16(<rp=int64#1),>r2=int64#4 +# asm 2: movq 16(<rp=%rdi),>r2=%rcx +movq 16(%rdi),%rcx + +# qhasm: r3 = *(uint64 *) (rp + 24) +# asm 1: movq 24(<rp=int64#1),>r3=int64#5 +# asm 2: movq 24(<rp=%rdi),>r3=%r8 +movq 24(%rdi),%r8 + +# qhasm: t0 = r0 +# asm 1: mov <r0=int64#2,>t0=int64#6 +# asm 2: mov <r0=%rsi,>t0=%r9 +mov %rsi,%r9 + +# qhasm: t1 = r1 +# asm 1: mov <r1=int64#3,>t1=int64#7 +# asm 2: mov <r1=%rdx,>t1=%rax +mov %rdx,%rax + +# qhasm: t2 = r2 +# asm 1: mov <r2=int64#4,>t2=int64#8 +# asm 2: mov <r2=%rcx,>t2=%r10 +mov %rcx,%r10 + +# qhasm: t3 = r3 +# asm 1: mov <r3=int64#5,>t3=int64#9 +# asm 2: mov <r3=%r8,>t3=%r11 +mov %r8,%r11 + +# qhasm: two63 = 1 +# asm 1: mov $1,>two63=int64#10 +# asm 2: mov $1,>two63=%r12 +mov $1,%r12 + +# qhasm: two63 <<= 63 +# asm 1: shl $63,<two63=int64#10 +# asm 2: shl $63,<two63=%r12 +shl $63,%r12 + +# qhasm: carry? t0 += 19 +# asm 1: add $19,<t0=int64#6 +# asm 2: add $19,<t0=%r9 +add $19,%r9 + +# qhasm: carry? t1 += 0 + carry +# asm 1: adc $0,<t1=int64#7 +# asm 2: adc $0,<t1=%rax +adc $0,%rax + +# qhasm: carry? t2 += 0 + carry +# asm 1: adc $0,<t2=int64#8 +# asm 2: adc $0,<t2=%r10 +adc $0,%r10 + +# qhasm: carry? t3 += two63 + carry +# asm 1: adc <two63=int64#10,<t3=int64#9 +# asm 2: adc <two63=%r12,<t3=%r11 +adc %r12,%r11 + +# qhasm: r0 = t0 if carry +# asm 1: cmovc <t0=int64#6,<r0=int64#2 +# asm 2: cmovc <t0=%r9,<r0=%rsi +cmovc %r9,%rsi + +# qhasm: r1 = t1 if carry +# asm 1: cmovc <t1=int64#7,<r1=int64#3 +# asm 2: cmovc <t1=%rax,<r1=%rdx +cmovc %rax,%rdx + +# qhasm: r2 = t2 if carry +# asm 1: cmovc <t2=int64#8,<r2=int64#4 +# asm 2: cmovc <t2=%r10,<r2=%rcx +cmovc %r10,%rcx + +# qhasm: r3 = t3 if carry +# asm 1: cmovc <t3=int64#9,<r3=int64#5 +# asm 2: cmovc <t3=%r11,<r3=%r8 +cmovc %r11,%r8 + +# qhasm: t0 = r0 +# asm 1: mov <r0=int64#2,>t0=int64#6 +# asm 2: mov <r0=%rsi,>t0=%r9 +mov %rsi,%r9 + +# qhasm: t1 = r1 +# asm 1: mov <r1=int64#3,>t1=int64#7 +# asm 2: mov <r1=%rdx,>t1=%rax +mov %rdx,%rax + +# qhasm: t2 = r2 +# asm 1: mov <r2=int64#4,>t2=int64#8 +# asm 2: mov <r2=%rcx,>t2=%r10 +mov %rcx,%r10 + +# qhasm: t3 = r3 +# asm 1: mov <r3=int64#5,>t3=int64#9 +# asm 2: mov <r3=%r8,>t3=%r11 +mov %r8,%r11 + +# qhasm: carry? t0 += 19 +# asm 1: add $19,<t0=int64#6 +# asm 2: add $19,<t0=%r9 +add $19,%r9 + +# qhasm: carry? t1 += 0 + carry +# asm 1: adc $0,<t1=int64#7 +# asm 2: adc $0,<t1=%rax +adc $0,%rax + +# qhasm: carry? 
t2 += 0 + carry +# asm 1: adc $0,<t2=int64#8 +# asm 2: adc $0,<t2=%r10 +adc $0,%r10 + +# qhasm: carry? t3 += two63 + carry +# asm 1: adc <two63=int64#10,<t3=int64#9 +# asm 2: adc <two63=%r12,<t3=%r11 +adc %r12,%r11 + +# qhasm: r0 = t0 if carry +# asm 1: cmovc <t0=int64#6,<r0=int64#2 +# asm 2: cmovc <t0=%r9,<r0=%rsi +cmovc %r9,%rsi + +# qhasm: r1 = t1 if carry +# asm 1: cmovc <t1=int64#7,<r1=int64#3 +# asm 2: cmovc <t1=%rax,<r1=%rdx +cmovc %rax,%rdx + +# qhasm: r2 = t2 if carry +# asm 1: cmovc <t2=int64#8,<r2=int64#4 +# asm 2: cmovc <t2=%r10,<r2=%rcx +cmovc %r10,%rcx + +# qhasm: r3 = t3 if carry +# asm 1: cmovc <t3=int64#9,<r3=int64#5 +# asm 2: cmovc <t3=%r11,<r3=%r8 +cmovc %r11,%r8 + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#2,0(<rp=int64#1) +# asm 2: movq <r0=%rsi,0(<rp=%rdi) +movq %rsi,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#3,8(<rp=int64#1) +# asm 2: movq <r1=%rdx,8(<rp=%rdi) +movq %rdx,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#4,16(<rp=int64#1) +# asm 2: movq <r2=%rcx,16(<rp=%rdi) +movq %rcx,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#5,24(<rp=int64#1) +# asm 2: movq <r3=%r8,24(<rp=%rdi) +movq %r8,24(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/fe25519_getparity.c b/ext/ed25519-amd64-asm/fe25519_getparity.c new file mode 100644 index 00000000..a003ec8f --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_getparity.c @@ -0,0 +1,8 @@ +#include "fe25519.h" + +unsigned char fe25519_getparity(const fe25519 *x) +{ + fe25519 t = *x; + fe25519_freeze(&t); + return (unsigned char)t.v[0] & 1; +} diff --git a/ext/ed25519-amd64-asm/fe25519_invert.c b/ext/ed25519-amd64-asm/fe25519_invert.c new file mode 100644 index 00000000..a46d141f --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_invert.c @@ -0,0 +1,60 @@ +#include "fe25519.h" + +void fe25519_invert(fe25519 *r, const fe25519 *x) +{ + fe25519 z2; + fe25519 z9; + fe25519 z11; + fe25519 z2_5_0; + fe25519 z2_10_0; + fe25519 z2_20_0; + fe25519 z2_50_0; + fe25519 z2_100_0; + fe25519 t; + int i; + + /* 2 */ fe25519_square(&z2,x); + /* 4 */ fe25519_square(&t,&z2); + /* 8 */ fe25519_square(&t,&t); + /* 9 */ fe25519_mul(&z9,&t,x); + /* 11 */ fe25519_mul(&z11,&z9,&z2); + /* 22 */ fe25519_square(&t,&z11); + /* 2^5 - 2^0 = 31 */ 
fe25519_mul(&z2_5_0,&t,&z9); + + /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0); + /* 2^10 - 2^5 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); } + /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0); + + /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0); + /* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0); + + /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0); + /* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); } + /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0); + + /* 2^41 - 2^1 */ fe25519_square(&t,&t); + /* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0); + + /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0); + /* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0); + + /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0); + /* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); } + /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0); + + /* 2^201 - 2^1 */ fe25519_square(&t,&t); + /* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0); + + /* 2^251 - 2^1 */ fe25519_square(&t,&t); + /* 2^252 - 2^2 */ fe25519_square(&t,&t); + /* 2^253 - 2^3 */ fe25519_square(&t,&t); + + /* 2^254 - 2^4 */ fe25519_square(&t,&t); + + /* 2^255 - 2^5 */ fe25519_square(&t,&t); + /* 2^255 - 21 */ fe25519_mul(r,&t,&z11); +} diff --git a/ext/ed25519-amd64-asm/fe25519_iseq.c b/ext/ed25519-amd64-asm/fe25519_iseq.c new file mode 100644 index 00000000..bf72f8c9 --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_iseq.c @@ -0,0 +1,14 @@ +#include "fe25519.h" + +int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y) +{ + fe25519 t1 = *x; + fe25519 t2 = *y; + fe25519_freeze(&t1); + fe25519_freeze(&t2); + if(t1.v[0] != t2.v[0]) return 0; + if(t1.v[1] != t2.v[1]) return 0; + if(t1.v[2] != t2.v[2]) return 0; + if(t1.v[3] != t2.v[3]) return 0; + return 1; +} diff --git a/ext/ed25519-amd64-asm/fe25519_iszero.c b/ext/ed25519-amd64-asm/fe25519_iszero.c new file mode 100644 index 00000000..99e4dafa --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_iszero.c @@ -0,0 +1,12 @@ +#include "fe25519.h" + +int fe25519_iszero_vartime(const fe25519 *x) +{ + fe25519 t = *x; + fe25519_freeze(&t); + if (t.v[0]) return 0; + if (t.v[1]) return 0; + if (t.v[2]) return 0; + if (t.v[3]) return 0; + return 1; +} diff --git a/ext/ed25519-amd64-asm/fe25519_mul.s b/ext/ed25519-amd64-asm/fe25519_mul.s new file mode 100644 index 00000000..7e24518d --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_mul.s @@ -0,0 +1,865 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: 
int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_mul +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_fe25519_mul +.globl crypto_sign_ed25519_amd64_64_fe25519_mul +_crypto_sign_ed25519_amd64_64_fe25519_mul: +crypto_sign_ed25519_amd64_64_fe25519_mul: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: yp = yp +# asm 1: mov <yp=int64#3,>yp=int64#4 +# asm 2: mov <yp=%rdx,>yp=%rcx +mov %rdx,%rcx + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>mulx0=int64#10 +# asm 2: movq 0(<xp=%rsi),>mulx0=%r12 +movq 0(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<yp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: r0 = mulrax +# asm 1: mov <mulrax=int64#7,>r0=int64#11 +# asm 2: mov <mulrax=%rax,>r0=%r13 +mov %rax,%r13 + +# qhasm: r1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>r1=int64#12 +# asm 2: mov <mulrdx=%rdx,>r1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(yp + 8) +# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<yp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
r1 += mulrax +# asm 1: add <mulrax=int64#7,<r1=int64#12 +# asm 2: add <mulrax=%rax,<r1=%r14 +add %rax,%r14 + +# qhasm: r2 = 0 +# asm 1: mov $0,>r2=int64#13 +# asm 2: mov $0,>r2=%r15 +mov $0,%r15 + +# qhasm: r2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<r2=int64#13 +# asm 2: adc <mulrdx=%rdx,<r2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(yp + 16) +# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<yp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? r2 += mulrax +# asm 1: add <mulrax=int64#7,<r2=int64#13 +# asm 2: add <mulrax=%rax,<r2=%r15 +add %rax,%r15 + +# qhasm: r3 = 0 +# asm 1: mov $0,>r3=int64#14 +# asm 2: mov $0,>r3=%rbx +mov $0,%rbx + +# qhasm: r3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<r3=int64#14 +# asm 2: adc <mulrdx=%rdx,<r3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(yp + 24) +# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<yp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? r3 += mulrax +# asm 1: add <mulrax=int64#7,<r3=int64#14 +# asm 2: add <mulrax=%rax,<r3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>mulx1=int64#10 +# asm 2: movq 8(<xp=%rsi),>mulx1=%r12 +movq 8(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<yp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? r1 += mulrax +# asm 1: add <mulrax=int64#7,<r1=int64#12 +# asm 2: add <mulrax=%rax,<r1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(yp + 8) +# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<yp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? r2 += mulrax +# asm 1: add <mulrax=int64#7,<r2=int64#13 +# asm 2: add <mulrax=%rax,<r2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? r2 += mulc +# asm 1: add <mulc=int64#15,<r2=int64#13 +# asm 2: add <mulc=%rbp,<r2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(yp + 16) +# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<yp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? 
r3 += mulrax +# asm 1: add <mulrax=int64#7,<r3=int64#14 +# asm 2: add <mulrax=%rax,<r3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? r3 += mulc +# asm 1: add <mulc=int64#15,<r3=int64#14 +# asm 2: add <mulc=%rbp,<r3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(yp + 24) +# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<yp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>mulx2=int64#10 +# asm 2: movq 16(<xp=%rsi),>mulx2=%r12 +movq 16(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<yp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? r2 += mulrax +# asm 1: add <mulrax=int64#7,<r2=int64#13 +# asm 2: add <mulrax=%rax,<r2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(yp + 8) +# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<yp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? r3 += mulrax +# asm 1: add <mulrax=int64#7,<r3=int64#14 +# asm 2: add <mulrax=%rax,<r3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? r3 += mulc +# asm 1: add <mulc=int64#15,<r3=int64#14 +# asm 2: add <mulc=%rbp,<r3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(yp + 16) +# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<yp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(yp + 24) +# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<yp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>mulx3=int64#2 +# asm 2: movq 24(<xp=%rsi),>mulx3=%rsi +movq 24(%rsi),%rsi + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<yp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? r3 += mulrax +# asm 1: add <mulrax=int64#7,<r3=int64#14 +# asm 2: add <mulrax=%rax,<r3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 8) +# asm 1: movq 8(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<yp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#10,<mulr4=int64#5 +# asm 2: add <mulc=%r12,<mulr4=%r8 +add %r12,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 16) +# asm 1: movq 16(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<yp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#10,<mulr5=int64#6 +# asm 2: add <mulc=%r12,<mulr5=%r9 +add %r12,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 24) +# asm 1: movq 24(<yp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<yp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#10,<mulr6=int64#8 +# asm 2: add <mulc=%r12,<mulr6=%r10 +add %r12,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
r0 += mulr4 +# asm 1: add <mulr4=int64#2,<r0=int64#11 +# asm 2: add <mulr4=%rsi,<r0=%r13 +add %rsi,%r13 + +# qhasm: carry? r1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<r1=int64#12 +# asm 2: adc <mulr5=%rcx,<r1=%r14 +adc %rcx,%r14 + +# qhasm: carry? r2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<r2=int64#13 +# asm 2: adc <mulr6=%r8,<r2=%r15 +adc %r8,%r15 + +# qhasm: carry? r3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<r3=int64#14 +# asm 2: adc <mulr7=%r9,<r3=%rbx +adc %r9,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? r0 += mulr8 +# asm 1: add <mulr8=int64#3,<r0=int64#11 +# asm 2: add <mulr8=%rdx,<r0=%r13 +add %rdx,%r13 + +# qhasm: carry? r1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<r1=int64#12 +# asm 2: adc <mulzero=%rsi,<r1=%r14 +adc %rsi,%r14 + +# qhasm: carry? r2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<r2=int64#13 +# asm 2: adc <mulzero=%rsi,<r2=%r15 +adc %rsi,%r15 + +# qhasm: carry? r3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<r3=int64#14 +# asm 2: adc <mulzero=%rsi,<r3=%rbx +adc %rsi,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: r0 += mulzero +# asm 1: add <mulzero=int64#2,<r0=int64#11 +# asm 2: add <mulzero=%rsi,<r0=%r13 +add %rsi,%r13 + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#12,8(<rp=int64#1) +# asm 2: movq <r1=%r14,8(<rp=%rdi) +movq %r14,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#13,16(<rp=int64#1) +# asm 2: movq <r2=%r15,16(<rp=%rdi) +movq %r15,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#14,24(<rp=int64#1) +# asm 2: movq <r3=%rbx,24(<rp=%rdi) +movq %rbx,24(%rdi) + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#11,0(<rp=int64#1) +# asm 2: movq <r0=%r13,0(<rp=%rdi) +movq %r13,0(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave 
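# note: the multiply-by-38 steps above fold the upper 256 bits of the 512-bit
# product back into r0..r3, using 2^256 = 2*2^255 ≡ 2*19 = 38 (mod 2^255-19).
# The value stored to rp is therefore only partially reduced and may still
# exceed 2^255-19; crypto_sign_ed25519_amd64_64_fe25519_freeze produces the
# canonical representative where one is needed (packing, comparisons, parity).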
+add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/fe25519_neg.c b/ext/ed25519-amd64-asm/fe25519_neg.c new file mode 100644 index 00000000..235b209d --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_neg.c @@ -0,0 +1,8 @@ +#include "fe25519.h" + +void fe25519_neg(fe25519 *r, const fe25519 *x) +{ + fe25519 t; + fe25519_setint(&t,0); + fe25519_sub(r,&t,x); +} diff --git a/ext/ed25519-amd64-asm/fe25519_pack.c b/ext/ed25519-amd64-asm/fe25519_pack.c new file mode 100644 index 00000000..caf51853 --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_pack.c @@ -0,0 +1,13 @@ +#include "fe25519.h" + +/* Assumes input x being reduced below 2^255 */ +void fe25519_pack(unsigned char r[32], const fe25519 *x) +{ + int i; + fe25519 t; + t = *x; + fe25519_freeze(&t); + /* assuming little-endian */ + for(i=0;i<32;i++) r[i] = i[(unsigned char *)&t.v]; +} + diff --git a/ext/ed25519-amd64-asm/fe25519_pow2523.c b/ext/ed25519-amd64-asm/fe25519_pow2523.c new file mode 100644 index 00000000..60042a0a --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_pow2523.c @@ -0,0 +1,55 @@ +#include "fe25519.h" + +void fe25519_pow2523(fe25519 *r, const fe25519 *x) +{ + fe25519 z2; + fe25519 z9; + fe25519 z11; + fe25519 z2_5_0; + fe25519 z2_10_0; + fe25519 z2_20_0; + fe25519 z2_50_0; + fe25519 z2_100_0; + fe25519 t; + int i; + + /* 2 */ fe25519_square(&z2,x); + /* 4 */ fe25519_square(&t,&z2); + /* 8 */ fe25519_square(&t,&t); + /* 9 */ fe25519_mul(&z9,&t,x); + /* 11 */ fe25519_mul(&z11,&z9,&z2); + /* 22 */ fe25519_square(&t,&z11); + /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9); + + /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0); + /* 2^10 - 2^5 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); } + /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0); + + /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0); + /* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0); + + /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0); + /* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); } + /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0); + + /* 2^41 - 2^1 */ fe25519_square(&t,&t); + /* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0); + + /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0); + /* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0); + + /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0); + /* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); } + /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0); + + /* 2^201 - 2^1 */ fe25519_square(&t,&t); + /* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0); + + /* 2^251 - 2^1 */ fe25519_square(&t,&t); + /* 2^252 - 2^2 */ fe25519_square(&t,&t); + /* 2^252 - 3 */ fe25519_mul(r,&t,x); +} diff --git a/ext/ed25519-amd64-asm/fe25519_setint.c b/ext/ed25519-amd64-asm/fe25519_setint.c new file mode 100644 index 00000000..585c4bdd --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_setint.c @@ -0,0 +1,9 @@ +#include "fe25519.h" + +void fe25519_setint(fe25519 *r, unsigned int v) +{ + r->v[0] = v; + r->v[1] = 0; + r->v[2] = 0; + r->v[3] = 0; +} diff --git a/ext/ed25519-amd64-asm/fe25519_square.s b/ext/ed25519-amd64-asm/fe25519_square.s new file mode 100644 index 00000000..3f51fd13 --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_square.s @@ -0,0 +1,639 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: input rp 
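# note: this routine computes rp = xp*xp with the same limb layout and the same
# multiply-by-38 reduction as fe25519_mul, but it only forms the six distinct
# cross products x[i]*x[j] (i < j), doubles them with an add/adc chain, and then
# adds the four squares x[i]*x[i], so it needs 10 rather than 16 mulq
# instructions before the reduction step.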
+ +# qhasm: input xp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 squarer4 + +# qhasm: int64 squarer5 + +# qhasm: int64 squarer6 + +# qhasm: int64 squarer7 + +# qhasm: int64 squarer8 + +# qhasm: int64 squarerax + +# qhasm: int64 squarerdx + +# qhasm: int64 squaret1 + +# qhasm: int64 squaret2 + +# qhasm: int64 squaret3 + +# qhasm: int64 squarec + +# qhasm: int64 squarezero + +# qhasm: int64 squarei38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_square +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_fe25519_square +.globl crypto_sign_ed25519_amd64_64_fe25519_square +_crypto_sign_ed25519_amd64_64_fe25519_square: +crypto_sign_ed25519_amd64_64_fe25519_square: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 8(<xp=%rsi),>squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(<xp=int64#2) +# asm 2: mulq 0(<xp=%rsi) +mulq 0(%rsi) + +# qhasm: r1 = squarerax +# asm 1: mov <squarerax=int64#7,>r1=int64#5 +# asm 2: mov <squarerax=%rax,>r1=%r8 +mov %rax,%r8 + +# qhasm: r2 = squarerdx +# asm 1: mov <squarerdx=int64#3,>r2=int64#6 +# asm 2: mov <squarerdx=%rdx,>r2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 16(<xp=%rsi),>squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(<xp=int64#2) +# asm 2: mulq 8(<xp=%rsi) +mulq 8(%rsi) + +# qhasm: r3 = 
squarerax +# asm 1: mov <squarerax=int64#7,>r3=int64#8 +# asm 2: mov <squarerax=%rax,>r3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer4=int64#9 +# asm 2: mov <squarerdx=%rdx,>squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<xp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16) +# asm 1: mulq 16(<xp=int64#2) +# asm 2: mulq 16(<xp=%rsi) +mulq 16(%rsi) + +# qhasm: squarer5 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer5=int64#10 +# asm 2: mov <squarerax=%rax,>squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer6=int64#11 +# asm 2: mov <squarerdx=%rdx,>squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 16(<xp=%rsi),>squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(<xp=int64#2) +# asm 2: mulq 0(<xp=%rsi) +mulq 0(%rsi) + +# qhasm: carry? r2 += squarerax +# asm 1: add <squarerax=int64#7,<r2=int64#6 +# asm 2: add <squarerax=%rax,<r2=%r9 +add %rax,%r9 + +# qhasm: carry? r3 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<r3=int64#8 +# asm 2: adc <squarerdx=%rdx,<r3=%r10 +adc %rdx,%r10 + +# qhasm: squarer4 += 0 + carry +# asm 1: adc $0,<squarer4=int64#9 +# asm 2: adc $0,<squarer4=%r11 +adc $0,%r11 + +# qhasm: squarerax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<xp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(<xp=int64#2) +# asm 2: mulq 8(<xp=%rsi) +mulq 8(%rsi) + +# qhasm: carry? squarer4 += squarerax +# asm 1: add <squarerax=int64#7,<squarer4=int64#9 +# asm 2: add <squarerax=%rax,<squarer4=%r11 +add %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarerax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<xp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(<xp=int64#2) +# asm 2: mulq 0(<xp=%rsi) +mulq 0(%rsi) + +# qhasm: carry? r3 += squarerax +# asm 1: add <squarerax=int64#7,<r3=int64#8 +# asm 2: add <squarerax=%rax,<r3=%r10 +add %rax,%r10 + +# qhasm: carry? squarer4 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer4=int64#9 +# asm 2: adc <squarerdx=%rdx,<squarer4=%r11 +adc %rdx,%r11 + +# qhasm: carry? squarer5 += 0 + carry +# asm 1: adc $0,<squarer5=int64#10 +# asm 2: adc $0,<squarer5=%r12 +adc $0,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: carry? r1 += r1 +# asm 1: add <r1=int64#5,<r1=int64#5 +# asm 2: add <r1=%r8,<r1=%r8 +add %r8,%r8 + +# qhasm: carry? r2 += r2 + carry +# asm 1: adc <r2=int64#6,<r2=int64#6 +# asm 2: adc <r2=%r9,<r2=%r9 +adc %r9,%r9 + +# qhasm: carry? 
r3 += r3 + carry +# asm 1: adc <r3=int64#8,<r3=int64#8 +# asm 2: adc <r3=%r10,<r3=%r10 +adc %r10,%r10 + +# qhasm: carry? squarer4 += squarer4 + carry +# asm 1: adc <squarer4=int64#9,<squarer4=int64#9 +# asm 2: adc <squarer4=%r11,<squarer4=%r11 +adc %r11,%r11 + +# qhasm: carry? squarer5 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<squarer5=int64#10 +# asm 2: adc <squarer5=%r12,<squarer5=%r12 +adc %r12,%r12 + +# qhasm: carry? squarer6 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<squarer6=int64#11 +# asm 2: adc <squarer6=%r13,<squarer6=%r13 +adc %r13,%r13 + +# qhasm: squarer7 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<squarer7=int64#4 +# asm 2: adc <squarer7=%rcx,<squarer7=%rcx +adc %rcx,%rcx + +# qhasm: squarerax = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 0(<xp=%rsi),>squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(<xp=int64#2) +# asm 2: mulq 0(<xp=%rsi) +mulq 0(%rsi) + +# qhasm: r0 = squarerax +# asm 1: mov <squarerax=int64#7,>r0=int64#12 +# asm 2: mov <squarerax=%rax,>r0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret1=int64#13 +# asm 2: mov <squarerdx=%rdx,>squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 8(<xp=%rsi),>squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(<xp=int64#2) +# asm 2: mulq 8(<xp=%rsi) +mulq 8(%rsi) + +# qhasm: squaret2 = squarerax +# asm 1: mov <squarerax=int64#7,>squaret2=int64#14 +# asm 2: mov <squarerax=%rax,>squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret3=int64#15 +# asm 2: mov <squarerdx=%rdx,>squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 16(<xp=%rsi),>squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16) +# asm 1: mulq 16(<xp=int64#2) +# asm 2: mulq 16(<xp=%rsi) +mulq 16(%rsi) + +# qhasm: carry? r1 += squaret1 +# asm 1: add <squaret1=int64#13,<r1=int64#5 +# asm 2: add <squaret1=%r15,<r1=%r8 +add %r15,%r8 + +# qhasm: carry? r2 += squaret2 + carry +# asm 1: adc <squaret2=int64#14,<r2=int64#6 +# asm 2: adc <squaret2=%rbx,<r2=%r9 +adc %rbx,%r9 + +# qhasm: carry? r3 += squaret3 + carry +# asm 1: adc <squaret3=int64#15,<r3=int64#8 +# asm 2: adc <squaret3=%rbp,<r3=%r10 +adc %rbp,%r10 + +# qhasm: carry? squarer4 += squarerax + carry +# asm 1: adc <squarerax=int64#7,<squarer4=int64#9 +# asm 2: adc <squarerax=%rax,<squarer4=%r11 +adc %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: squarerax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<xp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24) +# asm 1: mulq 24(<xp=int64#2) +# asm 2: mulq 24(<xp=%rsi) +mulq 24(%rsi) + +# qhasm: carry? 
squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: squarerax = squarer4 +# asm 1: mov <squarer4=int64#9,>squarerax=int64#7 +# asm 2: mov <squarer4=%r11,>squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: squarer4 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer4=int64#2 +# asm 2: mov <squarerax=%rax,>squarer4=%rsi +mov %rax,%rsi + +# qhasm: squarerax = squarer5 +# asm 1: mov <squarer5=int64#10,>squarerax=int64#7 +# asm 2: mov <squarer5=%r12,>squarerax=%rax +mov %r12,%rax + +# qhasm: squarer5 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer5=int64#9 +# asm 2: mov <squarerdx=%rdx,>squarer5=%r11 +mov %rdx,%r11 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer5 += squarerax +# asm 1: add <squarerax=int64#7,<squarer5=int64#9 +# asm 2: add <squarerax=%rax,<squarer5=%r11 +add %rax,%r11 + +# qhasm: squarerax = squarer6 +# asm 1: mov <squarer6=int64#11,>squarerax=int64#7 +# asm 2: mov <squarer6=%r13,>squarerax=%rax +mov %r13,%rax + +# qhasm: squarer6 = 0 +# asm 1: mov $0,>squarer6=int64#10 +# asm 2: mov $0,>squarer6=%r12 +mov $0,%r12 + +# qhasm: squarer6 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer6=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer6=%r12 +adc %rdx,%r12 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#10 +# asm 2: add <squarerax=%rax,<squarer6=%r12 +add %rax,%r12 + +# qhasm: squarerax = squarer7 +# asm 1: mov <squarer7=int64#4,>squarerax=int64#7 +# asm 2: mov <squarer7=%rcx,>squarerax=%rax +mov %rcx,%rax + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer7 += squarerax +# asm 1: add <squarerax=int64#7,<squarer7=int64#4 +# asm 2: add <squarerax=%rax,<squarer7=%rcx +add %rax,%rcx + +# qhasm: squarer8 = 0 +# asm 1: mov $0,>squarer8=int64#7 +# asm 2: mov $0,>squarer8=%rax +mov $0,%rax + +# qhasm: squarer8 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer8=int64#7 +# asm 2: adc <squarerdx=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: carry? r0 += squarer4 +# asm 1: add <squarer4=int64#2,<r0=int64#12 +# asm 2: add <squarer4=%rsi,<r0=%r14 +add %rsi,%r14 + +# qhasm: carry? r1 += squarer5 + carry +# asm 1: adc <squarer5=int64#9,<r1=int64#5 +# asm 2: adc <squarer5=%r11,<r1=%r8 +adc %r11,%r8 + +# qhasm: carry? r2 += squarer6 + carry +# asm 1: adc <squarer6=int64#10,<r2=int64#6 +# asm 2: adc <squarer6=%r12,<r2=%r9 +adc %r12,%r9 + +# qhasm: carry? 
r3 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<r3=int64#8 +# asm 2: adc <squarer7=%rcx,<r3=%r10 +adc %rcx,%r10 + +# qhasm: squarezero = 0 +# asm 1: mov $0,>squarezero=int64#2 +# asm 2: mov $0,>squarezero=%rsi +mov $0,%rsi + +# qhasm: squarer8 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<squarer8=int64#7 +# asm 2: adc <squarezero=%rsi,<squarer8=%rax +adc %rsi,%rax + +# qhasm: squarer8 *= 38 +# asm 1: imulq $38,<squarer8=int64#7,>squarer8=int64#3 +# asm 2: imulq $38,<squarer8=%rax,>squarer8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? r0 += squarer8 +# asm 1: add <squarer8=int64#3,<r0=int64#12 +# asm 2: add <squarer8=%rdx,<r0=%r14 +add %rdx,%r14 + +# qhasm: carry? r1 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<r1=int64#5 +# asm 2: adc <squarezero=%rsi,<r1=%r8 +adc %rsi,%r8 + +# qhasm: carry? r2 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<r2=int64#6 +# asm 2: adc <squarezero=%rsi,<r2=%r9 +adc %rsi,%r9 + +# qhasm: carry? r3 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<r3=int64#8 +# asm 2: adc <squarezero=%rsi,<r3=%r10 +adc %rsi,%r10 + +# qhasm: squarezero += squarezero + carry +# asm 1: adc <squarezero=int64#2,<squarezero=int64#2 +# asm 2: adc <squarezero=%rsi,<squarezero=%rsi +adc %rsi,%rsi + +# qhasm: squarezero *= 38 +# asm 1: imulq $38,<squarezero=int64#2,>squarezero=int64#2 +# asm 2: imulq $38,<squarezero=%rsi,>squarezero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: r0 += squarezero +# asm 1: add <squarezero=int64#2,<r0=int64#12 +# asm 2: add <squarezero=%rsi,<r0=%r14 +add %rsi,%r14 + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#5,8(<rp=int64#1) +# asm 2: movq <r1=%r8,8(<rp=%rdi) +movq %r8,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#6,16(<rp=int64#1) +# asm 2: movq <r2=%r9,16(<rp=%rdi) +movq %r9,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#8,24(<rp=int64#1) +# asm 2: movq <r3=%r10,24(<rp=%rdi) +movq %r10,24(%rdi) + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#12,0(<rp=int64#1) +# asm 2: movq <r0=%r14,0(<rp=%rdi) +movq %r14,0(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/fe25519_sub.s b/ext/ed25519-amd64-asm/fe25519_sub.s new file mode 100644 index 00000000..0b395bce --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_sub.s @@ -0,0 +1,189 @@ + +# qhasm: 
int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_sub +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_fe25519_sub +.globl crypto_sign_ed25519_amd64_64_fe25519_sub +_crypto_sign_ed25519_amd64_64_fe25519_sub: +crypto_sign_ed25519_amd64_64_fe25519_sub: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>r0=int64#4 +# asm 2: movq 0(<xp=%rsi),>r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>r1=int64#5 +# asm 2: movq 8(<xp=%rsi),>r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>r2=int64#6 +# asm 2: movq 16(<xp=%rsi),>r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>r3=int64#2 +# asm 2: movq 24(<xp=%rsi),>r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(<yp=int64#3),<r0=int64#4 +# asm 2: subq 0(<yp=%rdx),<r0=%rcx +subq 0(%rdx),%rcx + +# qhasm: carry? r1 -= *(uint64 *)(yp + 8) - carry +# asm 1: sbbq 8(<yp=int64#3),<r1=int64#5 +# asm 2: sbbq 8(<yp=%rdx),<r1=%r8 +sbbq 8(%rdx),%r8 + +# qhasm: carry? r2 -= *(uint64 *)(yp + 16) - carry +# asm 1: sbbq 16(<yp=int64#3),<r2=int64#6 +# asm 2: sbbq 16(<yp=%rdx),<r2=%r9 +sbbq 16(%rdx),%r9 + +# qhasm: carry? r3 -= *(uint64 *)(yp + 24) - carry +# asm 1: sbbq 24(<yp=int64#3),<r3=int64#2 +# asm 2: sbbq 24(<yp=%rdx),<r3=%rsi +sbbq 24(%rdx),%rsi + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#3 +# asm 2: mov $0,>subt0=%rdx +mov $0,%rdx + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#7 +# asm 2: mov $38,>subt1=%rax +mov $38,%rax + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#3,<subt1=int64#7 +# asm 2: cmovae <subt0=%rdx,<subt1=%rax +cmovae %rdx,%rax + +# qhasm: carry? r0 -= subt1 +# asm 1: sub <subt1=int64#7,<r0=int64#4 +# asm 2: sub <subt1=%rax,<r0=%rcx +sub %rax,%rcx + +# qhasm: carry? r1 -= subt0 - carry +# asm 1: sbb <subt0=int64#3,<r1=int64#5 +# asm 2: sbb <subt0=%rdx,<r1=%r8 +sbb %rdx,%r8 + +# qhasm: carry? r2 -= subt0 - carry +# asm 1: sbb <subt0=int64#3,<r2=int64#6 +# asm 2: sbb <subt0=%rdx,<r2=%r9 +sbb %rdx,%r9 + +# qhasm: carry? 
r3 -= subt0 - carry +# asm 1: sbb <subt0=int64#3,<r3=int64#2 +# asm 2: sbb <subt0=%rdx,<r3=%rsi +sbb %rdx,%rsi + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#7,<subt0=int64#3 +# asm 2: cmovc <subt1=%rax,<subt0=%rdx +cmovc %rax,%rdx + +# qhasm: r0 -= subt0 +# asm 1: sub <subt0=int64#3,<r0=int64#4 +# asm 2: sub <subt0=%rdx,<r0=%rcx +sub %rdx,%rcx + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#4,0(<rp=int64#1) +# asm 2: movq <r0=%rcx,0(<rp=%rdi) +movq %rcx,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#5,8(<rp=int64#1) +# asm 2: movq <r1=%r8,8(<rp=%rdi) +movq %r8,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#6,16(<rp=int64#1) +# asm 2: movq <r2=%r9,16(<rp=%rdi) +movq %r9,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#2,24(<rp=int64#1) +# asm 2: movq <r3=%rsi,24(<rp=%rdi) +movq %rsi,24(%rdi) + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/fe25519_unpack.c b/ext/ed25519-amd64-asm/fe25519_unpack.c new file mode 100644 index 00000000..b3b0f4d5 --- /dev/null +++ b/ext/ed25519-amd64-asm/fe25519_unpack.c @@ -0,0 +1,11 @@ +#include "fe25519.h" + +void fe25519_unpack(fe25519 *r, const unsigned char x[32]) +{ + /* assuming little-endian */ + r->v[0] = *(unsigned long long *)x; + r->v[1] = *(((unsigned long long *)x)+1); + r->v[2] = *(((unsigned long long *)x)+2); + r->v[3] = *(((unsigned long long *)x)+3); + r->v[3] &= 0x7fffffffffffffffULL; +} diff --git a/ext/ed25519-amd64-asm/ge25519.h b/ext/ed25519-amd64-asm/ge25519.h new file mode 100644 index 00000000..0b15136b --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519.h @@ -0,0 +1,95 @@ +#ifndef GE25519_H +#define GE25519_H + +#include "fe25519.h" +#include "sc25519.h" + +#define ge25519 crypto_sign_ed25519_amd64_64_ge25519 +#define ge25519_base crypto_sign_ed25519_amd64_64_ge25519_base +#define ge25519_unpackneg_vartime crypto_sign_ed25519_amd64_64_unpackneg_vartime +#define ge25519_pack crypto_sign_ed25519_amd64_64_pack +#define ge25519_isneutral_vartime crypto_sign_ed25519_amd64_64_isneutral_vartime +#define ge25519_add crypto_sign_ed25519_amd64_64_ge25519_add +#define ge25519_double crypto_sign_ed25519_amd64_64_ge25519_double +#define ge25519_double_scalarmult_vartime crypto_sign_ed25519_amd64_64_double_scalarmult_vartime +#define ge25519_multi_scalarmult_vartime crypto_sign_ed25519_amd64_64_ge25519_multi_scalarmult_vartime +#define ge25519_scalarmult_base crypto_sign_ed25519_amd64_64_scalarmult_base +#define ge25519_p1p1_to_p2 crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2 +#define ge25519_p1p1_to_p3 crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3 +#define ge25519_add_p1p1 crypto_sign_ed25519_amd64_64_ge25519_add_p1p1 +#define ge25519_dbl_p1p1 crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1 +#define choose_t crypto_sign_ed25519_amd64_64_choose_t +#define ge25519_nielsadd2 crypto_sign_ed25519_amd64_64_ge25519_nielsadd2 +#define ge25519_nielsadd_p1p1 crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1 +#define ge25519_pnielsadd_p1p1 crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1 + + +#define ge25519_p3 ge25519 + +typedef struct +{ + fe25519 x; + fe25519 y; + fe25519 z; + fe25519 t; +} ge25519; + +typedef struct +{ + fe25519 x; + fe25519 z; + fe25519 y; + fe25519 t; +} ge25519_p1p1; + +typedef struct +{ + fe25519 x; + fe25519 y; + fe25519 z; +} ge25519_p2; + +typedef struct +{ + fe25519 ysubx; + fe25519 xaddy; + fe25519 t2d; +} ge25519_niels; + +typedef struct +{ + fe25519 ysubx; + fe25519 
xaddy; + fe25519 z; + fe25519 t2d; +} ge25519_pniels; + +extern void ge25519_p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p); +extern void ge25519_p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p); +extern void ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q); +extern void ge25519_dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p); +extern void choose_t(ge25519_niels *t, unsigned long long pos, signed long long b, const ge25519_niels *base_multiples); +extern void ge25519_nielsadd2(ge25519_p3 *r, const ge25519_niels *q); +extern void ge25519_nielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_niels *q); +extern void ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_pniels *q); + +extern const ge25519 ge25519_base; + +extern int ge25519_unpackneg_vartime(ge25519 *r, const unsigned char p[32]); + +extern void ge25519_pack(unsigned char r[32], const ge25519 *p); + +extern int ge25519_isneutral_vartime(const ge25519 *p); + +extern void ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q); + +extern void ge25519_double(ge25519 *r, const ge25519 *p); + +/* computes [s1]p1 + [s2]ge25519_base */ +extern void ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const sc25519 *s1, const sc25519 *s2); + +extern void ge25519_multi_scalarmult_vartime(ge25519 *r, ge25519 *p, sc25519 *s, const unsigned long long npoints); + +extern void ge25519_scalarmult_base(ge25519 *r, const sc25519 *s); + +#endif diff --git a/ext/ed25519-amd64-asm/ge25519_add.c b/ext/ed25519-amd64-asm/ge25519_add.c new file mode 100644 index 00000000..c4d1c68a --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_add.c @@ -0,0 +1,8 @@ +#include "ge25519.h" + +void ge25519_add(ge25519_p3 *r, const ge25519_p3 *p, const ge25519_p3 *q) +{ + ge25519_p1p1 grp1p1; + ge25519_add_p1p1(&grp1p1, p, q); + ge25519_p1p1_to_p3(r, &grp1p1); +} diff --git a/ext/ed25519-amd64-asm/ge25519_add_p1p1.s b/ext/ed25519-amd64-asm/ge25519_add_p1p1.s new file mode 100644 index 00000000..9557e075 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_add_p1p1.s @@ -0,0 +1,4554 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 
d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 x0 + +# qhasm: int64 x1 + +# qhasm: int64 x2 + +# qhasm: int64 x3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_add_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_add_p1p1 +.globl crypto_sign_ed25519_amd64_64_ge25519_add_p1p1 +_crypto_sign_ed25519_amd64_64_ge25519_add_p1p1: +crypto_sign_ed25519_amd64_64_ge25519_add_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $192,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov <qp=int64#3,>qp=int64#4 +# asm 2: mov <qp=%rdx,>qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>a0=int64#3 +# asm 2: movq 32(<pp=%rsi),>a0=%rdx +movq 32(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>a1=int64#5 +# asm 2: movq 40(<pp=%rsi),>a1=%r8 +movq 40(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>a2=int64#6 +# asm 2: movq 48(<pp=%rsi),>a2=%r9 +movq 48(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>a3=int64#7 +# asm 2: movq 56(<pp=%rsi),>a3=%rax +movq 56(%rsi),%rax + +# 
qhasm: b0 = a0 +# asm 1: mov <a0=int64#3,>b0=int64#8 +# asm 2: mov <a0=%rdx,>b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov <a1=int64#5,>b1=int64#9 +# asm 2: mov <a1=%r8,>b1=%r11 +mov %r8,%r11 + +# qhasm: b2 = a2 +# asm 1: mov <a2=int64#6,>b2=int64#10 +# asm 2: mov <a2=%r9,>b2=%r12 +mov %r9,%r12 + +# qhasm: b3 = a3 +# asm 1: mov <a3=int64#7,>b3=int64#11 +# asm 2: mov <a3=%rax,>b3=%r13 +mov %rax,%r13 + +# qhasm: carry? a0 -= *(uint64 *)(pp + 0) +# asm 1: subq 0(<pp=int64#2),<a0=int64#3 +# asm 2: subq 0(<pp=%rsi),<a0=%rdx +subq 0(%rsi),%rdx + +# qhasm: carry? a1 -= *(uint64 *)(pp + 8) - carry +# asm 1: sbbq 8(<pp=int64#2),<a1=int64#5 +# asm 2: sbbq 8(<pp=%rsi),<a1=%r8 +sbbq 8(%rsi),%r8 + +# qhasm: carry? a2 -= *(uint64 *)(pp + 16) - carry +# asm 1: sbbq 16(<pp=int64#2),<a2=int64#6 +# asm 2: sbbq 16(<pp=%rsi),<a2=%r9 +sbbq 16(%rsi),%r9 + +# qhasm: carry? a3 -= *(uint64 *)(pp + 24) - carry +# asm 1: sbbq 24(<pp=int64#2),<a3=int64#7 +# asm 2: sbbq 24(<pp=%rsi),<a3=%rax +sbbq 24(%rsi),%rax + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#12,<subt1=int64#13 +# asm 2: cmovae <subt0=%r14,<subt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? a0 -= subt1 +# asm 1: sub <subt1=int64#13,<a0=int64#3 +# asm 2: sub <subt1=%r15,<a0=%rdx +sub %r15,%rdx + +# qhasm: carry? a1 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a1=int64#5 +# asm 2: sbb <subt0=%r14,<a1=%r8 +sbb %r14,%r8 + +# qhasm: carry? a2 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a2=int64#6 +# asm 2: sbb <subt0=%r14,<a2=%r9 +sbb %r14,%r9 + +# qhasm: carry? a3 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a3=int64#7 +# asm 2: sbb <subt0=%r14,<a3=%rax +sbb %r14,%rax + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#13,<subt0=int64#12 +# asm 2: cmovc <subt1=%r15,<subt0=%r14 +cmovc %r15,%r14 + +# qhasm: a0 -= subt0 +# asm 1: sub <subt0=int64#12,<a0=int64#3 +# asm 2: sub <subt0=%r14,<a0=%rdx +sub %r14,%rdx + +# qhasm: carry? b0 += *(uint64 *)(pp + 0) +# asm 1: addq 0(<pp=int64#2),<b0=int64#8 +# asm 2: addq 0(<pp=%rsi),<b0=%r10 +addq 0(%rsi),%r10 + +# qhasm: carry? b1 += *(uint64 *)(pp + 8) + carry +# asm 1: adcq 8(<pp=int64#2),<b1=int64#9 +# asm 2: adcq 8(<pp=%rsi),<b1=%r11 +adcq 8(%rsi),%r11 + +# qhasm: carry? b2 += *(uint64 *)(pp + 16) + carry +# asm 1: adcq 16(<pp=int64#2),<b2=int64#10 +# asm 2: adcq 16(<pp=%rsi),<b2=%r12 +adcq 16(%rsi),%r12 + +# qhasm: carry? b3 += *(uint64 *)(pp + 24) + carry +# asm 1: adcq 24(<pp=int64#2),<b3=int64#11 +# asm 2: adcq 24(<pp=%rsi),<b3=%r13 +adcq 24(%rsi),%r13 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#12,<addt1=int64#13 +# asm 2: cmovae <addt0=%r14,<addt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? b0 += addt1 +# asm 1: add <addt1=int64#13,<b0=int64#8 +# asm 2: add <addt1=%r15,<b0=%r10 +add %r15,%r10 + +# qhasm: carry? b1 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b1=int64#9 +# asm 2: adc <addt0=%r14,<b1=%r11 +adc %r14,%r11 + +# qhasm: carry? b2 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b2=int64#10 +# asm 2: adc <addt0=%r14,<b2=%r12 +adc %r14,%r12 + +# qhasm: carry? 
b3 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b3=int64#11 +# asm 2: adc <addt0=%r14,<b3=%r13 +adc %r14,%r13 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#13,<addt0=int64#12 +# asm 2: cmovc <addt1=%r15,<addt0=%r14 +cmovc %r15,%r14 + +# qhasm: b0 += addt0 +# asm 1: add <addt0=int64#12,<b0=int64#8 +# asm 2: add <addt0=%r14,<b0=%r10 +add %r14,%r10 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#3,>a0_stack=stack64#8 +# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#5,>a1_stack=stack64#9 +# asm 2: movq <a1=%r8,>a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#6,>a2_stack=stack64#10 +# asm 2: movq <a2=%r9,>a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#7,>a3_stack=stack64#11 +# asm 2: movq <a3=%rax,>a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq <b0=int64#8,>b0_stack=stack64#12 +# asm 2: movq <b0=%r10,>b0_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq <b1=int64#9,>b1_stack=stack64#13 +# asm 2: movq <b1=%r11,>b1_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq <b2=int64#10,>b2_stack=stack64#14 +# asm 2: movq <b2=%r12,>b2_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq <b3=int64#11,>b3_stack=stack64#15 +# asm 2: movq <b3=%r13,>b3_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: t10 = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>t10=int64#3 +# asm 2: movq 32(<qp=%rcx),>t10=%rdx +movq 32(%rcx),%rdx + +# qhasm: t11 = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>t11=int64#5 +# asm 2: movq 40(<qp=%rcx),>t11=%r8 +movq 40(%rcx),%r8 + +# qhasm: t12 = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>t12=int64#6 +# asm 2: movq 48(<qp=%rcx),>t12=%r9 +movq 48(%rcx),%r9 + +# qhasm: t13 = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>t13=int64#7 +# asm 2: movq 56(<qp=%rcx),>t13=%rax +movq 56(%rcx),%rax + +# qhasm: t20 = t10 +# asm 1: mov <t10=int64#3,>t20=int64#8 +# asm 2: mov <t10=%rdx,>t20=%r10 +mov %rdx,%r10 + +# qhasm: t21 = t11 +# asm 1: mov <t11=int64#5,>t21=int64#9 +# asm 2: mov <t11=%r8,>t21=%r11 +mov %r8,%r11 + +# qhasm: t22 = t12 +# asm 1: mov <t12=int64#6,>t22=int64#10 +# asm 2: mov <t12=%r9,>t22=%r12 +mov %r9,%r12 + +# qhasm: t23 = t13 +# asm 1: mov <t13=int64#7,>t23=int64#11 +# asm 2: mov <t13=%rax,>t23=%r13 +mov %rax,%r13 + +# qhasm: carry? t10 -= *(uint64 *) (qp + 0) +# asm 1: subq 0(<qp=int64#4),<t10=int64#3 +# asm 2: subq 0(<qp=%rcx),<t10=%rdx +subq 0(%rcx),%rdx + +# qhasm: carry? t11 -= *(uint64 *) (qp + 8) - carry +# asm 1: sbbq 8(<qp=int64#4),<t11=int64#5 +# asm 2: sbbq 8(<qp=%rcx),<t11=%r8 +sbbq 8(%rcx),%r8 + +# qhasm: carry? t12 -= *(uint64 *) (qp + 16) - carry +# asm 1: sbbq 16(<qp=int64#4),<t12=int64#6 +# asm 2: sbbq 16(<qp=%rcx),<t12=%r9 +sbbq 16(%rcx),%r9 + +# qhasm: carry? t13 -= *(uint64 *) (qp + 24) - carry +# asm 1: sbbq 24(<qp=int64#4),<t13=int64#7 +# asm 2: sbbq 24(<qp=%rcx),<t13=%rax +sbbq 24(%rcx),%rax + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#12,<subt1=int64#13 +# asm 2: cmovae <subt0=%r14,<subt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? 
t10 -= subt1 +# asm 1: sub <subt1=int64#13,<t10=int64#3 +# asm 2: sub <subt1=%r15,<t10=%rdx +sub %r15,%rdx + +# qhasm: carry? t11 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<t11=int64#5 +# asm 2: sbb <subt0=%r14,<t11=%r8 +sbb %r14,%r8 + +# qhasm: carry? t12 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<t12=int64#6 +# asm 2: sbb <subt0=%r14,<t12=%r9 +sbb %r14,%r9 + +# qhasm: carry? t13 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<t13=int64#7 +# asm 2: sbb <subt0=%r14,<t13=%rax +sbb %r14,%rax + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#13,<subt0=int64#12 +# asm 2: cmovc <subt1=%r15,<subt0=%r14 +cmovc %r15,%r14 + +# qhasm: t10 -= subt0 +# asm 1: sub <subt0=int64#12,<t10=int64#3 +# asm 2: sub <subt0=%r14,<t10=%rdx +sub %r14,%rdx + +# qhasm: carry? t20 += *(uint64 *) (qp + 0) +# asm 1: addq 0(<qp=int64#4),<t20=int64#8 +# asm 2: addq 0(<qp=%rcx),<t20=%r10 +addq 0(%rcx),%r10 + +# qhasm: carry? t21 += *(uint64 *) (qp + 8) + carry +# asm 1: adcq 8(<qp=int64#4),<t21=int64#9 +# asm 2: adcq 8(<qp=%rcx),<t21=%r11 +adcq 8(%rcx),%r11 + +# qhasm: carry? t22 += *(uint64 *) (qp + 16) + carry +# asm 1: adcq 16(<qp=int64#4),<t22=int64#10 +# asm 2: adcq 16(<qp=%rcx),<t22=%r12 +adcq 16(%rcx),%r12 + +# qhasm: carry? t23 += *(uint64 *) (qp + 24) + carry +# asm 1: adcq 24(<qp=int64#4),<t23=int64#11 +# asm 2: adcq 24(<qp=%rcx),<t23=%r13 +adcq 24(%rcx),%r13 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#12,<addt1=int64#13 +# asm 2: cmovae <addt0=%r14,<addt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? t20 += addt1 +# asm 1: add <addt1=int64#13,<t20=int64#8 +# asm 2: add <addt1=%r15,<t20=%r10 +add %r15,%r10 + +# qhasm: carry? t21 += addt0 + carry +# asm 1: adc <addt0=int64#12,<t21=int64#9 +# asm 2: adc <addt0=%r14,<t21=%r11 +adc %r14,%r11 + +# qhasm: carry? t22 += addt0 + carry +# asm 1: adc <addt0=int64#12,<t22=int64#10 +# asm 2: adc <addt0=%r14,<t22=%r12 +adc %r14,%r12 + +# qhasm: carry? 
t23 += addt0 + carry +# asm 1: adc <addt0=int64#12,<t23=int64#11 +# asm 2: adc <addt0=%r14,<t23=%r13 +adc %r14,%r13 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#13,<addt0=int64#12 +# asm 2: cmovc <addt1=%r15,<addt0=%r14 +cmovc %r15,%r14 + +# qhasm: t20 += addt0 +# asm 1: add <addt0=int64#12,<t20=int64#8 +# asm 2: add <addt0=%r14,<t20=%r10 +add %r14,%r10 + +# qhasm: t10_stack = t10 +# asm 1: movq <t10=int64#3,>t10_stack=stack64#16 +# asm 2: movq <t10=%rdx,>t10_stack=120(%rsp) +movq %rdx,120(%rsp) + +# qhasm: t11_stack = t11 +# asm 1: movq <t11=int64#5,>t11_stack=stack64#17 +# asm 2: movq <t11=%r8,>t11_stack=128(%rsp) +movq %r8,128(%rsp) + +# qhasm: t12_stack = t12 +# asm 1: movq <t12=int64#6,>t12_stack=stack64#18 +# asm 2: movq <t12=%r9,>t12_stack=136(%rsp) +movq %r9,136(%rsp) + +# qhasm: t13_stack = t13 +# asm 1: movq <t13=int64#7,>t13_stack=stack64#19 +# asm 2: movq <t13=%rax,>t13_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: t20_stack = t20 +# asm 1: movq <t20=int64#8,>t20_stack=stack64#20 +# asm 2: movq <t20=%r10,>t20_stack=152(%rsp) +movq %r10,152(%rsp) + +# qhasm: t21_stack = t21 +# asm 1: movq <t21=int64#9,>t21_stack=stack64#21 +# asm 2: movq <t21=%r11,>t21_stack=160(%rsp) +movq %r11,160(%rsp) + +# qhasm: t22_stack = t22 +# asm 1: movq <t22=int64#10,>t22_stack=stack64#22 +# asm 2: movq <t22=%r12,>t22_stack=168(%rsp) +movq %r12,168(%rsp) + +# qhasm: t23_stack = t23 +# asm 1: movq <t23=int64#11,>t23_stack=stack64#23 +# asm 2: movq <t23=%r13,>t23_stack=176(%rsp) +movq %r13,176(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = a0_stack +# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10 +# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: a0 = mulrax +# asm 1: mov <mulrax=int64#7,>a0=int64#11 +# asm 2: mov <mulrax=%rax,>a0=%r13 +mov %rax,%r13 + +# qhasm: a1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>a1=int64#12 +# asm 2: mov <mulrdx=%rdx,>a1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = t11_stack +# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#12 +# asm 2: add <mulrax=%rax,<a1=%r14 +add %rax,%r14 + +# qhasm: a2 = 0 +# asm 1: mov $0,>a2=int64#13 +# asm 2: mov $0,>a2=%r15 +mov $0,%r15 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a2=int64#13 +# asm 2: adc <mulrdx=%rdx,<a2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = t12_stack +# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: a3 = 0 +# asm 1: mov $0,>a3=int64#14 +# asm 2: mov $0,>a3=%rbx +mov $0,%rbx + +# qhasm: a3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a3=int64#14 +# asm 2: adc <mulrdx=%rdx,<a3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = t13_stack +# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = a1_stack +# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10 +# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#12 +# asm 2: add <mulrax=%rax,<a1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t11_stack +# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a2 += mulc +# asm 1: add <mulc=int64#15,<a2=int64#13 +# asm 2: add <mulc=%rbp,<a2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t12_stack +# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
a3 += mulc +# asm 1: add <mulc=int64#15,<a3=int64#14 +# asm 2: add <mulc=%rbp,<a3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t13_stack +# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = a2_stack +# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10 +# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t11_stack +# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a3 += mulc +# asm 1: add <mulc=int64#15,<a3=int64#14 +# asm 2: add <mulc=%rbp,<a3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t12_stack +# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t13_stack +# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = a3_stack +# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10 +# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq <t10_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <t10_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t11_stack +# asm 1: movq <t11_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <t11_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t12_stack +# asm 1: movq <t12_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <t12_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t13_stack +# asm 1: movq <t13_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <t13_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
a0 += mulr4 +# asm 1: add <mulr4=int64#5,<a0=int64#11 +# asm 2: add <mulr4=%r8,<a0=%r13 +add %r8,%r13 + +# qhasm: carry? a1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<a1=int64#12 +# asm 2: adc <mulr5=%r9,<a1=%r14 +adc %r9,%r14 + +# qhasm: carry? a2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<a2=int64#13 +# asm 2: adc <mulr6=%r10,<a2=%r15 +adc %r10,%r15 + +# qhasm: carry? a3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<a3=int64#14 +# asm 2: adc <mulr7=%r11,<a3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? a0 += mulr8 +# asm 1: add <mulr8=int64#5,<a0=int64#11 +# asm 2: add <mulr8=%r8,<a0=%r13 +add %r8,%r13 + +# qhasm: carry? a1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a1=int64#12 +# asm 2: adc <mulzero=%rdx,<a1=%r14 +adc %rdx,%r14 + +# qhasm: carry? a2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a2=int64#13 +# asm 2: adc <mulzero=%rdx,<a2=%r15 +adc %rdx,%r15 + +# qhasm: carry? a3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a3=int64#14 +# asm 2: adc <mulzero=%rdx,<a3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: a0 += mulzero +# asm 1: add <mulzero=int64#3,<a0=int64#11 +# asm 2: add <mulzero=%rdx,<a0=%r13 +add %rdx,%r13 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#11,>a0_stack=stack64#8 +# asm 2: movq <a0=%r13,>a0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#12,>a1_stack=stack64#9 +# asm 2: movq <a1=%r14,>a1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#13,>a2_stack=stack64#10 +# asm 2: movq <a2=%r15,>a2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#14,>a3_stack=stack64#11 +# asm 2: movq <a3=%rbx,>a3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = b0_stack +# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10 +# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12 +movq 88(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: rx0 = mulrax +# asm 1: mov <mulrax=int64#7,>rx0=int64#11 +# asm 2: mov <mulrax=%rax,>rx0=%r13 +mov %rax,%r13 + +# qhasm: rx1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rx1=int64#12 +# asm 2: mov <mulrdx=%rdx,>rx1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = t21_stack +# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax +movq 
160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#12 +# asm 2: add <mulrax=%rax,<rx1=%r14 +add %rax,%r14 + +# qhasm: rx2 = 0 +# asm 1: mov $0,>rx2=int64#13 +# asm 2: mov $0,>rx2=%r15 +mov $0,%r15 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx2=int64#13 +# asm 2: adc <mulrdx=%rdx,<rx2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = t22_stack +# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#13 +# asm 2: add <mulrax=%rax,<rx2=%r15 +add %rax,%r15 + +# qhasm: rx3 = 0 +# asm 1: mov $0,>rx3=int64#14 +# asm 2: mov $0,>rx3=%rbx +mov $0,%rbx + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx3=int64#14 +# asm 2: adc <mulrdx=%rdx,<rx3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = t23_stack +# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = b1_stack +# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10 +# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12 +movq 96(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#12 +# asm 2: add <mulrax=%rax,<rx1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t21_stack +# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#13 +# asm 2: add <mulrax=%rax,<rx2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rx2 += mulc +# asm 1: add <mulc=int64#15,<rx2=int64#13 +# asm 2: add <mulc=%rbp,<rx2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t22_stack +# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#15,<rx3=int64#14 +# asm 2: add <mulc=%rbp,<rx3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t23_stack +# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = b2_stack +# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10 +# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12 +movq 104(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#13 +# asm 2: add <mulrax=%rax,<rx2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t21_stack +# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rx3 += mulc +# asm 1: add <mulc=int64#15,<rx3=int64#14 +# asm 2: add <mulc=%rbp,<rx3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t22_stack +# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t23_stack +# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = b3_stack +# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10 +# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12 +movq 112(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq <t20_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <t20_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t21_stack +# asm 1: movq <t21_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <t21_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t22_stack +# asm 1: movq <t22_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <t22_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = t23_stack +# asm 1: movq <t23_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <t23_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? rx0 += mulr4 +# asm 1: add <mulr4=int64#5,<rx0=int64#11 +# asm 2: add <mulr4=%r8,<rx0=%r13 +add %r8,%r13 + +# qhasm: carry? rx1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<rx1=int64#12 +# asm 2: adc <mulr5=%r9,<rx1=%r14 +adc %r9,%r14 + +# qhasm: carry? rx2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<rx2=int64#13 +# asm 2: adc <mulr6=%r10,<rx2=%r15 +adc %r10,%r15 + +# qhasm: carry? rx3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<rx3=int64#14 +# asm 2: adc <mulr7=%r11,<rx3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? rx0 += mulr8 +# asm 1: add <mulr8=int64#5,<rx0=int64#11 +# asm 2: add <mulr8=%r8,<rx0=%r13 +add %r8,%r13 + +# qhasm: carry? rx1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx1=int64#12 +# asm 2: adc <mulzero=%rdx,<rx1=%r14 +adc %rdx,%r14 + +# qhasm: carry? rx2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx2=int64#13 +# asm 2: adc <mulzero=%rdx,<rx2=%r15 +adc %rdx,%r15 + +# qhasm: carry? rx3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx3=int64#14 +# asm 2: adc <mulzero=%rdx,<rx3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: rx0 += mulzero +# asm 1: add <mulzero=int64#3,<rx0=int64#11 +# asm 2: add <mulzero=%rdx,<rx0=%r13 +add %rdx,%r13 + +# qhasm: ry0 = rx0 +# asm 1: mov <rx0=int64#11,>ry0=int64#3 +# asm 2: mov <rx0=%r13,>ry0=%rdx +mov %r13,%rdx + +# qhasm: ry1 = rx1 +# asm 1: mov <rx1=int64#12,>ry1=int64#5 +# asm 2: mov <rx1=%r14,>ry1=%r8 +mov %r14,%r8 + +# qhasm: ry2 = rx2 +# asm 1: mov <rx2=int64#13,>ry2=int64#6 +# asm 2: mov <rx2=%r15,>ry2=%r9 +mov %r15,%r9 + +# qhasm: ry3 = rx3 +# asm 1: mov <rx3=int64#14,>ry3=int64#7 +# asm 2: mov <rx3=%rbx,>ry3=%rax +mov %rbx,%rax + +# qhasm: carry? ry0 += a0_stack +# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3 +# asm 2: addq <a0_stack=56(%rsp),<ry0=%rdx +addq 56(%rsp),%rdx + +# qhasm: carry? 
ry1 += a1_stack + carry +# asm 1: adcq <a1_stack=stack64#9,<ry1=int64#5 +# asm 2: adcq <a1_stack=64(%rsp),<ry1=%r8 +adcq 64(%rsp),%r8 + +# qhasm: carry? ry2 += a2_stack + carry +# asm 1: adcq <a2_stack=stack64#10,<ry2=int64#6 +# asm 2: adcq <a2_stack=72(%rsp),<ry2=%r9 +adcq 72(%rsp),%r9 + +# qhasm: carry? ry3 += a3_stack + carry +# asm 1: adcq <a3_stack=stack64#11,<ry3=int64#7 +# asm 2: adcq <a3_stack=80(%rsp),<ry3=%rax +adcq 80(%rsp),%rax + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#8 +# asm 2: mov $0,>addt0=%r10 +mov $0,%r10 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#9 +# asm 2: mov $38,>addt1=%r11 +mov $38,%r11 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#8,<addt1=int64#9 +# asm 2: cmovae <addt0=%r10,<addt1=%r11 +cmovae %r10,%r11 + +# qhasm: carry? ry0 += addt1 +# asm 1: add <addt1=int64#9,<ry0=int64#3 +# asm 2: add <addt1=%r11,<ry0=%rdx +add %r11,%rdx + +# qhasm: carry? ry1 += addt0 + carry +# asm 1: adc <addt0=int64#8,<ry1=int64#5 +# asm 2: adc <addt0=%r10,<ry1=%r8 +adc %r10,%r8 + +# qhasm: carry? ry2 += addt0 + carry +# asm 1: adc <addt0=int64#8,<ry2=int64#6 +# asm 2: adc <addt0=%r10,<ry2=%r9 +adc %r10,%r9 + +# qhasm: carry? ry3 += addt0 + carry +# asm 1: adc <addt0=int64#8,<ry3=int64#7 +# asm 2: adc <addt0=%r10,<ry3=%rax +adc %r10,%rax + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#9,<addt0=int64#8 +# asm 2: cmovc <addt1=%r11,<addt0=%r10 +cmovc %r11,%r10 + +# qhasm: ry0 += addt0 +# asm 1: add <addt0=int64#8,<ry0=int64#3 +# asm 2: add <addt0=%r10,<ry0=%rdx +add %r10,%rdx + +# qhasm: carry? rx0 -= a0_stack +# asm 1: subq <a0_stack=stack64#8,<rx0=int64#11 +# asm 2: subq <a0_stack=56(%rsp),<rx0=%r13 +subq 56(%rsp),%r13 + +# qhasm: carry? rx1 -= a1_stack - carry +# asm 1: sbbq <a1_stack=stack64#9,<rx1=int64#12 +# asm 2: sbbq <a1_stack=64(%rsp),<rx1=%r14 +sbbq 64(%rsp),%r14 + +# qhasm: carry? rx2 -= a2_stack - carry +# asm 1: sbbq <a2_stack=stack64#10,<rx2=int64#13 +# asm 2: sbbq <a2_stack=72(%rsp),<rx2=%r15 +sbbq 72(%rsp),%r15 + +# qhasm: carry? rx3 -= a3_stack - carry +# asm 1: sbbq <a3_stack=stack64#11,<rx3=int64#14 +# asm 2: sbbq <a3_stack=80(%rsp),<rx3=%rbx +sbbq 80(%rsp),%rbx + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#8 +# asm 2: mov $0,>subt0=%r10 +mov $0,%r10 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#9 +# asm 2: mov $38,>subt1=%r11 +mov $38,%r11 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#8,<subt1=int64#9 +# asm 2: cmovae <subt0=%r10,<subt1=%r11 +cmovae %r10,%r11 + +# qhasm: carry? rx0 -= subt1 +# asm 1: sub <subt1=int64#9,<rx0=int64#11 +# asm 2: sub <subt1=%r11,<rx0=%r13 +sub %r11,%r13 + +# qhasm: carry? rx1 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<rx1=int64#12 +# asm 2: sbb <subt0=%r10,<rx1=%r14 +sbb %r10,%r14 + +# qhasm: carry? rx2 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<rx2=int64#13 +# asm 2: sbb <subt0=%r10,<rx2=%r15 +sbb %r10,%r15 + +# qhasm: carry? 
rx3 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<rx3=int64#14 +# asm 2: sbb <subt0=%r10,<rx3=%rbx +sbb %r10,%rbx + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#9,<subt0=int64#8 +# asm 2: cmovc <subt1=%r11,<subt0=%r10 +cmovc %r11,%r10 + +# qhasm: rx0 -= subt0 +# asm 1: sub <subt0=int64#8,<rx0=int64#11 +# asm 2: sub <subt0=%r10,<rx0=%r13 +sub %r10,%r13 + +# qhasm: *(uint64 *) (rp + 0) = rx0 +# asm 1: movq <rx0=int64#11,0(<rp=int64#1) +# asm 2: movq <rx0=%r13,0(<rp=%rdi) +movq %r13,0(%rdi) + +# qhasm: *(uint64 *) (rp + 8) = rx1 +# asm 1: movq <rx1=int64#12,8(<rp=int64#1) +# asm 2: movq <rx1=%r14,8(<rp=%rdi) +movq %r14,8(%rdi) + +# qhasm: *(uint64 *) (rp + 16) = rx2 +# asm 1: movq <rx2=int64#13,16(<rp=int64#1) +# asm 2: movq <rx2=%r15,16(<rp=%rdi) +movq %r15,16(%rdi) + +# qhasm: *(uint64 *) (rp + 24) = rx3 +# asm 1: movq <rx3=int64#14,24(<rp=int64#1) +# asm 2: movq <rx3=%rbx,24(<rp=%rdi) +movq %rbx,24(%rdi) + +# qhasm: *(uint64 *) (rp + 64) = ry0 +# asm 1: movq <ry0=int64#3,64(<rp=int64#1) +# asm 2: movq <ry0=%rdx,64(<rp=%rdi) +movq %rdx,64(%rdi) + +# qhasm: *(uint64 *) (rp + 72) = ry1 +# asm 1: movq <ry1=int64#5,72(<rp=int64#1) +# asm 2: movq <ry1=%r8,72(<rp=%rdi) +movq %r8,72(%rdi) + +# qhasm: *(uint64 *) (rp + 80) = ry2 +# asm 1: movq <ry2=int64#6,80(<rp=int64#1) +# asm 2: movq <ry2=%r9,80(<rp=%rdi) +movq %r9,80(%rdi) + +# qhasm: *(uint64 *) (rp + 88) = ry3 +# asm 1: movq <ry3=int64#7,88(<rp=int64#1) +# asm 2: movq <ry3=%rax,88(<rp=%rdi) +movq %rax,88(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulx0=int64#10 +# asm 2: movq 96(<pp=%rsi),>mulx0=%r12 +movq 96(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: c0 = mulrax +# asm 1: mov <mulrax=int64#7,>c0=int64#11 +# asm 2: mov <mulrax=%rax,>c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>c1=int64#12 +# asm 2: mov <mulrdx=%rdx,>c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: c2 = 0 +# asm 1: mov $0,>c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c2=int64#13 +# asm 2: adc <mulrdx=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: c3 = 0 +# asm 1: mov $0,>c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c3=int64#14 +# asm 2: adc <mulrdx=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulx1=int64#10 +# asm 2: movq 104(<pp=%rsi),>mulx1=%r12 +movq 104(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c2 += mulc +# asm 1: add <mulc=int64#15,<c2=int64#13 +# asm 2: add <mulc=%rbp,<c2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulx2=int64#10 +# asm 2: movq 112(<pp=%rsi),>mulx2=%r12 +movq 112(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulx3=int64#10 +# asm 2: movq 120(<pp=%rsi),>mulx3=%r12 +movq 120(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
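# note: reduction of the 512-bit product back to four limbs.  The upper
# limbs mulr4..mulr7 have each been multiplied by the constant
# crypto_sign_ed25519_amd64_64_38; below they are added into c0..c3, using
# 2^256 ≡ 38 (mod 2^255 - 19).  The leftover high word mulr8, and then a
# possible final carry, are folded in the same way.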
c0 += mulr4 +# asm 1: add <mulr4=int64#5,<c0=int64#11 +# asm 2: add <mulr4=%r8,<c0=%r13 +add %r8,%r13 + +# qhasm: carry? c1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<c1=int64#12 +# asm 2: adc <mulr5=%r9,<c1=%r14 +adc %r9,%r14 + +# qhasm: carry? c2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<c2=int64#13 +# asm 2: adc <mulr6=%r10,<c2=%r15 +adc %r10,%r15 + +# qhasm: carry? c3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<c3=int64#14 +# asm 2: adc <mulr7=%r11,<c3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? c0 += mulr8 +# asm 1: add <mulr8=int64#5,<c0=int64#11 +# asm 2: add <mulr8=%r8,<c0=%r13 +add %r8,%r13 + +# qhasm: carry? c1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c1=int64#12 +# asm 2: adc <mulzero=%rdx,<c1=%r14 +adc %rdx,%r14 + +# qhasm: carry? c2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c2=int64#13 +# asm 2: adc <mulzero=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: carry? c3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c3=int64#14 +# asm 2: adc <mulzero=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: c0 += mulzero +# asm 1: add <mulzero=int64#3,<c0=int64#11 +# asm 2: add <mulzero=%rdx,<c0=%r13 +add %rdx,%r13 + +# qhasm: c0_stack = c0 +# asm 1: movq <c0=int64#11,>c0_stack=stack64#8 +# asm 2: movq <c0=%r13,>c0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq <c1=int64#12,>c1_stack=stack64#9 +# asm 2: movq <c1=%r14,>c1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq <c2=int64#13,>c2_stack=stack64#10 +# asm 2: movq <c2=%r15,>c2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq <c3=int64#14,>c3_stack=stack64#11 +# asm 2: movq <c3=%rbx,>c3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = c0_stack +# asm 1: movq <c0_stack=stack64#8,>mulx0=int64#10 +# asm 2: movq <c0_stack=56(%rsp),>mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: c0 = mulrax +# asm 1: mov <mulrax=int64#7,>c0=int64#11 +# asm 2: mov <mulrax=%rax,>c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>c1=int64#12 +# asm 2: mov <mulrdx=%rdx,>c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 
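# note: the reduced product saved in c0_stack..c3_stack is now multiplied by
# the four-limb constant crypto_sign_ed25519_amd64_64_EC2D0..3, which appears
# to hold 2*d of the Edwards curve, matching the C = 2d*T1*T2 term of the
# extended-coordinates addition formulas.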
*)&crypto_sign_ed25519_amd64_64_EC2D1 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: c2 = 0 +# asm 1: mov $0,>c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c2=int64#13 +# asm 2: adc <mulrdx=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: c3 = 0 +# asm 1: mov $0,>c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c3=int64#14 +# asm 2: adc <mulrdx=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = c1_stack +# asm 1: movq <c1_stack=stack64#9,>mulx1=int64#10 +# asm 2: movq <c1_stack=64(%rsp),>mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
c2 += mulc +# asm 1: add <mulc=int64#15,<c2=int64#13 +# asm 2: add <mulc=%rbp,<c2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = c2_stack +# asm 1: movq <c2_stack=stack64#10,>mulx2=int64#10 +# asm 2: movq <c2_stack=72(%rsp),>mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? 
c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = c3_stack +# asm 1: movq <c3_stack=stack64#11,>mulx3=int64#10 +# asm 2: movq <c3_stack=80(%rsp),>mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? 
c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D2 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D3 +# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? c0 += mulr4 +# asm 1: add <mulr4=int64#5,<c0=int64#11 +# asm 2: add <mulr4=%r8,<c0=%r13 +add %r8,%r13 + +# qhasm: carry? c1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<c1=int64#12 +# asm 2: adc <mulr5=%r9,<c1=%r14 +adc %r9,%r14 + +# qhasm: carry? c2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<c2=int64#13 +# asm 2: adc <mulr6=%r10,<c2=%r15 +adc %r10,%r15 + +# qhasm: carry? c3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<c3=int64#14 +# asm 2: adc <mulr7=%r11,<c3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? 
c0 += mulr8 +# asm 1: add <mulr8=int64#5,<c0=int64#11 +# asm 2: add <mulr8=%r8,<c0=%r13 +add %r8,%r13 + +# qhasm: carry? c1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c1=int64#12 +# asm 2: adc <mulzero=%rdx,<c1=%r14 +adc %rdx,%r14 + +# qhasm: carry? c2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c2=int64#13 +# asm 2: adc <mulzero=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: carry? c3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c3=int64#14 +# asm 2: adc <mulzero=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: c0 += mulzero +# asm 1: add <mulzero=int64#3,<c0=int64#11 +# asm 2: add <mulzero=%rdx,<c0=%r13 +add %rdx,%r13 + +# qhasm: c0_stack = c0 +# asm 1: movq <c0=int64#11,>c0_stack=stack64#8 +# asm 2: movq <c0=%r13,>c0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq <c1=int64#12,>c1_stack=stack64#9 +# asm 2: movq <c1=%r14,>c1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq <c2=int64#13,>c2_stack=stack64#10 +# asm 2: movq <c2=%r15,>c2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq <c3=int64#14,>c3_stack=stack64#11 +# asm 2: movq <c3=%rbx,>c3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulx0=int64#10 +# asm 2: movq 64(<pp=%rsi),>mulx0=%r12 +movq 64(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: rt0 = mulrax +# asm 1: mov <mulrax=int64#7,>rt0=int64#11 +# asm 2: mov <mulrax=%rax,>rt0=%r13 +mov %rax,%r13 + +# qhasm: rt1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rt1=int64#12 +# asm 2: mov <mulrdx=%rdx,>rt1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#12 +# asm 2: add <mulrax=%rax,<rt1=%r14 +add %rax,%r14 + +# qhasm: rt2 = 0 +# asm 1: mov $0,>rt2=int64#13 +# asm 2: mov $0,>rt2=%r15 +mov $0,%r15 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt2=int64#13 +# asm 2: adc <mulrdx=%rdx,<rt2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
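# note: c0_stack..c3_stack now hold the reduced product with the EC2D
# constant from above.  The multiply being set up here repeats the same
# schoolbook pattern for the field elements at pp+64 and qp+64 (presumably
# the z coordinates), accumulating into rt0..rt3 and mulr4..mulr7.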
rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#13 +# asm 2: add <mulrax=%rax,<rt2=%r15 +add %rax,%r15 + +# qhasm: rt3 = 0 +# asm 1: mov $0,>rt3=int64#14 +# asm 2: mov $0,>rt3=%rbx +mov $0,%rbx + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt3=int64#14 +# asm 2: adc <mulrdx=%rdx,<rt3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulx1=int64#10 +# asm 2: movq 72(<pp=%rsi),>mulx1=%r12 +movq 72(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#12 +# asm 2: add <mulrax=%rax,<rt1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#13 +# asm 2: add <mulrax=%rax,<rt2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt2 += mulc +# asm 1: add <mulc=int64#15,<rt2=int64#13 +# asm 2: add <mulc=%rbp,<rt2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rt3 += mulc +# asm 1: add <mulc=int64#15,<rt3=int64#14 +# asm 2: add <mulc=%rbp,<rt3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulx2=int64#10 +# asm 2: movq 80(<pp=%rsi),>mulx2=%r12 +movq 80(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#13 +# asm 2: add <mulrax=%rax,<rt2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt3 += mulc +# asm 1: add <mulc=int64#15,<rt3=int64#14 +# asm 2: add <mulc=%rbp,<rt3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulx3=int64#2 +# asm 2: movq 88(<pp=%rsi),>mulx3=%rsi +movq 88(%rsi),%rsi + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#10,<mulr4=int64#5 +# asm 2: add <mulc=%r12,<mulr4=%r8 +add %r12,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#10,<mulr5=int64#6 +# asm 2: add <mulc=%r12,<mulr5=%r9 +add %r12,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#10,<mulr6=int64#8 +# asm 2: add <mulc=%r12,<mulr6=%r10 +add %r12,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rt0 += mulr4 +# asm 1: add <mulr4=int64#2,<rt0=int64#11 +# asm 2: add <mulr4=%rsi,<rt0=%r13 +add %rsi,%r13 + +# qhasm: carry? rt1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rt1=int64#12 +# asm 2: adc <mulr5=%rcx,<rt1=%r14 +adc %rcx,%r14 + +# qhasm: carry? rt2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rt2=int64#13 +# asm 2: adc <mulr6=%r8,<rt2=%r15 +adc %r8,%r15 + +# qhasm: carry? rt3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rt3=int64#14 +# asm 2: adc <mulr7=%r9,<rt3=%rbx +adc %r9,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rt0 += mulr8 +# asm 1: add <mulr8=int64#3,<rt0=int64#11 +# asm 2: add <mulr8=%rdx,<rt0=%r13 +add %rdx,%r13 + +# qhasm: carry? rt1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt1=int64#12 +# asm 2: adc <mulzero=%rsi,<rt1=%r14 +adc %rsi,%r14 + +# qhasm: carry? rt2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt2=int64#13 +# asm 2: adc <mulzero=%rsi,<rt2=%r15 +adc %rsi,%r15 + +# qhasm: carry? rt3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt3=int64#14 +# asm 2: adc <mulzero=%rsi,<rt3=%rbx +adc %rsi,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rt0 += mulzero +# asm 1: add <mulzero=int64#2,<rt0=int64#11 +# asm 2: add <mulzero=%rsi,<rt0=%r13 +add %rsi,%r13 + +# qhasm: carry? rt0 += rt0 +# asm 1: add <rt0=int64#11,<rt0=int64#11 +# asm 2: add <rt0=%r13,<rt0=%r13 +add %r13,%r13 + +# qhasm: carry? rt1 += rt1 + carry +# asm 1: adc <rt1=int64#12,<rt1=int64#12 +# asm 2: adc <rt1=%r14,<rt1=%r14 +adc %r14,%r14 + +# qhasm: carry? rt2 += rt2 + carry +# asm 1: adc <rt2=int64#13,<rt2=int64#13 +# asm 2: adc <rt2=%r15,<rt2=%r15 +adc %r15,%r15 + +# qhasm: carry? rt3 += rt3 + carry +# asm 1: adc <rt3=int64#14,<rt3=int64#14 +# asm 2: adc <rt3=%rbx,<rt3=%rbx +adc %rbx,%rbx + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#2,<addt1=int64#3 +# asm 2: cmovae <addt0=%rsi,<addt1=%rdx +cmovae %rsi,%rdx + +# qhasm: carry? rt0 += addt1 +# asm 1: add <addt1=int64#3,<rt0=int64#11 +# asm 2: add <addt1=%rdx,<rt0=%r13 +add %rdx,%r13 + +# qhasm: carry? rt1 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rt1=int64#12 +# asm 2: adc <addt0=%rsi,<rt1=%r14 +adc %rsi,%r14 + +# qhasm: carry? rt2 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rt2=int64#13 +# asm 2: adc <addt0=%rsi,<rt2=%r15 +adc %rsi,%r15 + +# qhasm: carry? 
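# note: the carry chain just above doubled the reduced pp+64 * qp+64 product
# in rt0..rt3 (D = 2*Z1*Z2 in the usual addition formulas, if those are the
# z coordinates).  After the conditional +38 fix-up in progress here, the
# code computes rz = D + C and rt = D - C, where C is the value saved in
# c0_stack..c3_stack, and stores them at rp+32..56 and rp+96..120.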
rt3 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rt3=int64#14 +# asm 2: adc <addt0=%rsi,<rt3=%rbx +adc %rsi,%rbx + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#3,<addt0=int64#2 +# asm 2: cmovc <addt1=%rdx,<addt0=%rsi +cmovc %rdx,%rsi + +# qhasm: rt0 += addt0 +# asm 1: add <addt0=int64#2,<rt0=int64#11 +# asm 2: add <addt0=%rsi,<rt0=%r13 +add %rsi,%r13 + +# qhasm: rz0 = rt0 +# asm 1: mov <rt0=int64#11,>rz0=int64#2 +# asm 2: mov <rt0=%r13,>rz0=%rsi +mov %r13,%rsi + +# qhasm: rz1 = rt1 +# asm 1: mov <rt1=int64#12,>rz1=int64#3 +# asm 2: mov <rt1=%r14,>rz1=%rdx +mov %r14,%rdx + +# qhasm: rz2 = rt2 +# asm 1: mov <rt2=int64#13,>rz2=int64#4 +# asm 2: mov <rt2=%r15,>rz2=%rcx +mov %r15,%rcx + +# qhasm: rz3 = rt3 +# asm 1: mov <rt3=int64#14,>rz3=int64#5 +# asm 2: mov <rt3=%rbx,>rz3=%r8 +mov %rbx,%r8 + +# qhasm: carry? rz0 += c0_stack +# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2 +# asm 2: addq <c0_stack=56(%rsp),<rz0=%rsi +addq 56(%rsp),%rsi + +# qhasm: carry? rz1 += c1_stack + carry +# asm 1: adcq <c1_stack=stack64#9,<rz1=int64#3 +# asm 2: adcq <c1_stack=64(%rsp),<rz1=%rdx +adcq 64(%rsp),%rdx + +# qhasm: carry? rz2 += c2_stack + carry +# asm 1: adcq <c2_stack=stack64#10,<rz2=int64#4 +# asm 2: adcq <c2_stack=72(%rsp),<rz2=%rcx +adcq 72(%rsp),%rcx + +# qhasm: carry? rz3 += c3_stack + carry +# asm 1: adcq <c3_stack=stack64#11,<rz3=int64#5 +# asm 2: adcq <c3_stack=80(%rsp),<rz3=%r8 +adcq 80(%rsp),%r8 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#6,<addt1=int64#7 +# asm 2: cmovae <addt0=%r9,<addt1=%rax +cmovae %r9,%rax + +# qhasm: carry? rz0 += addt1 +# asm 1: add <addt1=int64#7,<rz0=int64#2 +# asm 2: add <addt1=%rax,<rz0=%rsi +add %rax,%rsi + +# qhasm: carry? rz1 += addt0 + carry +# asm 1: adc <addt0=int64#6,<rz1=int64#3 +# asm 2: adc <addt0=%r9,<rz1=%rdx +adc %r9,%rdx + +# qhasm: carry? rz2 += addt0 + carry +# asm 1: adc <addt0=int64#6,<rz2=int64#4 +# asm 2: adc <addt0=%r9,<rz2=%rcx +adc %r9,%rcx + +# qhasm: carry? rz3 += addt0 + carry +# asm 1: adc <addt0=int64#6,<rz3=int64#5 +# asm 2: adc <addt0=%r9,<rz3=%r8 +adc %r9,%r8 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#7,<addt0=int64#6 +# asm 2: cmovc <addt1=%rax,<addt0=%r9 +cmovc %rax,%r9 + +# qhasm: rz0 += addt0 +# asm 1: add <addt0=int64#6,<rz0=int64#2 +# asm 2: add <addt0=%r9,<rz0=%rsi +add %r9,%rsi + +# qhasm: carry? rt0 -= c0_stack +# asm 1: subq <c0_stack=stack64#8,<rt0=int64#11 +# asm 2: subq <c0_stack=56(%rsp),<rt0=%r13 +subq 56(%rsp),%r13 + +# qhasm: carry? rt1 -= c1_stack - carry +# asm 1: sbbq <c1_stack=stack64#9,<rt1=int64#12 +# asm 2: sbbq <c1_stack=64(%rsp),<rt1=%r14 +sbbq 64(%rsp),%r14 + +# qhasm: carry? rt2 -= c2_stack - carry +# asm 1: sbbq <c2_stack=stack64#10,<rt2=int64#13 +# asm 2: sbbq <c2_stack=72(%rsp),<rt2=%r15 +sbbq 72(%rsp),%r15 + +# qhasm: carry? rt3 -= c3_stack - carry +# asm 1: sbbq <c3_stack=stack64#11,<rt3=int64#14 +# asm 2: sbbq <c3_stack=80(%rsp),<rt3=%rbx +sbbq 80(%rsp),%rbx + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#6 +# asm 2: mov $0,>subt0=%r9 +mov $0,%r9 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#7 +# asm 2: mov $38,>subt1=%rax +mov $38,%rax + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#6,<subt1=int64#7 +# asm 2: cmovae <subt0=%r9,<subt1=%rax +cmovae %r9,%rax + +# qhasm: carry? 
rt0 -= subt1 +# asm 1: sub <subt1=int64#7,<rt0=int64#11 +# asm 2: sub <subt1=%rax,<rt0=%r13 +sub %rax,%r13 + +# qhasm: carry? rt1 -= subt0 - carry +# asm 1: sbb <subt0=int64#6,<rt1=int64#12 +# asm 2: sbb <subt0=%r9,<rt1=%r14 +sbb %r9,%r14 + +# qhasm: carry? rt2 -= subt0 - carry +# asm 1: sbb <subt0=int64#6,<rt2=int64#13 +# asm 2: sbb <subt0=%r9,<rt2=%r15 +sbb %r9,%r15 + +# qhasm: carry? rt3 -= subt0 - carry +# asm 1: sbb <subt0=int64#6,<rt3=int64#14 +# asm 2: sbb <subt0=%r9,<rt3=%rbx +sbb %r9,%rbx + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#7,<subt0=int64#6 +# asm 2: cmovc <subt1=%rax,<subt0=%r9 +cmovc %rax,%r9 + +# qhasm: rt0 -= subt0 +# asm 1: sub <subt0=int64#6,<rt0=int64#11 +# asm 2: sub <subt0=%r9,<rt0=%r13 +sub %r9,%r13 + +# qhasm: *(uint64 *)(rp + 32) = rz0 +# asm 1: movq <rz0=int64#2,32(<rp=int64#1) +# asm 2: movq <rz0=%rsi,32(<rp=%rdi) +movq %rsi,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = rz1 +# asm 1: movq <rz1=int64#3,40(<rp=int64#1) +# asm 2: movq <rz1=%rdx,40(<rp=%rdi) +movq %rdx,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = rz2 +# asm 1: movq <rz2=int64#4,48(<rp=int64#1) +# asm 2: movq <rz2=%rcx,48(<rp=%rdi) +movq %rcx,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = rz3 +# asm 1: movq <rz3=int64#5,56(<rp=int64#1) +# asm 2: movq <rz3=%r8,56(<rp=%rdi) +movq %r8,56(%rdi) + +# qhasm: *(uint64 *)(rp + 96) = rt0 +# asm 1: movq <rt0=int64#11,96(<rp=int64#1) +# asm 2: movq <rt0=%r13,96(<rp=%rdi) +movq %r13,96(%rdi) + +# qhasm: *(uint64 *)(rp + 104) = rt1 +# asm 1: movq <rt1=int64#12,104(<rp=int64#1) +# asm 2: movq <rt1=%r14,104(<rp=%rdi) +movq %r14,104(%rdi) + +# qhasm: *(uint64 *)(rp + 112) = rt2 +# asm 1: movq <rt2=int64#13,112(<rp=int64#1) +# asm 2: movq <rt2=%r15,112(<rp=%rdi) +movq %r15,112(%rdi) + +# qhasm: *(uint64 *)(rp + 120) = rt3 +# asm 1: movq <rt3=int64#14,120(<rp=int64#1) +# asm 2: movq <rt3=%rbx,120(<rp=%rdi) +movq %rbx,120(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_base.c b/ext/ed25519-amd64-asm/ge25519_base.c new file mode 100644 index 00000000..a7ae9786 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_base.c @@ -0,0 +1,7 @@ +#include "ge25519.h" + +const ge25519 ge25519_base = {{{0xC9562D608F25D51A, 0x692CC7609525A7B2, 0xC0A4E231FDD6DC5C, 0x216936D3CD6E53FE}}, + {{0x6666666666666658, 0x6666666666666666, 0x6666666666666666, 
0x6666666666666666}}, + {{0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 000000000000000000}}, + {{0x6DDE8AB3A5B7DDA3, 0x20F09F80775152F5, 0x66EA4E8E64ABE37D, 0x67875F0FD78B7665}}}; + diff --git a/ext/ed25519-amd64-asm/ge25519_base_niels.data b/ext/ed25519-amd64-asm/ge25519_base_niels.data new file mode 100644 index 00000000..8e3300cf --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_base_niels.data @@ -0,0 +1,1536 @@ +{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}}, + {{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}}, + {{0xdbbd15674b6fbb59, 0x41e13f00eea2a5ea, 0xcdd49d1cc957c6fa, 0x4f0ebe1faf16ecca}}}, +{{{0x8a99a56042b4d5a8, 0x8f2b810c4e60acf6, 0xe09e236bb16e37aa, 0x6bb595a669c92555}}, + {{0x9224e7fc933c71d7, 0x9f469d967a0ff5b5, 0x5aa69a65e1d60702, 0x590c063fa87d2e2e}}, + {{0x6e347eaadad36802, 0xbaf3599383ee4805, 0x3bcabe10e6076826, 0x49314f0a165ed1b8}}}, +{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}}, + {{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}}, + {{0x9bf211f4f1674834, 0xb84e6b17f62df895, 0xd7de6f075b722a4e, 0x549a04b963bb2a21}}}, +{{{0x95fe050a056818bf, 0x327e89715660faa9, 0xc3e8e3cd06a05073, 0x27933f4c7445a49a}}, + {{0x287351b98efc099f, 0x6765c6f47dfd2538, 0xca348d3dfb0a9265, 0x680e910321e58727}}, + {{0xbf1e45ece51426b0, 0xe32bc63d6dba0f94, 0xe42974d58cf852c0, 0x44f079b1b0e64c18}}}, +{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}}, + {{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}}, + {{0xc832a179e7d003b3, 0x5f729d0a00124d7e, 0x62c1d4a10e6d8ff3, 0x68b8ac5938b27a98}}}, +{{{0x499806b67b7d8ca4, 0x575be28427d22739, 0xbb085ce7204553b9, 0x38b64c41ae417884}}, + {{0x3a0ceeeb77157131, 0x9b27158900c8af88, 0x8065b668da59a736, 0x51e57bb6a2cc38bd}}, + {{0x8f9dad91689de3a4, 0x175f2428f8fb9137, 0x050ab5329fcfb988, 0x7865dfa21354c09f}}}, +{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}}, + {{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}}, + {{0x217a8aacab0fda36, 0xa528c6543d3549c8, 0x37d05b8b13ab7568, 0x233cef623a2cbc37}}}, +{{{0xe2a75dedf39234d9, 0x963d7680e1b558f9, 0x2c2741ac6e3c23fb, 0x3a9024a1320e01c3}}, + {{0x59b7596604dd3e8f, 0x6cb30377e288702c, 0xb1339c665ed9c323, 0x0915e76061bce52f}}, + {{0xdf7de835a834a37e, 0x8be19cda689857ea, 0x2c1185367167b326, 0x589eb3d9dbefd5c2}}}, +{{{0x7ec851ca553e2df3, 0xa71284cba64878b3, 0xe6b5e4193288d1e7, 0x4cf210ec5a9a8883}}, + {{0x322d04a52d9021f6, 0xb9c19f3375c6bf9c, 0x587a3a4342d20b09, 0x143b1cf8aa64fe61}}, + {{0x9f867c7d968acaab, 0x5f54258e27092729, 0xd0a7d34bea180975, 0x21b546a3374126e1}}}, +{{{0xa94ff858a2888343, 0xce0ed4565313ed3c, 0xf55c3dcfb5bf34fa, 0x0a653ca5c9eab371}}, + {{0x490a7a45d185218f, 0x9a15377846049335, 0x0060ea09cc31e1f6, 0x7e041577f86ee965}}, + {{0x66b2a496ce5b67f3, 0xff5492d8bd569796, 0x503cec294a592cd0, 0x566943650813acb2}}}, +{{{0xb818db0c26620798, 0x5d5c31d9606e354a, 0x0982fa4f00a8cdc7, 0x17e12bcd4653e2d4}}, + {{0x5672f9eb1dabb69d, 0xba70b535afe853fc, 0x47ac0f752796d66d, 0x32a5351794117275}}, + {{0xd3a644a6df648437, 0x703b6559880fbfdd, 0xcb852540ad3a1aa5, 0x0900b3f78e4c6468}}}, +{{{0x0a851b9f679d651b, 0xe108cb61033342f2, 0xd601f57fe88b30a3, 0x371f3acaed2dd714}}, + {{0xed280fbec816ad31, 0x52d9595bd8e6efe3, 0x0fe71772f6c623f5, 0x4314030b051e293c}}, + {{0xd560005efbf0bcad, 0x8eb70f2ed1870c5e, 0x201f9033d084e6a0, 0x4c3a5ae1ce7b6670}}}, 
+{{{0x4138a434dcb8fa95, 0x870cf67d6c96840b, 0xde388574297be82c, 0x7c814db27262a55a}}, + {{0xbaf875e4c93da0dd, 0xb93282a771b9294d, 0x80d63fb7f4c6c460, 0x6de9c73dea66c181}}, + {{0x478904d5a04df8f2, 0xfafbae4ab10142d3, 0xf6c8ac63555d0998, 0x5aac4a412f90b104}}}, +{{{0xc64f326b3ac92908, 0x5551b282e663e1e0, 0x476b35f54a1a4b83, 0x1b9da3fe189f68c2}}, + {{0x603a0d0abd7f5134, 0x8089c932e1d3ae46, 0xdf2591398798bd63, 0x1c145cd274ba0235}}, + {{0x32e8386475f3d743, 0x365b8baf6ae5d9ef, 0x825238b6385b681e, 0x234929c1167d65e1}}}, +{{{0x984decaba077ade8, 0x383f77ad19eb389d, 0xc7ec6b7e2954d794, 0x59c77b3aeb7c3a7a}}, + {{0x48145cc21d099fcf, 0x4535c192cc28d7e5, 0x80e7c1e548247e01, 0x4a5f28743b2973ee}}, + {{0xd3add725225ccf62, 0x911a3381b2152c5d, 0xd8b39fad5b08f87d, 0x6f05606b4799fe3b}}}, +{{{0x9ffe9e92177ba962, 0x98aee71d0de5cae1, 0x3ff4ae942d831044, 0x714de12e58533ac8}}, + {{0x5b433149f91b6483, 0xadb5dc655a2cbf62, 0x87fa8412632827b3, 0x60895e91ab49f8d8}}, + {{0xe9ecf2ed0cf86c18, 0xb46d06120735dfd4, 0xbc9da09804b96be7, 0x73e2e62fd96dc26b}}}, +{{{0xed5b635449aa515e, 0xa865c49f0bc6823a, 0x850c1fe95b42d1c4, 0x30d76d6f03d315b9}}, + {{0x2eccdd0e632f9c1d, 0x51d0b69676893115, 0x52dfb76ba8637a58, 0x6dd37d49a00eef39}}, + {{0x6c4444172106e4c7, 0xfb53d680928d7f69, 0xb4739ea4694d3f26, 0x10c697112e864bb0}}}, +{{{0x6493c4277dbe5fde, 0x265d4fad19ad7ea2, 0x0e00dfc846304590, 0x25e61cabed66fe09}}, + {{0x0ca62aa08358c805, 0x6a3d4ae37a204247, 0x7464d3a63b11eddc, 0x03bf9baf550806ef}}, + {{0x3f13e128cc586604, 0x6f5873ecb459747e, 0xa0b63dedcc1268f5, 0x566d78634586e22c}}}, +{{{0x1637a49f9cc10834, 0xbc8e56d5a89bc451, 0x1cb5ec0f7f7fd2db, 0x33975bca5ecc35d9}}, + {{0xa1054285c65a2fd0, 0x6c64112af31667c3, 0x680ae240731aee58, 0x14fba5f34793b22a}}, + {{0x3cd746166985f7d4, 0x593e5e84c9c80057, 0x2fc3f2b67b61131e, 0x14829cea83fc526c}}}, +{{{0xff437b8497dd95c2, 0x6c744e30aa4eb5a7, 0x9e0c5d613c85e88b, 0x2fd9c71e5f758173}}, + {{0x21e70b2f4e71ecb8, 0xe656ddb940a477e3, 0xbf6556cece1d4f80, 0x05fc3bc4535d7b7e}}, + {{0x24b8b3ae52afdedd, 0x3495638ced3b30cf, 0x33a4bc83a9be8195, 0x373767475c651f04}}}, +{{{0x2fba99fd40d1add9, 0xb307166f96f4d027, 0x4363f05215f03bae, 0x1fbea56c3b18f999}}, + {{0x634095cb14246590, 0xef12144016c15535, 0x9e38140c8910bc60, 0x6bf5905730907c8c}}, + {{0x0fa778f1e1415b8a, 0x06409ff7bac3a77e, 0x6f52d7b89aa29a50, 0x02521cf67a635a56}}}, +{{{0x513fee0b0a9d5294, 0x8f98e75c0fdf5a66, 0xd4618688bfe107ce, 0x3fa00a7e71382ced}}, + {{0xb1146720772f5ee4, 0xe8f894b196079ace, 0x4af8224d00ac824a, 0x001753d9f7cd6cc4}}, + {{0x3c69232d963ddb34, 0x1dde87dab4973858, 0xaad7d1f9a091f285, 0x12b5fe2fa048edb6}}}, +{{{0x71f0fbc496fce34d, 0x73b9826badf35bed, 0xd2047261ff28c561, 0x749b76f96fb1206f}}, + {{0xdf2b7c26ad6f1e92, 0x4b66d323504b8913, 0x8c409dc0751c8bc3, 0x6f7e93c20796c7b8}}, + {{0x1f5af604aea6ae05, 0xc12351f1bee49c99, 0x61a808b5eeff6b66, 0x0fcec10f01e02151}}}, +{{{0x644d58a649fe1e44, 0x21fcaea231ad777e, 0x02441c5a887fd0d2, 0x4901aa7183c511f3}}, + {{0x3df2d29dc4244e45, 0x2b020e7493d8de0a, 0x6cc8067e820c214d, 0x413779166feab90a}}, + {{0x08b1b7548c1af8f0, 0xce0f7a7c246299b4, 0xf760b0f91e06d939, 0x41bb887b726d1213}}}, +{{{0x9267806c567c49d8, 0x066d04ccca791e6a, 0xa69f5645e3cc394b, 0x5c95b686a0788cd2}}, + {{0x97d980e0aa39f7d2, 0x35d0384252c6b51c, 0x7d43f49307cd55aa, 0x56bd36cfb78ac362}}, + {{0x2ac519c10d14a954, 0xeaf474b494b5fa90, 0xe6af8382a9f87a5a, 0x0dea6db1879be094}}}, +{{{0xaa66bf547344e5ab, 0xda1258888f1b4309, 0x5e87d2b3fd564b2f, 0x5b2c78885483b1dd}}, + {{0x15baeb74d6a8797a, 0x7ef55cf1fac41732, 0x29001f5a3c8b05c5, 0x0ad7cc8752eaccfb}}, + 
{{0x52151362793408cf, 0xeb0f170319963d94, 0xa833b2fa883d9466, 0x093a7fa775003c78}}}, +{{{0xe5107de63a16d7be, 0xa377ffdc9af332cf, 0x70d5bf18440b677f, 0x6a252b19a4a31403}}, + {{0xb8e9604460a91286, 0x7f3fd8047778d3de, 0x67d01e31bf8a5e2d, 0x7b038a06c27b653e}}, + {{0x9ed919d5d36990f3, 0x5213aebbdb4eb9f2, 0xc708ea054cb99135, 0x58ded57f72260e56}}}, +{{{0x78e79dade9413d77, 0xf257f9d59729e67d, 0x59db910ee37aa7e6, 0x6aa11b5bbb9e039c}}, + {{0xda6d53265b0fd48b, 0x8960823193bfa988, 0xd78ac93261d57e28, 0x79f2942d3a5c8143}}, + {{0x97da2f25b6c88de9, 0x251ba7eaacf20169, 0x09b44f87ef4eb4e4, 0x7d90ab1bbc6a7da5}}}, +{{{0x9acca683a7016bfe, 0x90505f4df2c50b6d, 0x6b610d5fcce435aa, 0x19a10d446198ff96}}, + {{0x1a07a3f496b3c397, 0x11ceaa188f4e2532, 0x7d9498d5a7751bf0, 0x19ed161f508dd8a0}}, + {{0x560a2cd687dce6ca, 0x7f3568c48664cf4d, 0x8741e95222803a38, 0x483bdab1595653fc}}}, +{{{0xfa780f148734fa49, 0x106f0b70360534e0, 0x2210776fe3e307bd, 0x3286c109dde6a0fe}}, + {{0xd6cf4d0ab4da80f6, 0x82483e45f8307fe0, 0x05005269ae6f9da4, 0x1c7052909cf7877a}}, + {{0x32ee7de2874e98d4, 0x14c362e9b97e0c60, 0x5781dcde6a60a38a, 0x217dd5eaaa7aa840}}}, +{{{0x9db7c4d0248e1eb0, 0xe07697e14d74bf52, 0x1e6a9b173c562354, 0x7fa7c21f795a4965}}, + {{0x8bdf1fb9be8c0ec8, 0x00bae7f8e30a0282, 0x4963991dad6c4f6c, 0x07058a6e5df6f60a}}, + {{0xe9eb02c4db31f67f, 0xed25fd8910bcfb2b, 0x46c8131f5c5cddb4, 0x33b21c13a0cb9bce}}}, +{{{0x360692f8087d8e31, 0xf4dcc637d27163f7, 0x25a4e62065ea5963, 0x659bf72e5ac160d9}}, + {{0x9aafb9b05ee38c5b, 0xbf9d2d4e071a13c7, 0x8eee6e6de933290a, 0x1c3bab17ae109717}}, + {{0x1c9ab216c7cab7b0, 0x7d65d37407bbc3cc, 0x52744750504a58d5, 0x09f2606b131a2990}}}, +{{{0x40e87d44744346be, 0x1d48dad415b52b25, 0x7c3a8a18a13b603e, 0x4eb728c12fcdbdf7}}, + {{0x7e234c597c6691ae, 0x64889d3d0a85b4c8, 0xdae2c90c354afae7, 0x0a871e070c6a9e1d}}, + {{0x3301b5994bbc8989, 0x736bae3a5bdd4260, 0x0d61ade219d59e3c, 0x3ee7300f2685d464}}}, +{{{0xf5d255e49e7dd6b7, 0x8016115c610b1eac, 0x3c99975d92e187ca, 0x13815762979125c2}}, + {{0x43fa7947841e7518, 0xe5c6fa59639c46d7, 0xa1065e1de3052b74, 0x7d47c6a2cfb89030}}, + {{0x3fdad0148ef0d6e0, 0x9d3e749a91546f3c, 0x71ec621026bb8157, 0x148cf58d34c9ec80}}}, +{{{0x46a492f67934f027, 0x469984bef6840aa9, 0x5ca1bc2a89611854, 0x3ff2fa1ebd5dbbd4}}, + {{0xe2572f7d9ae4756d, 0x56c345bb88f3487f, 0x9fd10b6d6960a88d, 0x278febad4eaea1b9}}, + {{0xb1aa681f8c933966, 0x8c21949c20290c98, 0x39115291219d3c52, 0x4104dd02fe9c677b}}}, +{{{0x72b2bf5e1124422a, 0xa1fa0c3398a33ab5, 0x94cb6101fa52b666, 0x2c863b00afaf53d5}}, + {{0x81214e06db096ab8, 0x21a8b6c90ce44f35, 0x6524c12a409e2af5, 0x0165b5a48efca481}}, + {{0xf190a474a0846a76, 0x12eff984cd2f7cc0, 0x695e290658aa2b8f, 0x591b67d9bffec8b8}}}, +{{{0x312f0d1c80b49bfa, 0x5979515eabf3ec8a, 0x727033c09ef01c88, 0x3de02ec7ca8f7bcb}}, + {{0x99b9b3719f18b55d, 0xe465e5faa18c641e, 0x61081136c29f05ed, 0x489b4f867030128b}}, + {{0xd232102d3aeb92ef, 0xe16253b46116a861, 0x3d7eabe7190baa24, 0x49f5fbba496cbebf}}}, +{{{0x30949a108a5bcfd4, 0xdc40dd70bc6473eb, 0x92c294c1307c0d1c, 0x5604a86dcbfa6e74}}, + {{0x155d628c1e9c572e, 0x8a4d86acc5884741, 0x91a352f6515763eb, 0x06a1a6c28867515b}}, + {{0x7288d1d47c1764b6, 0x72541140e0418b51, 0x9f031a6018acf6d1, 0x20989e89fe2742c6}}}, +{{{0x499777fd3a2dcc7f, 0x32857c2ca54fd892, 0xa279d864d207e3a0, 0x0403ed1d0ca67e29}}, + {{0x1674278b85eaec2e, 0x5621dc077acb2bdf, 0x640a4c1661cbf45a, 0x730b9950f70595d3}}, + {{0xc94b2d35874ec552, 0xc5e6c8cf98246f8d, 0xf7cb46fa16c035ce, 0x5bd7454308303dcc}}}, +{{{0x7f9ad19528b24cc2, 0x7f6b54656335c181, 0x66b8b66e4fc07236, 0x133a78007380ad83}}, + 
{{0x85c4932115e7792a, 0xc64c89a2bdcdddc9, 0x9d1e3da8ada3d762, 0x5bb7db123067f82c}}, + {{0x0961f467c6ca62be, 0x04ec21d6211952ee, 0x182360779bd54770, 0x740dca6d58f0e0d2}}}, +{{{0x50b70bf5d3f0af0b, 0x4feaf48ae32e71f7, 0x60e84ed3a55bbd34, 0x00ed489b3f50d1ed}}, + {{0x3906c72aed261ae5, 0x9ab68fd988e100f7, 0xf5e9059af3360197, 0x0e53dc78bf2b6d47}}, + {{0xb90829bf7971877a, 0x5e4444636d17e631, 0x4d05c52e18276893, 0x27632d9a5a4a4af5}}}, +{{{0xd11ff05154b260ce, 0xd86dc38e72f95270, 0x601fcd0d267cc138, 0x2b67916429e90ccd}}, + {{0xa98285d187eaffdb, 0xa5b4fbbbd8d0a864, 0xb658f27f022663f7, 0x3bbc2b22d99ce282}}, + {{0xb917c952583c0a58, 0x653ff9b80fe4c6f3, 0x9b0da7d7bcdf3c0c, 0x43a0eeb6ab54d60e}}}, +{{{0x396966a46d4a5487, 0xf811a18aac2bb3ba, 0x66e4685b5628b26b, 0x70a477029d929b92}}, + {{0x3ac6322357875fe8, 0xd9d4f4ecf5fbcb8f, 0x8dee8493382bb620, 0x50c5eaa14c799fdc}}, + {{0xdd0edc8bd6f2fb3c, 0x54c63aa79cc7b7a0, 0xae0b032b2c8d9f1a, 0x6f9ce107602967fb}}}, +{{{0xad1054b1cde1c22a, 0xc4a8e90248eb32df, 0x5f3e7b33accdc0ea, 0x72364713fc79963e}}, + {{0x139693063520e0b5, 0x437fcf7c88ea03fe, 0xf7d4c40bd3c959bc, 0x699154d1f893ded9}}, + {{0x315d5c75b4b27526, 0xcccb842d0236daa5, 0x22f0c8a3345fee8e, 0x73975a617d39dbed}}}, +{{{0xe4024df96375da10, 0x78d3251a1830c870, 0x902b1948658cd91c, 0x7e18b10b29b7438a}}, + {{0x6f37f392f4433e46, 0x0e19b9a11f566b18, 0x220fb78a1fd1d662, 0x362a4258a381c94d}}, + {{0x9071d9132b6beb2f, 0x0f26e9ad28418247, 0xeab91ec9bdec925d, 0x4be65bc8f48af2de}}}, +{{{0x78487feba36e7028, 0x5f3f13001dd8ce34, 0x934fb12d4b30c489, 0x056c244d397f0a2b}}, + {{0x1d50fba257c26234, 0x7bd4823adeb0678b, 0xc2b0dc6ea6538af5, 0x5665eec6351da73e}}, + {{0xdb3ee00943bfb210, 0x4972018720800ac2, 0x26ab5d6173bd8667, 0x20b209c2ab204938}}}, +{{{0x549e342ac07fb34b, 0x02d8220821373d93, 0xbc262d70acd1f567, 0x7a92c9fdfbcac784}}, + {{0x1fcca94516bd3289, 0x448d65aa41420428, 0x59c3b7b216a55d62, 0x49992cc64e612cd8}}, + {{0x65bd1bea70f801de, 0x1befb7c0fe49e28a, 0xa86306cdb1b2ae4a, 0x3b7ac0cd265c2a09}}}, +{{{0x822bee438c01bcec, 0x530cb525c0fbc73b, 0x48519034c1953fe9, 0x265cc261e09a0f5b}}, + {{0xf0d54e4f22ed39a7, 0xa2aae91e5608150a, 0xf421b2e9eddae875, 0x31bc531d6b7de992}}, + {{0xdf3d134da980f971, 0x7a4fb8d1221a22a7, 0x3df7d42035aad6d8, 0x2a14edcc6a1a125e}}}, +{{{0xdf48ee0752cfce4e, 0xc3fffaf306ec08b7, 0x05710b2ab95459c4, 0x161d25fa963ea38d}}, + {{0x231a8c570478433c, 0xb7b5270ec281439d, 0xdbaa99eae3d9079f, 0x2c03f5256c2b03d9}}, + {{0x790f18757b53a47d, 0x307b0130cf0c5879, 0x31903d77257ef7f9, 0x699468bdbd96bbaf}}}, +{{{0xbd1f2f46f4dafecf, 0x7cef0114a47fd6f7, 0xd31ffdda4a47b37f, 0x525219a473905785}}, + {{0xd8dd3de66aa91948, 0x485064c22fc0d2cc, 0x9b48246634fdea2f, 0x293e1c4e6c4a2e3a}}, + {{0x376e134b925112e1, 0x703778b5dca15da0, 0xb04589af461c3111, 0x5b605c447f032823}}}, +{{{0xb965805920c47c89, 0xe7f0100c923b8fcc, 0x0001256502e2ef77, 0x24a76dcea8aeb3ee}}, + {{0x3be9fec6f0e7f04c, 0x866a579e75e34962, 0x5542ef161e1de61a, 0x2f12fef4cc5abdd5}}, + {{0x0a4522b2dfc0c740, 0x10d06e7f40c9a407, 0xc6cf144178cff668, 0x5e607b2518a43790}}}, +{{{0x58b31d8f6cdf1818, 0x35cfa74fc36258a2, 0xe1b3ff4f66e61d6e, 0x5067acab6ccdd5f7}}, + {{0xa02c431ca596cf14, 0xe3c42d40aed3e400, 0xd24526802e0f26db, 0x201f33139e457068}}, + {{0xfd527f6b08039d51, 0x18b14964017c0006, 0xd5220eb02e25a4a8, 0x397cba8862460375}}}, +{{{0x30c13093f05959b2, 0xe23aa18de9a97976, 0x222fd491721d5e26, 0x2339d320766e6c3a}}, + {{0x7815c3fbc81379e7, 0xa6619420dde12af1, 0xffa9c0f885a8fdd5, 0x771b4022c1e1c252}}, + {{0xd87dd986513a2fa7, 0xf5ac9b71f9d4cf08, 0xd06bc31b1ea283b3, 0x331a189219971a76}}}, 
+{{{0xf5166f45fb4f80c6, 0x9c36c7de61c775cf, 0xe3d4e81b9041d91c, 0x31167c6b83bdfe21}}, + {{0x26512f3a9d7572af, 0x5bcbe28868074a9e, 0x84edc1c11180f7c4, 0x1ac9619ff649a67b}}, + {{0xf22b3842524b1068, 0x5068343bee9ce987, 0xfc9d71844a6250c8, 0x612436341f08b111}}}, +{{{0xd99d41db874e898d, 0x09fea5f16c07dc20, 0x793d2c67d00f9bbc, 0x46ebe2309e5eff40}}, + {{0x8b6349e31a2d2638, 0x9ddfb7009bd3fd35, 0x7f8bf1b8a3a06ba4, 0x1522aa3178d90445}}, + {{0x2c382f5369614938, 0xdafe409ab72d6d10, 0xe8c83391b646f227, 0x45fe70f50524306c}}}, +{{{0xda4875a6960c0b8c, 0x5b68d076ef0e2f20, 0x07fb51cf3d0b8fd4, 0x428d1623a0e392d4}}, + {{0x62f24920c8951491, 0x05f007c83f630ca2, 0x6fbb45d2f5c9d4b8, 0x16619f6db57a2245}}, + {{0x084f4a4401a308fd, 0xa82219c376a5caac, 0xdeb8de4643d1bc7d, 0x1d81592d60bd38c6}}}, +{{{0xd833d7beec2a4c38, 0x2c9162830acc20ed, 0xe93a47aa92df7581, 0x702d67a3333c4a81}}, + {{0x3a4a369a2f89c8a1, 0x63137a1d7c8de80d, 0xbcac008a78eda015, 0x2cb8b3a5b483b03f}}, + {{0x36e417cbcb1b90a1, 0x33b3ddaa7f11794e, 0x3f510808885bc607, 0x24141dc0e6a8020d}}}, +{{{0x59f73c773fefee9d, 0xb3f1ef89c1cf989d, 0xe35dfb42e02e545f, 0x5766120b47a1b47c}}, + {{0x91925dccbd83157d, 0x3ca1205322cc8094, 0x28e57f183f90d6e4, 0x1a4714cede2e767b}}, + {{0xdb20ba0fb8b6b7ff, 0xb732c3b677511fa1, 0xa92b51c099f02d89, 0x4f3875ad489ca5f1}}}, +{{{0xc7fc762f4932ab22, 0x7ac0edf72f4c3c1b, 0x5f6b55aa9aa895e8, 0x3680274dad0a0081}}, + {{0x79ed13f6ee73eec0, 0xa5c6526d69110bb1, 0xe48928c38603860c, 0x722a1446fd7059f5}}, + {{0xd0959fe9a8cf8819, 0xd0a995508475a99c, 0x6eac173320b09cc5, 0x628ecf04331b1095}}}, +{{{0x98bcb118a9d0ddbc, 0xee449e3408b4802b, 0x87089226b8a6b104, 0x685f349a45c7915d}}, + {{0x9b41acf85c74ccf1, 0xb673318108265251, 0x99c92aed11adb147, 0x7a47d70d34ecb40f}}, + {{0x60a0c4cbcc43a4f5, 0x775c66ca3677bea9, 0xa17aa1752ff8f5ed, 0x11ded9020e01fdc0}}}, +{{{0x890e7809caefe704, 0x8728296de30e8c6c, 0x4c5cd2a392aeb1c9, 0x194263d15771531f}}, + {{0x471f95b03bea93b7, 0x0552d7d43313abd3, 0xbd9370e2e17e3f7b, 0x7b120f1db20e5bec}}, + {{0x17d2fb3d86502d7a, 0xb564d84450a69352, 0x7da962c8a60ed75d, 0x00d0f85b318736aa}}}, +{{{0x978b142e777c84fd, 0xf402644705a8c062, 0xa67ad51be7e612c7, 0x2f7b459698dd6a33}}, + {{0xa6753c1efd7621c1, 0x69c0b4a7445671f5, 0x971f527405b23c11, 0x387bc74851a8c7cd}}, + {{0x81894b4d4a52a9a8, 0xadd93e12f6b8832f, 0x184d8548b61bd638, 0x3f1c62dbd6c9f6cd}}}, +{{{0x2e8f1f0091910c1f, 0xa4df4fe0bff2e12c, 0x60c6560aee927438, 0x6338283facefc8fa}}, + {{0x3fad3e40148f693d, 0x052656e194eb9a72, 0x2f4dcbfd184f4e2f, 0x406f8db1c482e18b}}, + {{0x9e630d2c7f191ee4, 0x4fbf8301bc3ff670, 0x787d8e4e7afb73c4, 0x50d83d5be8f58fa5}}}, +{{{0x85683916c11a1897, 0x2d69a4efe506d008, 0x39af1378f664bd01, 0x65942131361517c6}}, + {{0xc0accf90b4d3b66d, 0xa7059de561732e60, 0x033d1f7870c6b0ba, 0x584161cd26d946e4}}, + {{0xbbf2b1a072d27ca2, 0xbf393c59fbdec704, 0xe98dbbcee262b81e, 0x02eebd0b3029b589}}}, +{{{0x61368756a60dac5f, 0x17e02f6aebabdc57, 0x7f193f2d4cce0f7d, 0x20234a7789ecdcf0}}, + {{0x8765b69f7b85c5e8, 0x6ff0678bd168bab2, 0x3a70e77c1d330f9b, 0x3a5f6d51b0af8e7c}}, + {{0x76d20db67178b252, 0x071c34f9d51ed160, 0xf62a4a20b3e41170, 0x7cd682353cffe366}}}, +{{{0x0be1a45bd887fab6, 0x2a846a32ba403b6e, 0xd9921012e96e6000, 0x2838c8863bdc0943}}, + {{0xa665cd6068acf4f3, 0x42d92d183cd7e3d3, 0x5759389d336025d9, 0x3ef0253b2b2cd8ff}}, + {{0xd16bb0cf4a465030, 0xfa496b4115c577ab, 0x82cfae8af4ab419d, 0x21dcb8a606a82812}}}, +{{{0x5c6004468c9d9fc8, 0x2540096ed42aa3cb, 0x125b4d4c12ee2f9c, 0x0bc3d08194a31dab}}, + {{0x9a8d00fabe7731ba, 0x8203607e629e1889, 0xb2cc023743f3d97f, 0x5d840dbf6c6f678b}}, + 
{{0x706e380d309fe18b, 0x6eb02da6b9e165c7, 0x57bbba997dae20ab, 0x3a4276232ac196dd}}}, +{{{0x4b42432c8a7084fa, 0x898a19e3dfb9e545, 0xbe9f00219c58e45d, 0x1ff177cea16debd1}}, + {{0x3bf8c172db447ecb, 0x5fcfc41fc6282dbd, 0x80acffc075aa15fe, 0x0770c9e824e1a9f9}}, + {{0xcf61d99a45b5b5fd, 0x860984e91b3a7924, 0xe7300919303e3e89, 0x39f264fd41500b1e}}}, +{{{0xa7ad3417dbe7e29c, 0xbd94376a2b9c139c, 0xa0e91b8e93597ba9, 0x1712d73468889840}}, + {{0xd19b4aabfe097be1, 0xa46dfce1dfe01929, 0xc3c908942ca6f1ff, 0x65c621272c35f14e}}, + {{0xe72b89f8ce3193dd, 0x4d103356a125c0bb, 0x0419a93d2e1cfe83, 0x22f9800ab19ce272}}}, +{{{0x605a368a3e9ef8cb, 0xe3e9c022a5504715, 0x553d48b05f24248f, 0x13f416cd647626e5}}, + {{0x42029fdd9a6efdac, 0xb912cebe34a54941, 0x640f64b987bdf37b, 0x4171a4d38598cab4}}, + {{0xfa2758aa99c94c8c, 0x23006f6fb000b807, 0xfbd291ddadda5392, 0x508214fa574bd1ab}}}, +{{{0xc20269153ed6fe4b, 0xa65a6739511d77c4, 0xcbde26462c14af94, 0x22f960ec6faba74b}}, + {{0x461a15bb53d003d6, 0xb2102888bcf3c965, 0x27c576756c683a5a, 0x3a7758a4c86cb447}}, + {{0x548111f693ae5076, 0x1dae21df1dfd54a6, 0x12248c90f3115e65, 0x5d9fd15f8de7f494}}}, +{{{0x031408d36d63727f, 0x6a379aefd7c7b533, 0xa9e18fc5ccaee24b, 0x332f35914f8fbed3}}, + {{0x3f244d2aeed7521e, 0x8e3a9028432e9615, 0xe164ba772e9c16d4, 0x3bc187fa47eb98d8}}, + {{0x6d470115ea86c20c, 0x998ab7cb6c46d125, 0xd77832b53a660188, 0x450d81ce906fba03}}}, +{{{0xf8ae4d2ad8453902, 0x7018058ee8db2d1d, 0xaab3995fc7d2c11e, 0x53b16d2324ccca79}}, + {{0x23264d66b2cae0b5, 0x7dbaed33ebca6576, 0x030ebed6f0d24ac8, 0x2a887f78f7635510}}, + {{0x2a23b9e75c012d4f, 0x0c974651cae1f2ea, 0x2fb63273675d70ca, 0x0ba7250b864403f5}}}, +{{{0xbb0d18fd029c6421, 0xbc2d142189298f02, 0x8347f8e68b250e96, 0x7b9f2fe8032d71c9}}, + {{0xdd63589386f86d9c, 0x61699176e13a85a4, 0x2e5111954eaa7d57, 0x32c21b57fb60bdfb}}, + {{0xd87823cd319e0780, 0xefc4cfc1897775c5, 0x4854fb129a0ab3f7, 0x12c49d417238c371}}}, +{{{0x0950b533ffe83769, 0x21861c1d8e1d6bd1, 0xf022d8381302e510, 0x2509200c6391cab4}}, + {{0x09b3a01783799542, 0x626dd08faad5ee3f, 0xba00bceeeb70149f, 0x1421b246a0a444c9}}, + {{0x4aa43a8e8c24a7c7, 0x04c1f540d8f05ef5, 0xadba5e0c0b3eb9dc, 0x2ab5504448a49ce3}}}, +{{{0x2ed227266f0f5dec, 0x9824ee415ed50824, 0x807bec7c9468d415, 0x7093bae1b521e23f}}, + {{0xdc07ac631c5d3afa, 0x58615171f9df8c6c, 0x72a079d89d73e2b0, 0x7301f4ceb4eae15d}}, + {{0x6409e759d6722c41, 0xa674e1cf72bf729b, 0xbc0a24eb3c21e569, 0x390167d24ebacb23}}}, +{{{0x27f58e3bba353f1c, 0x4c47764dbf6a4361, 0xafbbc4e56e562650, 0x07db2ee6aae1a45d}}, + {{0xd7bb054ba2f2120b, 0xe2b9ceaeb10589b7, 0x3fe8bac8f3c0edbe, 0x4cbd40767112cb69}}, + {{0x0b603cc029c58176, 0x5988e3825cb15d61, 0x2bb61413dcf0ad8d, 0x7b8eec6c74183287}}}, +{{{0xe4ca40782cd27cb0, 0xdaf9c323fbe967bd, 0xb29bd34a8ad41e9e, 0x72810497626ede4d}}, + {{0x32fee570fc386b73, 0xda8b0141da3a8cc7, 0x975ffd0ac8968359, 0x6ee809a1b132a855}}, + {{0x9444bb31fcfd863a, 0x2fe3690a3e4e48c5, 0xdc29c867d088fa25, 0x13bd1e38d173292e}}}, +{{{0xd32b4cd8696149b5, 0xe55937d781d8aab7, 0x0bcb2127ae122b94, 0x41e86fcfb14099b0}}, + {{0x223fb5cf1dfac521, 0x325c25316f554450, 0x030b98d7659177ac, 0x1ed018b64f88a4bd}}, + {{0x3630dfa1b802a6b0, 0x880f874742ad3bd5, 0x0af90d6ceec5a4d4, 0x746a247a37cdc5d9}}}, +{{{0xd531b8bd2b7b9af6, 0x5005093537fc5b51, 0x232fcf25c593546d, 0x20a365142bb40f49}}, + {{0x6eccd85278d941ed, 0x2254ae83d22f7843, 0xc522d02e7bbfcdb7, 0x681e3351bff0e4e2}}, + {{0x8b64b59d83034f45, 0x2f8b71f21fa20efb, 0x69249495ba6550e4, 0x539ef98e45d5472b}}}, +{{{0x6e7bb6a1a6205275, 0xaa4f21d7413c8e83, 0x6f56d155e88f5cb2, 0x2de25d4ba6345be1}}, + 
{{0xd074d8961cae743f, 0xf86d18f5ee1c63ed, 0x97bdc55be7f4ed29, 0x4cbad279663ab108}}, + {{0x80d19024a0d71fcd, 0xc525c20afb288af8, 0xb1a3974b5f3a6419, 0x7d7fbcefe2007233}}}, +{{{0xfaef1e6a266b2801, 0x866c68c4d5739f16, 0xf68a2fbc1b03762c, 0x5975435e87b75a8d}}, + {{0xcd7c5dc5f3c29094, 0xc781a29a2a9105ab, 0x80c61d36421c3058, 0x4f9cd196dcd8d4d7}}, + {{0x199297d86a7b3768, 0xd0d058241ad17a63, 0xba029cad5c1c0c17, 0x7ccdd084387a0307}}}, +{{{0xdca6422c6d260417, 0xae153d50948240bd, 0xa9c0c1b4fb68c677, 0x428bd0ed61d0cf53}}, + {{0x9b0c84186760cc93, 0xcdae007a1ab32a99, 0xa88dec86620bda18, 0x3593ca848190ca44}}, + {{0x9213189a5e849aa7, 0xd4d8c33565d8facd, 0x8c52545b53fdbbd1, 0x27398308da2d63e6}}}, +{{{0x42c38d28435ed413, 0xbd50f3603278ccc9, 0xbb07ab1a79da03ef, 0x269597aebe8c3355}}, + {{0xb9a10e4c0a702453, 0x0fa25866d57d1bde, 0xffb9d9b5cd27daf7, 0x572c2945492c33fd}}, + {{0xc77fc745d6cd30be, 0xe4dfe8d3e3baaefb, 0xa22c8830aa5dda0c, 0x7f985498c05bca80}}}, +{{{0x3849ce889f0be117, 0x8005ad1b7b54a288, 0x3da3c39f23fc921c, 0x76c2ec470a31f304}}, + {{0xd35615520fbf6363, 0x08045a45cf4dfba6, 0xeec24fbc873fa0c2, 0x30f2653cd69b12e7}}, + {{0x8a08c938aac10c85, 0x46179b60db276bcb, 0xa920c01e0e6fac70, 0x2f1273f1596473da}}}, +{{{0x4739fc7c8ae01e11, 0xfd5274904a6aab9f, 0x41d98a8287728f2e, 0x5d9e572ad85b69f2}}, + {{0x30488bd755a70bc0, 0x06d6b5a4f1d442e7, 0xead1a69ebc596162, 0x38ac1997edc5f784}}, + {{0x0666b517a751b13b, 0x747d06867e9b858c, 0xacacc011454dde49, 0x22dfcd9cbfe9e69c}}}, +{{{0x8ddbd2e0c30d0cd9, 0xad8e665facbb4333, 0x8f6b258c322a961f, 0x6b2916c05448c1c7}}, + {{0x56ec59b4103be0a1, 0x2ee3baecd259f969, 0x797cb29413f5cd32, 0x0fe9877824cde472}}, + {{0x7edb34d10aba913b, 0x4ea3cd822e6dac0e, 0x66083dff6578f815, 0x4c303f307ff00a17}}}, +{{{0xd30a3bd617b28c85, 0xc5d377b739773bea, 0xc6c6e78c1e6a5cbf, 0x0d61b8f78b2ab7c4}}, + {{0x29fc03580dd94500, 0xecd27aa46fbbec93, 0x130a155fc2e2a7f8, 0x416b151ab706a1d5}}, + {{0x56a8d7efe9c136b0, 0xbd07e5cd58e44b20, 0xafe62fda1b57e0ab, 0x191a2af74277e8d2}}}, +{{{0xd550095bab6f4985, 0x04f4cd5b4fbfaf1a, 0x9d8e2ed12a0c7540, 0x2bc24e04b2212286}}, + {{0x09d4b60b2fe09a14, 0xc384f0afdbb1747e, 0x58e2ea8978b5fd6e, 0x519ef577b5e09b0a}}, + {{0x1863d7d91124cca9, 0x7ac08145b88a708e, 0x2bcd7309857031f5, 0x62337a6e8ab8fae5}}}, +{{{0x4bcef17f06ffca16, 0xde06e1db692ae16a, 0x0753702d614f42b0, 0x5f6041b45b9212d0}}, + {{0xd1ab324e1b3a1273, 0x18947cf181055340, 0x3b5d9567a98c196e, 0x7fa00425802e1e68}}, + {{0x7d531574028c2705, 0x80317d69db0d75fe, 0x30fface8ef8c8ddd, 0x7e9de97bb6c3e998}}}, +{{{0x1558967b9e6585a3, 0x97c99ce098e98b92, 0x10af149b6eb3adad, 0x42181fe8f4d38cfa}}, + {{0xf004be62a24d40dd, 0xba0659910452d41f, 0x81c45ee162a44234, 0x4cb829d8a22266ef}}, + {{0x1dbcaa8407b86681, 0x081f001e8b26753b, 0x3cd7ce6a84048e81, 0x78af11633f25f22c}}}, +{{{0x8416ebd40b50babc, 0x1508722628208bee, 0xa3148fafb9c1c36d, 0x0d07daacd32d7d5d}}, + {{0x3241c00e7d65318c, 0xe6bee5dcd0e86de7, 0x118b2dc2fbc08c26, 0x680d04a7fc603dc3}}, + {{0xf9c2414a695aa3eb, 0xdaa42c4c05a68f21, 0x7c6c23987f93963e, 0x210e8cd30c3954e3}}}, +{{{0xac4201f210a71c06, 0x6a65e0aef3bfb021, 0xbc42c35c393632f7, 0x56ea8db1865f0742}}, + {{0x2b50f16137fe6c26, 0xe102bcd856e404d8, 0x12b0f1414c561f6b, 0x51b17bc8d028ec91}}, + {{0xfff5fb4bcf535119, 0xf4989d79df1108a0, 0xbdfcea659a3ba325, 0x18a11f1174d1a6f2}}}, +{{{0x407375ab3f6bba29, 0x9ec3b6d8991e482e, 0x99c80e82e55f92e9, 0x307c13b6fb0c0ae1}}, + {{0xfbd63cdad27a5f2c, 0xf00fc4bc8aa106d7, 0x53fb5c1a8e64a430, 0x04eaabe50c1a2e85}}, + {{0x24751021cb8ab5e7, 0xfc2344495c5010eb, 0x5f1e717b4e5610a1, 0x44da5f18c2710cd5}}}, 
+{{{0x033cc55ff1b82eb5, 0xb15ae36d411cae52, 0xba40b6198ffbacd3, 0x768edce1532e861f}}, + {{0x9156fe6b89d8eacc, 0xe6b79451e23126a1, 0xbd7463d93944eb4e, 0x726373f6767203ae}}, + {{0xe305ca72eb7ef68a, 0x662cf31f70eadb23, 0x18f026fdb4c45b68, 0x513b5384b5d2ecbd}}}, +{{{0x46d46280c729989e, 0x4b93fbd05368a5dd, 0x63df3f81d1765a89, 0x34cebd64b9a0a223}}, + {{0x5e2702878af34ceb, 0x900b0409b946d6ae, 0x6512ebf7dabd8512, 0x61d9b76988258f81}}, + {{0xa6c5a71349b7d94b, 0xa3f3d15823eb9446, 0x0416fbd277484834, 0x69d45e6f2c70812f}}}, +{{{0xce16f74bc53c1431, 0x2b9725ce2072edde, 0xb8b9c36fb5b23ee7, 0x7e2e0e450b5cc908}}, + {{0x9fe62b434f460efb, 0xded303d4a63607d6, 0xf052210eb7a0da24, 0x237e7dbe00545b93}}, + {{0x013575ed6701b430, 0x231094e69f0bfd10, 0x75320f1583e47f22, 0x71afa699b11155e3}}}, +{{{0x65ce6f9b3953b61d, 0xc65839eaafa141e6, 0x0f435ffda9f759fe, 0x021142e9c2b1c28e}}, + {{0xea423c1c473b50d6, 0x51e87a1f3b38ef10, 0x9b84bf5fb2c9be95, 0x00731fbc78f89a1c}}, + {{0xe430c71848f81880, 0xbf960c225ecec119, 0xb6dae0836bba15e3, 0x4c4d6f3347e15808}}}, +{{{0x18f7eccfc17d1fc9, 0x6c75f5a651403c14, 0xdbde712bf7ee0cdf, 0x193fddaaa7e47a22}}, + {{0x2f0cddfc988f1970, 0x6b916227b0b9f51b, 0x6ec7b6c4779176be, 0x38bf9500a88f9fa8}}, + {{0x1fd2c93c37e8876f, 0xa2f61e5a18d1462c, 0x5080f58239241276, 0x6a6fb99ebf0d4969}}}, +{{{0x6a46c1bb560855eb, 0x2416bb38f893f09d, 0xd71d11378f71acc1, 0x75f76914a31896ea}}, + {{0xeeb122b5b6e423c6, 0x939d7010f286ff8e, 0x90a92a831dcf5d8c, 0x136fda9f42c5eb10}}, + {{0xf94cdfb1a305bdd1, 0x0f364b9d9ff82c08, 0x2a87d8a5c3bb588a, 0x022183510be8dcba}}}, +{{{0x4af766385ead2d14, 0xa08ed880ca7c5830, 0x0d13a6e610211e3d, 0x6a071ce17b806c03}}, + {{0x9d5a710143307a7f, 0xb063de9ec47da45f, 0x22bbfe52be927ad3, 0x1387c441fd40426c}}, + {{0xb5d3c3d187978af8, 0x722b5a3d7f0e4413, 0x0d7b4848bb477ca0, 0x3171b26aaf1edc92}}}, +{{{0xa92f319097564ca8, 0xff7bb84c2275e119, 0x4f55fe37a4875150, 0x221fd4873cf0835a}}, + {{0xa60db7d8b28a47d1, 0xa6bf14d61770a4f1, 0xd4a1f89353ddbd58, 0x6c514a63344243e9}}, + {{0x2322204f3a156341, 0xfb73e0e9ba0a032d, 0xfce0dd4c410f030e, 0x48daa596fb924aaa}}}, +{{{0x6eca8e665ca59cc7, 0xa847254b2e38aca0, 0x31afc708d21e17ce, 0x676dd6fccad84af7}}, + {{0x14f61d5dc84c9793, 0x9941f9e3ef418206, 0xcdf5b88f346277ac, 0x58c837fa0e8a79a9}}, + {{0x0cf9688596fc9058, 0x1ddcbbf37b56a01b, 0xdcc2e77d4935d66a, 0x1c4f73f2c6a57f0a}}}, +{{{0x0e7a4fbd305fa0bb, 0x829d4ce054c663ad, 0xf421c3832fe33848, 0x795ac80d1bf64c42}}, + {{0xb36e706efc7c3484, 0x73dfc9b4c3c1cf61, 0xeb1d79c9781cc7e5, 0x70459adb7daf675c}}, + {{0x1b91db4991b42bb3, 0x572696234b02dcca, 0x9fdf9ee51f8c78dc, 0x5fe162848ce21fd3}}}, +{{{0xe2790aae4d077c41, 0x8b938270db7469a3, 0x6eb632dc8abd16a2, 0x720814ecaa064b72}}, + {{0x315c29c795115389, 0xd7e0e507862f74ce, 0x0c4a762185927432, 0x72de6c984a25a1e4}}, + {{0xae9ab553bf6aa310, 0x050a50a9806d6e1b, 0x92bb7403adff5139, 0x0394d27645be618b}}}, +{{{0x4d572251857eedf4, 0xe3724edde19e93c5, 0x8a71420e0b797035, 0x3b3c833687abe743}}, + {{0xf5396425b23545a4, 0x15a7a27e98fbb296, 0xab6c52bc636fdd86, 0x79d995a8419334ee}}, + {{0xcd8a8ea61195dd75, 0xa504d8a81dd9a82f, 0x540dca81a35879b6, 0x60dd16a379c86a8a}}}, +{{{0x35a2c8487381e559, 0x596ffea6d78082cb, 0xcb9771ebdba7b653, 0x5a08b5019b4da685}}, + {{0x3501d6f8153e47b8, 0xb7a9675414a2f60c, 0x112ee8b6455d9523, 0x4e62a3c18112ea8a}}, + {{0xc8d4ac04516ab786, 0x595af3215295b23d, 0xd6edd234db0230c1, 0x0929efe8825b41cc}}}, +{{{0x5f0601d1cbd0f2d3, 0x736e412f6132bb7f, 0x83604432238dde87, 0x1e3a5272f5c0753c}}, + {{0x8b3172b7ad56651d, 0x01581b7a3fabd717, 0x2dc94df6424df6e4, 0x30376e5d2c29284f}}, + 
{{0xd2918da78159a59c, 0x6bdc1cd93f0713f3, 0x565f7a934acd6590, 0x53daacec4cb4c128}}}, +{{{0x4ca73bd79cc8a7d6, 0x4d4a738f47e9a9b2, 0xf4cbf12942f5fe00, 0x01a13ff9bdbf0752}}, + {{0x99852bc3852cfdb0, 0x2cc12e9559d6ed0b, 0x70f9e2bf9b5ac27b, 0x4f3b8c117959ae99}}, + {{0x55b6c9c82ff26412, 0x1ac4a8c91fb667a8, 0xd527bfcfeb778bf2, 0x303337da7012a3be}}}, +{{{0x955422228c1c9d7c, 0x01fac1371a9b340f, 0x7e8d9177925b48d7, 0x53f8ad5661b3e31b}}, + {{0x976d3ccbfad2fdd1, 0xcb88839737a640a8, 0x2ff00c1d6734cb25, 0x269ff4dc789c2d2b}}, + {{0x0c003fbdc08d678d, 0x4d982fa37ead2b17, 0xc07e6bcdb2e582f1, 0x296c7291df412a44}}}, +{{{0x7903de2b33daf397, 0xd0ff0619c9a624b3, 0x8a1d252b555b3e18, 0x2b6d581c52e0b7c0}}, + {{0xdfb23205dab8b59e, 0x465aeaa0c8092250, 0xd133c1189a725d18, 0x2327370261f117d1}}, + {{0x3d0543d3623e7986, 0x679414c2c278a354, 0xae43f0cc726196f6, 0x7836c41f8245eaba}}}, +{{{0xe7a254db49e95a81, 0x5192d5d008b0ad73, 0x4d20e5b1d00afc07, 0x5d55f8012cf25f38}}, + {{0xca651e848011937c, 0xc6b0c46e6ef41a28, 0xb7021ba75f3f8d52, 0x119dff99ead7b9fd}}, + {{0x43eadfcbf4b31d4d, 0xc6503f7411148892, 0xfeee68c5060d3b17, 0x329293b3dd4a0ac8}}}, +{{{0x4e59214fe194961a, 0x49be7dc70d71cd4f, 0x9300cfd23b50f22d, 0x4789d446fc917232}}, + {{0x2879852d5d7cb208, 0xb8dedd70687df2e7, 0xdc0bffab21687891, 0x2b44c043677daa35}}, + {{0x1a1c87ab074eb78e, 0xfac6d18e99daf467, 0x3eacbbcd484f9067, 0x60c52eef2bb9a4e4}}}, +{{{0x0b5d89bc3bfd8bf1, 0xb06b9237c9f3551a, 0x0e4c16b0d53028f5, 0x10bc9c312ccfcaab}}, + {{0x702bc5c27cae6d11, 0x44c7699b54a48cab, 0xefbc4056ba492eb2, 0x70d77248d9b6676d}}, + {{0xaa8ae84b3ec2a05b, 0x98699ef4ed1781e0, 0x794513e4708e85d1, 0x63755bd3a976f413}}}, +{{{0xb55fa03e2ad10853, 0x356f75909ee63569, 0x9ff9f1fdbe69b890, 0x0d8cc1c48bc16f84}}, + {{0x3dc7101897f1acb7, 0x5dda7d5ec165bbd8, 0x508e5b9c0fa1020f, 0x2763751737c52a56}}, + {{0x029402d36eb419a9, 0xf0b44e7e77b460a5, 0xcfa86230d43c4956, 0x70c2dd8a7ad166e7}}}, +{{{0x656194509f6fec0e, 0xee2e7ea946c6518d, 0x9733c1f367e09b5c, 0x2e0fac6363948495}}, + {{0x91d4967db8ed7e13, 0x74252f0ad776817a, 0xe40982e00d852564, 0x32b8613816a53ce5}}, + {{0x79e7f7bee448cd64, 0x6ac83a67087886d0, 0xf89fd4d9a0e4db2e, 0x4179215c735a4f41}}}, +{{{0x8c7094e7d7dced2a, 0x97fb8ac347d39c70, 0xe13be033a906d902, 0x700344a30cd99d76}}, + {{0xe4ae33b9286bcd34, 0xb7ef7eb6559dd6dc, 0x278b141fb3d38e1f, 0x31fa85662241c286}}, + {{0xaf826c422e3622f4, 0xc12029879833502d, 0x9bc1b7e12b389123, 0x24bb2312a9952489}}}, +{{{0xb1a8ed1732de67c3, 0x3cb49418461b4948, 0x8ebd434376cfbcd2, 0x0fee3e871e188008}}, + {{0x41f80c2af5f85c6b, 0x687284c304fa6794, 0x8945df99a3ba1bad, 0x0d1d2af9ffeb5d16}}, + {{0xa9da8aa132621edf, 0x30b822a159226579, 0x4004197ba79ac193, 0x16acd79718531d76}}}, +{{{0x72df72af2d9b1d3d, 0x63462a36a432245a, 0x3ecea07916b39637, 0x123e0ef6b9302309}}, + {{0xc959c6c57887b6ad, 0x94e19ead5f90feba, 0x16e24e62a342f504, 0x164ed34b18161700}}, + {{0x487ed94c192fe69a, 0x61ae2cea3a911513, 0x877bf6d3b9a4de27, 0x78da0fc61073f3eb}}}, +{{{0x5bf15d28e52bc66a, 0x2c47e31870f01a8e, 0x2419afbc06c28bdd, 0x2d25deeb256b173a}}, + {{0xa29f80f1680c3a94, 0x71f77e151ae9e7e6, 0x1100f15848017973, 0x054aa4b316b38ddd}}, + {{0xdfc8468d19267cb8, 0x0b28789c66e54daf, 0x2aeb1d2a666eec17, 0x134610a6ab7da760}}}, +{{{0xcaf55ec27c59b23f, 0x99aeed3e154d04f2, 0x68441d72e14141f4, 0x140345133932a0a2}}, + {{0xd91430e0dc028c3c, 0x0eb955a85217c771, 0x4b09e1ed2c99a1fa, 0x42881af2bd6a743c}}, + {{0x7bfec69aab5cad3d, 0xc23e8cd34cb2cfad, 0x685dd14bfb37d6a2, 0x0ad6d64415677a18}}}, +{{{0x781a439e417becb5, 0x4ac5938cd10e0266, 0x5da385110692ac24, 0x11b065a2ade31233}}, + 
{{0x7914892847927e9f, 0x33dad6ef370aa877, 0x1f8f24fa11122703, 0x5265ac2f2adf9592}}, + {{0x405fdd309afcb346, 0xd9723d4428e63f54, 0x94c01df05f65aaae, 0x43e4dc3ae14c0809}}}, +{{{0xbc12c7f1a938a517, 0x473028ab3180b2e1, 0x3f78571efbcd254a, 0x74e534426ff6f90f}}, + {{0xea6f7ac3adc2c6a3, 0xd0e928f6e9717c94, 0xe2d379ead645eaf5, 0x46dd8785c51ffbbe}}, + {{0x709801be375c8898, 0x4b06dab5e3fd8348, 0x75880ced27230714, 0x2b09468fdd2f4c42}}}, +{{{0x97c749eeb701cb96, 0x83f438d4b6a369c3, 0x62962b8b9a402cd9, 0x6976c7509888df7b}}, + {{0x5b97946582ffa02a, 0xda096a51fea8f549, 0xa06351375f77af9b, 0x1bcfde61201d1e76}}, + {{0x4a4a5490246a59a2, 0xd63ebddee87fdd90, 0xd9437c670d2371fa, 0x69e87308d30f8ed6}}}, +{{{0x435a8bb15656beb0, 0xf8fac9ba4f4d5bca, 0xb9b278c41548c075, 0x3eb0ef76e892b622}}, + {{0x0f80bf028bc80303, 0x6aae16b37a18cefb, 0xdd47ea47d72cd6a3, 0x61943588f4ed39aa}}, + {{0xd26e5c3e91039f85, 0xc0e9e77df6f33aa9, 0xe8968c5570066a93, 0x3c34d1881faaaddd}}}, +{{{0x3f9d2b5ea09f9ec0, 0x1dab3b6fb623a890, 0xa09ba3ea72d926c4, 0x374193513fd8b36d}}, + {{0xbd5b0b8f2fffe0d9, 0x6aa254103ed24fb9, 0x2ac7d7bcb26821c4, 0x605b394b60dca36a}}, + {{0xb4e856e45a9d1ed2, 0xefe848766c97a9a2, 0xb104cf641e5eee7d, 0x2f50b81c88a71c8f}}}, +{{{0x31723c61fc6811bb, 0x9cb450486211800f, 0x768933d347995753, 0x3491a53502752fcd}}, + {{0x2b552ca0a7da522a, 0x3230b336449b0250, 0xf2c4c5bca4b99fb9, 0x7b2c674958074a22}}, + {{0xd55165883ed28cdf, 0x12d84fd2d362de39, 0x0a874ad3e3378e4f, 0x000d2b1f7c763e74}}}, +{{{0x3d420811d06d4a67, 0xbefc048590e0ffe3, 0xf870c6b7bd487bde, 0x6e2a7316319afa28}}, + {{0x9624778c3e94a8ab, 0x0ad6f3cee9a78bec, 0x948ac7810d743c4f, 0x76627935aaecfccc}}, + {{0x56a8ac24d6d59a9f, 0xc8db753e3096f006, 0x477f41e68f4c5299, 0x588d851cf6c86114}}}, +{{{0x51138ec78df6b0fe, 0x5397da89e575f51b, 0x09207a1d717af1b9, 0x2102fdba2b20d650}}, + {{0xcd2a65e777d1f515, 0x548991878faa60f1, 0xb1b73bbcdabc06e5, 0x654878cba97cc9fb}}, + {{0x969ee405055ce6a1, 0x36bca7681251ad29, 0x3a1af517aa7da415, 0x0ad725db29ecb2ba}}}, +{{{0xdc4267b1834e2457, 0xb67544b570ce1bc5, 0x1af07a0bf7d15ed7, 0x4aefcffb71a03650}}, + {{0xfec7bc0c9b056f85, 0x537d5268e7f5ffd7, 0x77afc6624312aefa, 0x4f675f5302399fd9}}, + {{0xc32d36360415171e, 0xcd2bef118998483b, 0x870a6eadd0945110, 0x0bccbb72a2a86561}}}, +{{{0x185e962feab1a9c8, 0x86e7e63565147dcd, 0xb092e031bb5b6df2, 0x4024f0ab59d6b73e}}, + {{0x186d5e4c50fe1296, 0xe0397b82fee89f7e, 0x3bc7f6c5507031b0, 0x6678fd69108f37c2}}, + {{0x1586fa31636863c2, 0x07f68c48572d33f2, 0x4f73cc9f789eaefc, 0x2d42e2108ead4701}}}, +{{{0x97f5131594dfd29b, 0x6155985d313f4c6a, 0xeba13f0708455010, 0x676b2608b8d2d322}}, + {{0x21717b0d0f537593, 0x914e690b131e064c, 0x1bb687ae752ae09f, 0x420bf3a79b423c6e}}, + {{0x8138ba651c5b2b47, 0x8671b6ec311b1b80, 0x7bff0cb1bc3135b0, 0x745d2ffa9c0cf1e0}}}, +{{{0xbf525a1e2bc9c8bd, 0xea5b260826479d81, 0xd511c70edf0155db, 0x1ae23ceb960cf5d0}}, + {{0x6036df5721d34e6a, 0xb1db8827997bb3d0, 0xd3c209c3c8756afa, 0x06e15be54c1dc839}}, + {{0x5b725d871932994a, 0x32351cb5ceb1dab0, 0x7dc41549dab7ca05, 0x58ded861278ec1f7}}}, +{{{0xd8173793f266c55c, 0xc8c976c5cc454e49, 0x5ce382f8bc26c3a8, 0x2ff39de85485f6f9}}, + {{0x2dfb5ba8b6c2c9a8, 0x48eeef8ef52c598c, 0x33809107f12d1573, 0x08ba696b531d5bd8}}, + {{0x77ed3eeec3efc57a, 0x04e05517d4ff4811, 0xea3d7a3ff1a671cb, 0x120633b4947cfe54}}}, +{{{0x0b94987891610042, 0x4ee7b13cecebfae8, 0x70be739594f0a4c0, 0x35d30a99b4d59185}}, + {{0x82bd31474912100a, 0xde237b6d7e6fbe06, 0xe11e761911ea79c6, 0x07433be3cb393bde}}, + {{0xff7944c05ce997f4, 0x575d3de4b05c51a3, 0x583381fd5a76847c, 0x2d873ede7af6da9f}}}, 
+{{{0x157a316443373409, 0xfab8b7eef4aa81d9, 0xb093fee6f5a64806, 0x2e773654707fa7b6}}, + {{0xaa6202e14e5df981, 0xa20d59175015e1f5, 0x18a275d3bae21d6c, 0x0543618a01600253}}, + {{0x0deabdf4974c23c1, 0xaa6f0a259dce4693, 0x04202cb8a29aba2c, 0x4b1443362d07960d}}}, +{{{0x47b837f753242cec, 0x256dc48cc04212f2, 0xe222fbfbe1d928c5, 0x48ea295bad8a2c07}}, + {{0x299b1c3f57c5715e, 0x96cb929e6b686d90, 0x3004806447235ab3, 0x2c435c24a44d9fe1}}, + {{0x0607c97c80f8833f, 0x0e851578ca25ec5b, 0x54f7450b161ebb6f, 0x7bcb4792a0def80e}}}, +{{{0x8487e3d02bc73659, 0x4baf8445059979df, 0xd17c975adcad6fbf, 0x57369f0bdefc96b6}}, + {{0x1cecd0a0045224c2, 0x757f1b1b69e53952, 0x775b7a925289f681, 0x1b6cc62016736148}}, + {{0xf1a9990175638698, 0x353dd1beeeaa60d3, 0x849471334c9ba488, 0x63fa6e6843ade311}}}, +{{{0xd15c20536597c168, 0x9f73740098d28789, 0x18aee7f13257ba1f, 0x3418bfda07346f14}}, + {{0x2195becdd24b5eb7, 0x5e41f18cc0cd44f9, 0xdf28074441ca9ede, 0x07073b98f35b7d67}}, + {{0xd03c676c4ce530d4, 0x0b64c0473b5df9f4, 0x065cef8b19b3a31e, 0x3084d661533102c9}}}, +{{{0xe1f6b79ebf8469ad, 0x15801004e2663135, 0x9a498330af74181b, 0x3ba2504f049b673c}}, + {{0x9a6ce876760321fd, 0x7fe2b5109eb63ad8, 0x00e7d4ae8ac80592, 0x73d86b7abb6f723a}}, + {{0x0b52b5606dba5ab6, 0xa9134f0fbbb1edab, 0x30a9520d9b04a635, 0x6813b8f37973e5db}}}, +{{{0x9854b054334127c1, 0x105d047882fbff25, 0xdb49f7f944186f4f, 0x1768e838bed0b900}}, + {{0xf194ca56f3157e29, 0x136d35705ef528a5, 0xdd4cef778b0599bc, 0x7d5472af24f833ed}}, + {{0xd0ef874daf33da47, 0x00d3be5db6e339f9, 0x3f2a8a2f9c9ceece, 0x5d1aeb792352435a}}}, +{{{0xf59e6bb319cd63ca, 0x670c159221d06839, 0xb06d565b2150cab6, 0x20fb199d104f12a3}}, + {{0x12c7bfaeb61ba775, 0xb84e621fe263bffd, 0x0b47a5c35c840dcf, 0x7e83be0bccaf8634}}, + {{0x61943dee6d99c120, 0x86101f2e460b9fe0, 0x6bb2f1518ee8598d, 0x76b76289fcc475cc}}}, +{{{0x791b4cc1756286fa, 0xdbced317d74a157c, 0x7e732421ea72bde6, 0x01fe18491131c8e9}}, + {{0x4245f1a1522ec0b3, 0x558785b22a75656d, 0x1d485a2548a1b3c0, 0x60959eccd58fe09f}}, + {{0x3ebfeb7ba8ed7a09, 0x49fdc2bbe502789c, 0x44ebce5d3c119428, 0x35e1eb55be947f4a}}}, +{{{0xdbdae701c5738dd3, 0xf9c6f635b26f1bee, 0x61e96a8042f15ef4, 0x3aa1d11faf60a4d8}}, + {{0x14fd6dfa726ccc74, 0x3b084cfe2f53b965, 0xf33ae4f552a2c8b4, 0x59aab07a0d40166a}}, + {{0x77bcec4c925eac25, 0x1848718460137738, 0x5b374337fea9f451, 0x1865e78ec8e6aa46}}}, +{{{0xccc4b7c7b66e1f7a, 0x44157e25f50c2f7e, 0x3ef06dfc713eaf1c, 0x582f446752da63f7}}, + {{0x967c54e91c529ccb, 0x30f6269264c635fb, 0x2747aff478121965, 0x17038418eaf66f5c}}, + {{0xc6317bd320324ce4, 0xa81042e8a4488bc4, 0xb21ef18b4e5a1364, 0x0c2a1c4bcda28dc9}}}, +{{{0xd24dc7d06f1f0447, 0xb2269e3edb87c059, 0xd15b0272fbb2d28f, 0x7c558bd1c6f64877}}, + {{0xedc4814869bd6945, 0x0d6d907dbe1c8d22, 0xc63bd212d55cc5ab, 0x5a6a9b30a314dc83}}, + {{0xd0ec1524d396463d, 0x12bb628ac35a24f0, 0xa50c3a791cbc5fa4, 0x0404a5ca0afbafc3}}}, +{{{0x8c1f40070aa743d6, 0xccbad0cb5b265ee8, 0x574b046b668fd2de, 0x46395bfdcadd9633}}, + {{0x62bc9e1b2a416fd1, 0xb5c6f728e350598b, 0x04343fd83d5d6967, 0x39527516e7f8ee98}}, + {{0x117fdb2d1a5d9a9c, 0x9c7745bcd1005c2a, 0xefd4bef154d56fea, 0x76579a29e822d016}}}, +{{{0x45b68e7e49c02a17, 0x23cd51a2bca9a37f, 0x3ed65f11ec224c1b, 0x43a384dc9e05bdb1}}, + {{0x333cb51352b434f2, 0xd832284993de80e1, 0xb5512887750d35ce, 0x02c514bb2a2777c1}}, + {{0x684bd5da8bf1b645, 0xfb8bd37ef6b54b53, 0x313916d7a9b0d253, 0x1160920961548059}}}, +{{{0xb44d166929dacfaa, 0xda529f4c8413598f, 0xe9ef63ca453d5559, 0x351e125bc5698e0b}}, + {{0x7a385616369b4dcd, 0x75c02ca7655c3563, 0x7dc21bf9d4f18021, 0x2f637d7491e6e042}}, + 
{{0xd4b49b461af67bbe, 0xd603037ac8ab8961, 0x71dee19ff9a699fb, 0x7f182d06e7ce2a9a}}}, +{{{0x7a7c8e64ab0168ec, 0xcb5a4a5515edc543, 0x095519d347cd0eda, 0x67d4ac8c343e93b0}}, + {{0x09454b728e217522, 0xaa58e8f4d484b8d8, 0xd358254d7f46903c, 0x44acc043241c5217}}, + {{0x1c7d6bbb4f7a5777, 0x8b35fed4918313e1, 0x4adca1c6c96b4684, 0x556d1c8312ad71bd}}}, +{{{0x17ef40e30c8d3982, 0x31f7073e15a3fa34, 0x4f21f3cb0773646e, 0x746c6c6d1d824eff}}, + {{0x81f06756b11be821, 0x0faff82310a3f3dd, 0xf8b2d0556a99465d, 0x097abe38cc8c7f05}}, + {{0x0c49c9877ea52da4, 0x4c4369559bdc1d43, 0x022c3809f7ccebd2, 0x577e14a34bee84bd}}}, +{{{0xf0e268ac61a73b0a, 0xf2fafa103791a5f5, 0xc1e13e826b6d00e9, 0x60fa7ee96fd78f42}}, + {{0x94fecebebd4dd72b, 0xf46a4fda060f2211, 0x124a5977c0c8d1ff, 0x705304b8fb009295}}, + {{0xb63d1d354d296ec6, 0xf3c3053e5fad31d8, 0x670b958cb4bd42ec, 0x21398e0ca16353fd}}}, +{{{0x216ab2ca8da7d2ef, 0x366ad9dd99f42827, 0xae64b9004fdd3c75, 0x403a395b53909e62}}, + {{0x86c5fc16861b7e9a, 0xf6a330476a27c451, 0x01667267a1e93597, 0x05ffb9cd6082dfeb}}, + {{0xa617fa9ff53f6139, 0x60f2b5e513e66cb6, 0xd7a8beefb3448aa4, 0x7a2932856f5ea192}}}, +{{{0x0b39d761b02de888, 0x5f550e7ed2414e1f, 0xa6bfa45822e1a940, 0x050a2f7dfd447b99}}, + {{0xb89c444879639302, 0x4ae4f19350c67f2c, 0xf0b35da8c81af9c6, 0x39d0003546871017}}, + {{0x437c3b33a650db77, 0x6bafe81dbac52bb2, 0xfe99402d2db7d318, 0x2b5b7eec372ba6ce}}}, +{{{0xb3bc4bbd83f50eef, 0x508f0c998c927866, 0x43e76587c8b7e66e, 0x0f7655a3a47f98d9}}, + {{0xa694404d613ac8f4, 0x500c3c2bfa97e72c, 0x874104d21fcec210, 0x1b205fb38604a8ee}}, + {{0x55ecad37d24b133c, 0x441e147d6038c90b, 0x656683a1d62c6fee, 0x0157d5dc87e0ecae}}}, +{{{0xf2a7af510354c13d, 0xd7a0b145aa372b60, 0x2869b96a05a3d470, 0x6528e42d82460173}}, + {{0x95265514d71eb524, 0xe603d8815df14593, 0x147cdf410d4de6b7, 0x5293b1730437c850}}, + {{0x23d0e0814bccf226, 0x92c745cd8196fb93, 0x8b61796c59541e5b, 0x40a44df0c021f978}}}, +{{{0xdaa869894f20ea6a, 0xea14a3d14c620618, 0x6001fccb090bf8be, 0x35f4e822947e9cf0}}, + {{0x86c96e514bc5d095, 0xf20d4098fca6804a, 0x27363d89c826ea5d, 0x39ca36565719cacf}}, + {{0x97506f2f6f87b75c, 0xc624aea0034ae070, 0x1ec856e3aad34dd6, 0x055b0be0e440e58f}}}, +{{{0x6469a17d89735d12, 0xdb6f27d5e662b9f1, 0x9fcba3286a395681, 0x363b8004d269af25}}, + {{0x4d12a04b6ea33da2, 0x57cf4c15e36126dd, 0x90ec9675ee44d967, 0x64ca348d2a985aac}}, + {{0x99588e19e4c4912d, 0xefcc3b4e1ca5ce6b, 0x4522ea60fa5b98d5, 0x7064bbab1de4a819}}}, +{{{0xb919e1515a770641, 0xa9a2e2c74e7f8039, 0x7527250b3df23109, 0x756a7330ac27b78b}}, + {{0xa290c06142542129, 0xf2e2c2aebe8d5b90, 0xcf2458db76abfe1b, 0x02157ade83d626bf}}, + {{0x3e46972a1b9a038b, 0x2e4ee66a7ee03fb4, 0x81a248776edbb4ca, 0x1a944ee88ecd0563}}}, +{{{0xd5a91d1151039372, 0x2ed377b799ca26de, 0xa17202acfd366b6b, 0x0730291bd6901995}}, + {{0xbb40a859182362d6, 0xb99f55778a4d1abb, 0x8d18b427758559f6, 0x26c20fe74d26235a}}, + {{0x648d1d9fe9cc22f5, 0x66bc561928dd577c, 0x47d3ed21652439d1, 0x49d271acedaf8b49}}}, +{{{0x89f5058a382b33f3, 0x5ae2ba0bad48c0b4, 0x8f93b503a53db36e, 0x5aa3ed9d95a232e6}}, + {{0x2798aaf9b4b75601, 0x5eac72135c8dad72, 0xd2ceaa6161b7a023, 0x1bbfb284e98f7d4e}}, + {{0x656777e9c7d96561, 0xcb2b125472c78036, 0x65053299d9506eee, 0x4a07e14e5e8957cc}}}, +{{{0x4ee412cb980df999, 0xa315d76f3c6ec771, 0xbba5edde925c77fd, 0x3f0bac391d313402}}, + {{0x240b58cdc477a49b, 0xfd38dade6447f017, 0x19928d32a7c86aad, 0x50af7aed84afa081}}, + {{0x6e4fde0115f65be5, 0x29982621216109b2, 0x780205810badd6d9, 0x1921a316baebd006}}}, +{{{0x89422f7edfb870fc, 0x2c296beb4f76b3bd, 0x0738f1d436c24df7, 0x6458df41e273aeb0}}, + 
{{0xd75aad9ad9f3c18b, 0x566a0eef60b1c19c, 0x3e9a0bac255c0ed9, 0x7b049deca062c7f5}}, + {{0xdccbe37a35444483, 0x758879330fedbe93, 0x786004c312c5dd87, 0x6093dccbc2950e64}}}, +{{{0x1ff39a8585e0706d, 0x36d0a5d8b3e73933, 0x43b9f2e1718f453b, 0x57d1ea084827a97c}}, + {{0x6bdeeebe6084034b, 0x3199c2b6780fb854, 0x973376abb62d0695, 0x6e3180c98b647d90}}, + {{0xee7ab6e7a128b071, 0xa4c1596d93a88baa, 0xf7b4de82b2216130, 0x363e999ddd97bd18}}}, +{{{0x96a843c135ee1fc4, 0x976eb35508e4c8cf, 0xb42f6801b58cd330, 0x48ee9b78693a052b}}, + {{0x2f1848dce24baec6, 0x769b7255babcaf60, 0x90cb3c6e3cefe931, 0x231f979bc6f9b355}}, + {{0x5c31de4bcc2af3c6, 0xb04bb030fe208d1f, 0xb78d7009c14fb466, 0x079bfa9b08792413}}}, +{{{0xe3903a51da300df4, 0x843964233da95ab0, 0xed3cf12d0b356480, 0x038c77f684817194}}, + {{0xf3c9ed80a2d54245, 0x0aa08b7877f63952, 0xd76dac63d1085475, 0x1ef4fb159470636b}}, + {{0x854e5ee65b167bec, 0x59590a4296d0cdc2, 0x72b2df3498102199, 0x575ee92a4a0bff56}}}, +{{{0xd4c080908a182fcf, 0x30e170c299489dbd, 0x05babd5752f733de, 0x43d4e7112cd3fd00}}, + {{0x5d46bc450aa4d801, 0xc3af1227a533b9d8, 0x389e3b262b8906c2, 0x200a1e7e382f581b}}, + {{0x518db967eaf93ac5, 0x71bc989b056652c0, 0xfe2b85d9567197f5, 0x050eca52651e4e38}}}, +{{{0xc3431ade453f0c9c, 0xe9f5045eff703b9b, 0xfcd97ac9ed847b3d, 0x4b0ee6c21c58f4c6}}, + {{0x97ac397660e668ea, 0x9b19bbfe153ab497, 0x4cb179b534eca79f, 0x6151c09fa131ae57}}, + {{0x3af55c0dfdf05d96, 0xdd262ee02ab4ee7a, 0x11b2bb8712171709, 0x1fef24fa800f030b}}}, +{{{0xb496123a6b6c6609, 0xa750fe8580ab5938, 0xf471bf39b7c27a5f, 0x507903ce77ac193c}}, + {{0xff91a66a90166220, 0xf22552ae5bf1e009, 0x7dff85d87f90df7c, 0x4f620ffe0c736fb9}}, + {{0x62f90d65dfde3e34, 0xcf28c592b9fa5fad, 0x99c86ef9c6164510, 0x25d448044a256c84}}}, +{{{0xbd68230ec7e9b16f, 0x0eb1b9c1c1c5795d, 0x7943c8c495b6b1ff, 0x2f9faf620bbacf5e}}, + {{0x2c7c4415c9022b55, 0x56a0d241812eb1fe, 0xf02ea1c9d7b65e0d, 0x4180512fd5323b26}}, + {{0xa4ff3e698a48a5db, 0xba6a3806bd95403b, 0x9f7ce1af47d5b65d, 0x15e087e55939d2fb}}}, +{{{0x12207543745c1496, 0xdaff3cfdda38610c, 0xe4e797272c71c34f, 0x39c07b1934bdede9}}, + {{0x8894186efb963f38, 0x48a00e80dc639bd5, 0xa4e8092be96c1c99, 0x5a097d54ca573661}}, + {{0x2d45892b17c9e755, 0xd033fd7289308df8, 0x6c2fe9d9525b8bd9, 0x2edbecf1c11cc079}}}, +{{{0x1616a4e3c715a0d2, 0x53623cb0f8341d4d, 0x96ef5329c7e899cb, 0x3d4e8dbba668baa6}}, + {{0xee0f0fddd087a25f, 0x9c7531555c3e34ee, 0x660c572e8fab3ab5, 0x0854fc44544cd3b2}}, + {{0x61eba0c555edad19, 0x24b533fef0a83de6, 0x3b77042883baa5f8, 0x678f82b898a47e8d}}}, +{{{0xb1491d0bd6900c54, 0x3539722c9d132636, 0x4db928920b362bc9, 0x4d7cd1fea68b69df}}, + {{0x1e09d94057775696, 0xeed1265c3cd951db, 0xfa9dac2b20bce16f, 0x0f7f76e0e8d089f4}}, + {{0x36d9ebc5d485b00c, 0xa2596492e4adb365, 0xc1659480c2119ccd, 0x45306349186e0d5f}}}, +{{{0x94ddd0c1a6cdff1d, 0x55f6f115e84213ae, 0x6c935f85992fcf6a, 0x067ee0f54a37f16f}}, + {{0x96a414ec2b072491, 0x1bb2218127a7b65b, 0x6d2849596e8a4af0, 0x65f3b08ccd27765f}}, + {{0xecb29fff199801f7, 0x9d361d1fa2a0f72f, 0x25f11d2375fd2f49, 0x124cefe80fe10fe2}}}, +{{{0x4c126cf9d18df255, 0xc1d471e9147a63b6, 0x2c6d3c73f3c93b5f, 0x6be3a6a2e3ff86a2}}, + {{0x1518e85b31b16489, 0x8faadcb7db710bfb, 0x39b0bdf4a14ae239, 0x05f4cbea503d20c1}}, + {{0xce040e9ec04145bc, 0xc71ff4e208f6834c, 0xbd546e8dab8847a3, 0x64666aa0a4d2aba5}}}, +{{{0x6841435a7c06d912, 0xca123c21bb3f830b, 0xd4b37b27b1cbe278, 0x1d753b84c76f5046}}, + {{0xb0c53bf73337e94c, 0x7cb5697e11e14f15, 0x4b84abac1930c750, 0x28dd4abfe0640468}}, + {{0x7dc0b64c44cb9f44, 0x18a3e1ace3925dbf, 0x7a3034862d0457c4, 0x4c498bf78a0c892e}}}, 
+{{{0x37d653fb1aa73196, 0x0f9495303fd76418, 0xad200b09fb3a17b2, 0x544d49292fc8613e}}, + {{0x22d2aff530976b86, 0x8d90b806c2d24604, 0xdca1896c4de5bae5, 0x28005fe6c8340c17}}, + {{0x6aefba9f34528688, 0x5c1bff9425107da1, 0xf75bbbcd66d94b36, 0x72e472930f316dfa}}}, +{{{0x2695208c9781084f, 0xb1502a0b23450ee1, 0xfd9daea603efde02, 0x5a9d2e8c2733a34c}}, + {{0x07f3f635d32a7627, 0x7aaa4d865f6566f0, 0x3c85e79728d04450, 0x1fee7f000fe06438}}, + {{0x765305da03dbf7e5, 0xa4daf2491434cdbd, 0x7b4ad5cdd24a88ec, 0x00f94051ee040543}}}, +{{{0x8d356b23c3d330b2, 0xf21c8b9bb0471b06, 0xb36c316c6e42b83c, 0x07d79c7e8beab10d}}, + {{0xd7ef93bb07af9753, 0x583ed0cf3db766a7, 0xce6998bf6e0b1ec5, 0x47b7ffd25dd40452}}, + {{0x87fbfb9cbc08dd12, 0x8a066b3ae1eec29b, 0x0d57242bdb1fc1bf, 0x1c3520a35ea64bb6}}}, +{{{0x80d253a6bccba34a, 0x3e61c3a13838219b, 0x90c3b6019882e396, 0x1c3d05775d0ee66f}}, + {{0xcda86f40216bc059, 0x1fbb231d12bcd87e, 0xb4956a9e17c70990, 0x38750c3b66d12e55}}, + {{0x692ef1409422e51a, 0xcbc0c73c2b5df671, 0x21014fe7744ce029, 0x0621e2c7d330487c}}}, +{{{0xaf9860cc8259838d, 0x90ea48c1c69f9adc, 0x6526483765581e30, 0x0007d6097bd3a5bc}}, + {{0xb7ae1796b0dbf0f3, 0x54dfafb9e17ce196, 0x25923071e9aaa3b4, 0x5d8e589ca1002e9d}}, + {{0xc0bf1d950842a94b, 0xb2d3c363588f2e3e, 0x0a961438bb51e2ef, 0x1583d7783c1cbf86}}}, +{{{0xeceea2ef5da27ae1, 0x597c3a1455670174, 0xc9a62a126609167a, 0x252a5f2e81ed8f70}}, + {{0x90034704cc9d28c7, 0x1d1b679ef72cc58f, 0x16e12b5fbe5b8726, 0x4958064e83c5580a}}, + {{0x0d2894265066e80d, 0xfcc3f785307c8c6b, 0x1b53da780c1112fd, 0x079c170bd843b388}}}, +{{{0x0506ece464fa6fff, 0xbee3431e6205e523, 0x3579422451b8ea42, 0x6dec05e34ac9fb00}}, + {{0xcdd6cd50c0d5d056, 0x9af7686dbb03573b, 0x3ca6723ff3c3ef48, 0x6768c0d7317b8acc}}, + {{0x94b625e5f155c1b3, 0x417bf3a7997b7b91, 0xc22cbddc6d6b2600, 0x51445e14ddcd52f4}}}, +{{{0x57502b4b3b144951, 0x8e67ff6b444bbcb3, 0xb8bd6927166385db, 0x13186f31e39295c8}}, + {{0x893147ab2bbea455, 0x8c53a24f92079129, 0x4b49f948be30f7a7, 0x12e990086e4fd43d}}, + {{0xf10c96b37fdfbb2e, 0x9f9a935e121ceaf9, 0xdf1136c43a5b983f, 0x77b2e3f05d3e99af}}}, +{{{0xfd0d75879cf12657, 0xe82fef94e53a0e29, 0xcc34a7f05bbb4be7, 0x0b251172a50c38a2}}, + {{0x9532f48fcc5cd29b, 0x2ba851bea3ce3671, 0x32dacaa051122941, 0x478d99d9350004f2}}, + {{0x1d5ad94890bb02c0, 0x50e208b10ec25115, 0xa26a22894ef21702, 0x4dc923343b524805}}}, +{{{0xe3828c400f8086b6, 0x3f77e6f7979f0dc8, 0x7ef6de304df42cb4, 0x5265797cb6abd784}}, + {{0x3ad3e3ebf36c4975, 0xd75d25a537862125, 0xe873943da025a516, 0x6bbc7cb4c411c847}}, + {{0x3c6f9cd1d4a50d56, 0xb6244077c6feab7e, 0x6ff9bf483580972e, 0x00375883b332acfb}}}, +{{{0x0001b2cd28cb0940, 0x63fb51a06f1c24c9, 0xb5ad8691dcd5ca31, 0x67238dbd8c450660}}, + {{0xc98bec856c75c99c, 0xe44184c000e33cf4, 0x0a676b9bba907634, 0x669e2cb571f379d7}}, + {{0xcb116b73a49bd308, 0x025aad6b2392729e, 0xb4793efa3f55d9b1, 0x72a1056140678bb9}}}, +{{{0xa2b6812b1cc9249d, 0x62866eee21211f58, 0x2cb5c5b85df10ece, 0x03a6b259e263ae00}}, + {{0x0d8d2909e2e505b6, 0x98ca78abc0291230, 0x77ef5569a9b12327, 0x7c77897b81439b47}}, + {{0xf1c1b5e2de331cb5, 0x5a9f5d8e15fca420, 0x9fa438f17bd932b1, 0x2a381bf01c6146e7}}}, +{{{0xac9b9879cfc811c1, 0x8b7d29813756e567, 0x50da4e607c70edfc, 0x5dbca62f884400b6}}, + {{0xf7c0be32b534166f, 0x27e6ca6419cf70d4, 0x934df7d7a957a759, 0x5701461dabdec2aa}}, + {{0x2c6747402c915c25, 0x1bdcd1a80b0d340a, 0x5e5601bd07b43f5f, 0x2555b4e05539a242}}}, +{{{0x6fc09f5266ddd216, 0xdce560a7c8e37048, 0xec65939da2df62fd, 0x7a869ae7e52ed192}}, + {{0x78409b1d87e463d4, 0xad4da95acdfb639d, 0xec28773755259b9c, 0x69c806e9c31230ab}}, + 
{{0x7b48f57414bb3f22, 0x68c7cee4aedccc88, 0xed2f936179ed80be, 0x25d70b885f77bc4b}}}, +{{{0x4151c3d9762bf4de, 0x083f435f2745d82b, 0x29775a2e0d23ddd5, 0x138e3a6269a5db24}}, + {{0x98459d29bb1ae4d4, 0x56b9c4c739f954ec, 0x832743f6c29b4b3e, 0x21ea8e2798b6878a}}, + {{0x87bef4b46a5a7b9c, 0xd2299d1b5fc1d062, 0x82409818dd321648, 0x5c5abeb1e5a2e03d}}}, +{{{0x14722af4b73c2ddb, 0xbc470c5f5a05060d, 0x00943eac2581b02e, 0x0e434b3b1f499c8f}}, + {{0x02cde6de1306a233, 0x7b5a52a2116f8ec7, 0xe1c681f4c1163b5b, 0x241d350660d32643}}, + {{0x6be4404d0ebc52c7, 0xae46233bb1a791f5, 0x2aec170ed25db42b, 0x1d8dfd966645d694}}}, +{{{0x296fa9c59c2ec4de, 0xbc8b61bf4f84f3cb, 0x1c7706d917a8f908, 0x63b795fc7ad3255d}}, + {{0xd598639c12ddb0a4, 0xa5d19f30c024866b, 0xd17c2f0358fce460, 0x07a195152e095e8a}}, + {{0xa8368f02389e5fc8, 0x90433b02cf8de43b, 0xafa1fd5dc5412643, 0x3e8fe83d032f0137}}}, +{{{0x2f8b15b90570a294, 0x94f2427067084549, 0xde1c5ae161bbfd84, 0x75ba3b797fac4007}}, + {{0x08704c8de8efd13c, 0xdfc51a8e33e03731, 0xa59d5da51260cde3, 0x22d60899a6258c86}}, + {{0x6239dbc070cdd196, 0x60fe8a8b6c7d8a9a, 0xb38847bceb401260, 0x0904d07b87779e5e}}}, +{{{0xb4ce1fd4ddba919c, 0xcf31db3ec74c8daa, 0x2c63cc63ad86cc51, 0x43e2143fbc1dde07}}, + {{0xf4322d6648f940b9, 0x06952f0cbd2d0c39, 0x167697ada081f931, 0x6240aacebaf72a6c}}, + {{0xf834749c5ba295a0, 0xd6947c5bca37d25a, 0x66f13ba7e7c9316a, 0x56bdaf238db40cac}}}, +{{{0x362ab9e3f53533eb, 0x338568d56eb93d40, 0x9e0e14521d5a5572, 0x1d24a86d83741318}}, + {{0x1310d36cc19d3bb2, 0x062a6bb7622386b9, 0x7c9b8591d7a14f5c, 0x03aa31507e1e5754}}, + {{0xf4ec7648ffd4ce1f, 0xe045eaf054ac8c1c, 0x88d225821d09357c, 0x43b261dc9aeb4859}}}, +{{{0xe55b1e1988bb79bb, 0xa09ed07dc17a359d, 0xb02c2ee2603dea33, 0x326055cf5b276bc2}}, + {{0x19513d8b6c951364, 0x94fe7126000bf47b, 0x028d10ddd54f9567, 0x02b4d5e242940964}}, + {{0xb4a155cb28d18df2, 0xeacc4646186ce508, 0xc49cf4936c824389, 0x27a6c809ae5d3410}}}, +{{{0x8ba6ebcd1f0db188, 0x37d3d73a675a5be8, 0xf22edfa315f5585a, 0x2cb67174ff60a17e}}, + {{0xcd2c270ac43d6954, 0xdd4a3e576a66cab2, 0x79fa592469d7036c, 0x221503603d8c2599}}, + {{0x59eecdf9390be1d0, 0xa9422044728ce3f1, 0x82891c667a94f0f4, 0x7b1df4b73890f436}}}, +{{{0xe492f2e0b3b2a224, 0x7c6c9e062b551160, 0x15eb8fe20d7f7b0e, 0x61fcef2658fc5992}}, + {{0x5f2e221807f8f58c, 0xe3555c9fd49409d4, 0xb2aaa88d1fb6a630, 0x68698245d352e03d}}, + {{0xdbb15d852a18187a, 0xf3e4aad386ddacd7, 0x44bae2810ff6c482, 0x46cf4c473daf01cf}}}, +{{{0x426525ed9ec4e5f9, 0x0e5eda0116903303, 0x72b1a7f2cbe5cadc, 0x29387bcd14eb5f40}}, + {{0x213c6ea7f1498140, 0x7c1e7ef8392b4854, 0x2488c38c5629ceba, 0x1065aae50d8cc5bb}}, + {{0x1c2c4525df200d57, 0x5c3b2dd6bfca674a, 0x0a07e7b1e1834030, 0x69a198e64f1ce716}}}, +{{{0x7afcd613efa9d697, 0x0cc45aa41c067959, 0xa56fe104c1fada96, 0x3a73b70472e40365}}, + {{0x7b26e56b9e2d4734, 0xc4c7132b81c61675, 0xef5c9525ec9cde7f, 0x39c80b16e71743ad}}, + {{0x0f196e0d1b826c68, 0xf71ff0e24960e3db, 0x6113167023b7436c, 0x0cf0ea5877da7282}}}, +{{{0x196c80a4ddd4ccbd, 0x22e6f55d95f2dd9d, 0xc75e33c740d6c71b, 0x7bb51279cb3c042f}}, + {{0xe332ced43ba6945a, 0xde0b1361e881c05d, 0x1ad40f095e67ed3b, 0x5da8acdab8c63d5d}}, + {{0xc4b6664a3a70159f, 0x76194f0f0a904e14, 0xa5614c39a4096c13, 0x6cd0ff50979feced}}}, +{{{0xc0e067e78f4428ac, 0x14835ab0a61135e3, 0xf21d14f338062935, 0x6390a4c8df04849c}}, + {{0x7fecfabdb04ba18e, 0xd0fc7bfc3bddbcf7, 0xa41d486e057a131c, 0x641a4391f2223a61}}, + {{0xc5c6b95aa606a8db, 0x914b7f9eb06825f1, 0x2a731f6b44fc9eff, 0x30ddf38562705cfc}}}, +{{{0x4e3dcbdad1bff7f9, 0xc9118e8220645717, 0xbacccebc0f189d56, 0x1b4822e9d4467668}}, + 
{{0x33bef2bd68bcd52c, 0xc649dbb069482ef2, 0xb5b6ee0c41cb1aee, 0x5c294d270212a7e5}}, + {{0xab360a7f25563781, 0x2512228a480f7958, 0xc75d05276114b4e3, 0x222d9625d976fe2a}}}, +{{{0x1c717f85b372ace1, 0x81930e694638bf18, 0x239cad056bc08b58, 0x0b34271c87f8fff4}}, + {{0x0f94be7e0a344f85, 0xeb2faa8c87f22c38, 0x9ce1e75e4ee16f0f, 0x43e64e5418a08dea}}, + {{0x8155e2521a35ce63, 0xbe100d4df912028e, 0xbff80bf8a57ddcec, 0x57342dc96d6bc6e4}}}, +{{{0xefeef065c8ce5998, 0xbf029510b5cbeaa2, 0x8c64a10620b7c458, 0x35134fb231c24855}}, + {{0xf3c3bcb71e707bf6, 0x351d9b8c7291a762, 0x00502e6edad69a33, 0x522f521f1ec8807f}}, + {{0x272c1f46f9a3902b, 0xc91ba3b799657bcc, 0xae614b304f8a1c0e, 0x7afcaad70b99017b}}}, +{{{0xc25ded54a4b8be41, 0x902d13e11bb0e2dd, 0x41f43233cde82ab2, 0x1085faa5c3aae7cb}}, + {{0xa88141ecef842b6b, 0x55e7b14797abe6c5, 0x8c748f9703784ffe, 0x5b50a1f7afcd00b7}}, + {{0x9b840f66f1361315, 0x18462242701003e9, 0x65ed45fae4a25080, 0x0a2862393fda7320}}}, +{{{0x46ab13c8347cbc9d, 0x3849e8d499c12383, 0x4cea314087d64ac9, 0x1f354134b1a29ee7}}, + {{0x960e737b6ecb9d17, 0xfaf24948d67ceae1, 0x37e7a9b4d55e1b89, 0x5cb7173cb46c59eb}}, + {{0x4a89e68b82b7abf0, 0xf41cd9279ba6b7b9, 0x16e6c210e18d876f, 0x7cacdb0f7f1b09c6}}}, +{{{0x9062b2e0d91a78bc, 0x47c9889cc8509667, 0x9df54a66405070b8, 0x7369e6a92493a1bf}}, + {{0xe1014434dcc5caed, 0x47ed5d963c84fb33, 0x70019576ed86a0e7, 0x25b2697bd267f9e4}}, + {{0x9d673ffb13986864, 0x3ca5fbd9415dc7b8, 0xe04ecc3bdf273b5e, 0x1420683db54e4cd2}}}, +{{{0xb478bd1e249dd197, 0x620c35005e58c102, 0xfb02d32fccbaac5c, 0x60b63bebf508a72d}}, + {{0x34eebb6fc1cc5ad0, 0x6a1b0ce99646ac8b, 0xd3b0da49a66bde53, 0x31e83b4161d081c1}}, + {{0x97e8c7129e062b4f, 0x49e48f4f29320ad8, 0x5bece14b6f18683f, 0x55cf1eb62d550317}}}, +{{{0x5879101065c23d58, 0x8b9d086d5094819c, 0xe2402fa912c55fa7, 0x669a6564570891d4}}, + {{0x3076b5e37df58c52, 0xd73ab9dde799cc36, 0xbd831ce34913ee20, 0x1a56fbaa62ba0133}}, + {{0x943e6b505c9dc9ec, 0x302557bba77c371a, 0x9873ae5641347651, 0x13c4836799c58a5c}}}, +{{{0x423a5d465ab3e1b9, 0xfc13c187c7f13f61, 0x19f83664ecb5b9b6, 0x66f80c93a637b607}}, + {{0xc4dcfb6a5d8bd080, 0xdeebc4ec571a4842, 0xd4b2e883b8e55365, 0x50bdc87dc8e5b827}}, + {{0x606d37836edfe111, 0x32353e15f011abd9, 0x64b03ac325b73b96, 0x1dd56444725fd5ae}}}, +{{{0x8fa47ff83362127d, 0xbc9f6ac471cd7c15, 0x6e71454349220c8b, 0x0e645912219f732e}}, + {{0xc297e60008bac89a, 0x7d4cea11eae1c3e0, 0xf3e38be19fe7977c, 0x3a3a450f63a305cd}}, + {{0x078f2f31d8394627, 0x389d3183de94a510, 0xd1e36c6d17996f80, 0x318c8d9393a9a87b}}}, +{{{0xf2745d032afffe19, 0x0c9f3c497f24db66, 0xbc98d3e3ba8598ef, 0x224c7c679a1d5314}}, + {{0x5d669e29ab1dd398, 0xfc921658342d9e3b, 0x55851dfdf35973cd, 0x509a41c325950af6}}, + {{0xbdc06edca6f925e9, 0x793ef3f4641b1f33, 0x82ec12809d833e89, 0x05bff02328a11389}}}, +{{{0x3632137023cae00b, 0x544acf0ad1accf59, 0x96741049d21a1c88, 0x780b8cc3fa2a44a7}}, + {{0x6881a0dd0dc512e4, 0x4fe70dc844a5fafe, 0x1f748e6b8f4a5240, 0x576277cdee01a3ea}}, + {{0x1ef38abc234f305f, 0x9a577fbd1405de08, 0x5e82a51434e62a0d, 0x5ff418726271b7a1}}}, +{{{0x398e080c1789db9d, 0xa7602025f3e778f5, 0xfa98894c06bd035d, 0x106a03dc25a966be}}, + {{0xe5db47e813b69540, 0xf35d2a3b432610e1, 0xac1f26e938781276, 0x29d4db8ca0a0cb69}}, + {{0xd9ad0aaf333353d0, 0x38669da5acd309e5, 0x3c57658ac888f7f0, 0x4ab38a51052cbefa}}}, +{{{0xdfdacbee4324c0e9, 0x054442883f955bb7, 0xdef7aaa8ea31609f, 0x68aee70642287cff}}, + {{0xf68fe2e8809de054, 0xe3bc096a9c82bad1, 0x076353d40aadbf45, 0x7b9b1fb5dea1959e}}, + {{0xf01cc8f17471cc0c, 0x95242e37579082bb, 0x27776093d3e46b5f, 0x2d13d55a28bd85fb}}}, 
+{{{0xfac5d2065b35b8da, 0xa8da8a9a85624bb7, 0xccd2ca913d21cd0f, 0x6b8341ee8bf90d58}}, + {{0xbf019cce7aee7a52, 0xa8ded2b6e454ead3, 0x3c619f0b87a8bb19, 0x3619b5d7560916d8}}, + {{0x3579f26b0282c4b2, 0x64d592f24fafefae, 0xb7cded7b28c8c7c0, 0x6a927b6b7173a8d7}}}, +{{{0x1f6db24f986e4656, 0x1021c02ed1e9105b, 0xf8ff3fff2cc0a375, 0x1d2a6bf8c6c82592}}, + {{0x8d7040863ece88eb, 0xf0e307a980eec08c, 0xac2250610d788fda, 0x056d92a43a0d478d}}, + {{0x1b05a196fc3da5a1, 0x77d7a8c243b59ed0, 0x06da3d6297d17918, 0x66fbb494f12353f7}}}, +{{{0x751a50b9d85c0fb8, 0xd1afdc258bcf097b, 0x2f16a6a38309a969, 0x14ddff9ee5b00659}}, + {{0xd6d70996f12309d6, 0xdbfb2385e9c3d539, 0x46d602b0f7552411, 0x270a0b0557843e0c}}, + {{0x61ff0640a7862bcc, 0x81cac09a5f11abfe, 0x9047830455d12abb, 0x19a4bde1945ae873}}}, +{{{0x9b9f26f520a6200a, 0x64804443cf13eaf8, 0x8a63673f8631edd3, 0x72bbbce11ed39dc1}}, + {{0x40c709dec076c49f, 0x657bfaf27f3e53f6, 0x40662331eca042c4, 0x14b375487eb4df04}}, + {{0xae853c94ab66dc47, 0xeb62343edf762d6e, 0xf08e0e186fb2f7d1, 0x4f0b1c02700ab37a}}}, +{{{0xe1706787d81951fa, 0xa10a2c8eb290c77b, 0xe7382fa03ed66773, 0x0a4d84710bcc4b54}}, + {{0x79fd21ccc1b2e23f, 0x4ae7c281453df52a, 0xc8172ec9d151486b, 0x68abe9443e0a7534}}, + {{0xda12c6c407831dcb, 0x0da230d74d5c510d, 0x4ab1531e6bd404e1, 0x4106b166bcf440ef}}}, +{{{0x02e57a421cd23668, 0x4ad9fb5d0eaef6fd, 0x954e6727b1244480, 0x7f792f9d2699f331}}, + {{0xa485ccd539e4ecf2, 0x5aa3f3ad0555bab5, 0x145e3439937df82d, 0x1238b51e1214283f}}, + {{0x0b886b925fd4d924, 0x60906f7a3626a80d, 0xecd367b4b98abd12, 0x2876beb1def344cf}}}, +{{{0xdc84e93563144691, 0x632fe8a0d61f23f4, 0x4caa800612a9a8d5, 0x48f9dbfa0e9918d3}}, + {{0xd594b3333a8a85f8, 0x4ea37689e78d7d58, 0x73bf9f455e8e351f, 0x5507d7d2bc41ebb4}}, + {{0x1ceb2903299572fc, 0x7c8ccaa29502d0ee, 0x91bfa43411cce67b, 0x5784481964a831e7}}}, +{{{0xda7c2b256768d593, 0x98c1c0574422ca13, 0xf1a80bd5ca0ace1d, 0x29cdd1adc088a690}}, + {{0xd6cfd1ef5fddc09c, 0xe82b3efdf7575dce, 0x25d56b5d201634c2, 0x3041c6bb04ed2b9b}}, + {{0x0ff2f2f9d956e148, 0xade797759f356b2e, 0x1a4698bb5f6c025c, 0x104bbd6814049a7b}}}, +{{{0x51f0fd3168f1ed67, 0x2c811dcdd86f3bc2, 0x44dc5c4304d2f2de, 0x5be8cc57092a7149}}, + {{0xa95d9a5fd67ff163, 0xe92be69d4cc75681, 0xb7f8024cde20f257, 0x204f2a20fb072df5}}, + {{0xc8143b3d30ebb079, 0x7589155abd652e30, 0x653c3c318f6d5c31, 0x2570fb17c279161f}}}, +{{{0x3efa367f2cb61575, 0xf5f96f761cd6026c, 0xe8c7142a65b52562, 0x3dcb65ea53030acd}}, + {{0x192ea9550bb8245a, 0xc8e6fba88f9050d1, 0x7986ea2d88a4c935, 0x241c5f91de018668}}, + {{0x28d8172940de6caa, 0x8fbf2cf022d9733a, 0x16d7fcdd235b01d1, 0x08420edd5fcdf0e5}}}, +{{{0xcdff20ab8362fa4a, 0x57e118d4e21a3e6e, 0xe3179617fc39e62b, 0x0d9a53efbc1769fd}}, + {{0x0358c34e04f410ce, 0xb6135b5a276e0685, 0x5d9670c7ebb91521, 0x04d654f321db889c}}, + {{0x5e7dc116ddbdb5d5, 0x2954deb68da5dd2d, 0x1cb608173334a292, 0x4a7a4f2618991ad7}}}, +{{{0xf4a718025fb15f95, 0x3df65f346b5c1b8f, 0xcdfcf08500e01112, 0x11b50c4cddd31848}}, + {{0x24c3b291af372a4b, 0x93da8270718147f2, 0xdd84856486899ef2, 0x4a96314223e0ee33}}, + {{0xa6e8274408a4ffd6, 0x738e177e9c1576d9, 0x773348b63d02b3f2, 0x4f4bce4dce6bcc51}}}, +{{{0xa71fce5ae2242584, 0x26ea725692f58a9e, 0xd21a09d71cea3cf4, 0x73fcdd14b71c01e6}}, + {{0x30e2616ec49d0b6f, 0xe456718fcaec2317, 0x48eb409bf26b4fa6, 0x3042cee561595f37}}, + {{0x427e7079449bac41, 0x855ae36dbce2310a, 0x4cae76215f841a7c, 0x389e740c9a9ce1d6}}}, +{{{0x64fcb3ae34dcb9ce, 0x97500323e348d0ad, 0x45b3f07d62c6381b, 0x61545379465a6788}}, + {{0xc9bd78f6570eac28, 0xe55b0b3227919ce1, 0x65fc3eaba19b91ed, 0x25c425e5d6263690}}, + 
{{0x3f3e06a6f1d7de6e, 0x3ef976278e062308, 0x8c14f6264e8a6c77, 0x6539a08915484759}}}, +{{{0xe9d21f74c3d2f773, 0xc150544125c46845, 0x624e5ce8f9b99e33, 0x11c5e4aac5cd186c}}, + {{0xddc4dbd414bb4a19, 0x19b2bc3c98424f8e, 0x48a89fd736ca7169, 0x0f65320ef019bd90}}, + {{0xd486d1b1cafde0c6, 0x4f3fe6e3163b5181, 0x59a8af0dfaf2939a, 0x4cabc7bdec33072a}}}, +{{{0x16faa8fb532f7428, 0xdbd42ea046a4e272, 0x5337653b8b9ea480, 0x4065947223973f03}}, + {{0xf7c0a19c1a54a044, 0x4a1c5e2477bd9fbb, 0xa6e3ca115af22972, 0x1819bb953f2e9e0d}}, + {{0x498fbb795e042e84, 0x7d0dd89a7698b714, 0x8bfb0ba427fe6295, 0x36ba82e721200524}}}, +{{{0xd60ecbb74245ec41, 0xfd9be89e34348716, 0xc9240afee42284de, 0x4472f648d0531db4}}, + {{0xc8d69d0a57274ed5, 0x45ba803260804b17, 0xdf3cda102255dfac, 0x77d221232709b339}}, + {{0x498a6d7064ad94d8, 0xa5b5c8fd9af62263, 0x8ca8ed0545c141f4, 0x2c63bec3662d358c}}}, +{{{0x7fe60d8bea787955, 0xb9dc117eb5f401b7, 0x91c7c09a19355cce, 0x22692ef59442bedf}}, + {{0x9a518b3a8586f8bf, 0x9ee71af6cbb196f0, 0xaa0625e6a2385cf2, 0x1deb2176ddd7c8d1}}, + {{0x8563d19a2066cf6c, 0x401bfd8c4dcc7cd7, 0xd976a6becd0d8f62, 0x67cfd773a278b05e}}}, +{{{0x8dec31faef3ee475, 0x99dbff8a9e22fd92, 0x512d11594e26cab1, 0x0cde561eec4310b9}}, + {{0x2d5fa9855a4e586a, 0x65f8f7a449beab7e, 0xaa074dddf21d33d3, 0x185cba721bcb9dee}}, + {{0x93869da3f4e3cb41, 0xbf0392f540f7977e, 0x026204fcd0463b83, 0x3ec91a769eec6eed}}}, +{{{0x1e9df75bf78166ad, 0x4dfda838eb0cd7af, 0xba002ed8c1eaf988, 0x13fedb3e11f33cfc}}, + {{0x0fad2fb7b0a3402f, 0x46615ecbfb69f4a8, 0xf745bcc8c5f8eaa6, 0x7a5fa8794a94e896}}, + {{0x52958faa13cd67a1, 0x965ee0818bdbb517, 0x16e58daa2e8845b3, 0x357d397d5499da8f}}}, +{{{0x1ebfa05fb0bace6c, 0xc934620c1caf9a1e, 0xcc771cc41d82b61a, 0x2d94a16aa5f74fec}}, + {{0x481dacb4194bfbf8, 0x4d77e3f1bae58299, 0x1ef4612e7d1372a0, 0x3a8d867e70ff69e1}}, + {{0x6f58cd5d55aff958, 0xba3eaa5c75567721, 0x75c123999165227d, 0x69be1343c2f2b35e}}}, +{{{0x0e091d5ee197c92a, 0x4f51019f2945119f, 0x143679b9f034e99c, 0x7d88112e4d24c696}}, + {{0x82bbbdac684b8de3, 0xa2f4c7d03fca0718, 0x337f92fbe096aaa8, 0x200d4d8c63587376}}, + {{0x208aed4b4893b32b, 0x3efbf23ebe59b964, 0xd762deb0dba5e507, 0x69607bd681bd9d94}}}, +{{{0xf6be021068de1ce1, 0xe8d518e70edcbc1f, 0xe3effdd01b5505a5, 0x35f63353d3ec3fd0}}, + {{0x3b7f3bd49323a902, 0x7c21b5566b2c6e53, 0xe5ba8ff53a7852a7, 0x28bc77a5838ece00}}, + {{0x63ba78a8e25d8036, 0x63651e0094333490, 0x48d82f20288ce532, 0x3a31abfa36b57524}}}, +{{{0x239e9624089c0a2e, 0xc748c4c03afe4738, 0x17dbed2a764fa12a, 0x639b93f0321c8582}}, + {{0xc08f788f3f78d289, 0xfe30a72ca1404d9f, 0xf2778bfccf65cc9d, 0x7ee498165acb2021}}, + {{0x7bd508e39111a1c3, 0x2b2b90d480907489, 0xe7d2aec2ae72fd19, 0x0edf493c85b602a6}}}, +{{{0xaecc8158599b5a68, 0xea574f0febade20e, 0x4fe41d7422b67f07, 0x403b92e3019d4fb4}}, + {{0x6767c4d284764113, 0xa090403ff7f5f835, 0x1c8fcffacae6bede, 0x04c00c54d1dfa369}}, + {{0x4dc22f818b465cf8, 0x71a0f35a1480eff8, 0xaee8bfad04c7d657, 0x355bb12ab26176f4}}}, +{{{0xa71e64cc7493bbf4, 0xe5bd84d9eca3b0c3, 0x0a6bc50cfa05e785, 0x0f9b8132182ec312}}, + {{0xa301dac75a8c7318, 0xed90039db3ceaa11, 0x6f077cbf3bae3f2d, 0x7518eaf8e052ad8e}}, + {{0xa48859c41b7f6c32, 0x0f2d60bcf4383298, 0x1815a929c9b1d1d9, 0x47c3871bbb1755c4}}}, +{{{0x5144539771ec4f48, 0xf805b17dc98c5d6e, 0xf762c11a47c3c66b, 0x00b89b85764699dc}}, + {{0xfbe65d50c85066b0, 0x62ecc4b0b3a299b0, 0xe53754ea441ae8e0, 0x08fea02ce8d48d5f}}, + {{0x824ddd7668deead0, 0xc86445204b685d23, 0xb514cfcd5d89d665, 0x473829a74f75d537}}}, +{{{0x82d2da754679c418, 0xe63bd7d8b2618df0, 0x355eef24ac47eb0a, 0x2078684c4833c6b4}}, + 
{{0x23d9533aad3902c9, 0x64c2ddceef03588f, 0x15257390cfe12fb4, 0x6c668b4d44e4d390}}, + {{0x3b48cf217a78820c, 0xf76a0ab281273e97, 0xa96c65a78c8eed7b, 0x7411a6054f8a433f}}}, +{{{0x4d659d32b99dc86d, 0x044cdc75603af115, 0xb34c712cdcc2e488, 0x7c136574fb8134ff}}, + {{0x579ae53d18b175b4, 0x68713159f392a102, 0x8455ecba1eef35f5, 0x1ec9a872458c398f}}, + {{0xb8e6a4d400a2509b, 0x9b81d7020bc882b4, 0x57e7cc9bf1957561, 0x3add88a5c7cd6460}}}, +{{{0xab895770b635dcf2, 0x02dfef6cf66c1fbc, 0x85530268beb6d187, 0x249929fccc879e74}}, + {{0x85c298d459393046, 0x8f7e35985ff659ec, 0x1d2ca22af2f66e3a, 0x61ba1131a406a720}}, + {{0xa3d0a0f116959029, 0x023b6b6cba7ebd89, 0x7bf15a3e26783307, 0x5620310cbbd8ece7}}}, +{{{0x528993434934d643, 0xb9dbf806a51222f5, 0x8f6d878fc3f41c22, 0x37676a2a4d9d9730}}, + {{0x6646b5f477e285d6, 0x40e8ff676c8f6193, 0xa6ec7311abb594dd, 0x7ec846f3658cec4d}}, + {{0x9b5e8f3f1da22ec7, 0x130f1d776c01cd13, 0x214c8fcfa2989fb8, 0x6daaf723399b9dd5}}}, +{{{0x591e4a5610628564, 0x2a4bb87ca8b4df34, 0xde2a2572e7a38e43, 0x3cbdabd9fee5046e}}, + {{0x81aebbdd2cd13070, 0x962e4325f85a0e9e, 0xde9391aacadffecb, 0x53177fda52c230e6}}, + {{0xa7bc970650b9de79, 0x3d12a7fbc301b59b, 0x02652e68d36ae38c, 0x79d739835a6199dc}}}, +{{{0xd9354df64131c1bd, 0x758094a186ec5822, 0x4464ee12e459f3c2, 0x6c11fce4cb133282}}, + {{0x21c9d9920d591737, 0x9bea41d2e9b46cd6, 0xe20e84200d89bfca, 0x79d99f946eae5ff8}}, + {{0xf17b483568673205, 0x387deae83caad96c, 0x61b471fd56ffe386, 0x31741195b745a599}}}, +{{{0xe8d10190b77a360b, 0x99b983209995e702, 0xbd4fdff8fa0247aa, 0x2772e344e0d36a87}}, + {{0x17f8ba683b02a047, 0x50212096feefb6c8, 0x70139be21556cbe2, 0x203e44a11d98915b}}, + {{0xd6863eba37b9e39f, 0x105bc169723b5a23, 0x104f6459a65c0762, 0x567951295b4d38d4}}}, +{{{0x535fd60613037524, 0xe210adf6b0fbc26a, 0xac8d0a9b23e990ae, 0x47204d08d72fdbf9}}, + {{0x07242eb30d4b497f, 0x1ef96306b9bccc87, 0x37950934d8116f45, 0x05468d6201405b04}}, + {{0x00f565a9f93267de, 0xcecfd78dc0d58e8a, 0xa215e2dcf318e28e, 0x4599ee919b633352}}}, +{{{0xd3c220ca70e0e76b, 0xb12bea58ea9f3094, 0x294ddec8c3271282, 0x0c3539e1a1d1d028}}, + {{0xac746d6b861ae579, 0x31ab0650f6aea9dc, 0x241d661140256d4c, 0x2f485e853d21a5de}}, + {{0x329744839c0833f3, 0x6fe6257fd2abc484, 0x5327d1814b358817, 0x65712585893fe9bc}}}, +{{{0x9c102fb732a61161, 0xe48e10dd34d520a8, 0x365c63546f9a9176, 0x32f6fe4c046f6006}}, + {{0x81c29f1bd708ee3f, 0xddcb5a05ae6407d0, 0x97aec1d7d2a3eba7, 0x1590521a91d50831}}, + {{0x40a3a11ec7910acc, 0x9013dff8f16d27ae, 0x1a9720d8abb195d4, 0x1bb9fe452ea98463}}}, +{{{0xe9d1d950b3d54f9e, 0x2d5f9cbee00d33c1, 0x51c2c656a04fc6ac, 0x65c091ee3c1cbcc9}}, + {{0xcf5e6c95cc36747c, 0x294201536b0bc30d, 0x453ac67cee797af0, 0x5eae6ab32a8bb3c9}}, + {{0x7083661114f118ea, 0x2b37b87b94349cad, 0x7273f51cb4e99f40, 0x78a2a95823d75698}}}, +{{{0xa2b072e95c8c2ace, 0x69cffc96651e9c4b, 0x44328ef842e7b42b, 0x5dd996c122aadeb3}}, + {{0xb4f23c425ef83207, 0xabf894d3c9a934b5, 0xd0708c1339fd87f7, 0x1876789117166130}}, + {{0x925b5ef0670c507c, 0x819bc842b93c33bf, 0x10792e9a70dd003f, 0x59ad4b7a6e28dc74}}}, +{{{0x5f3a7562eb3dbe47, 0xf7ea38548ebda0b8, 0x00c3e53145747299, 0x1304e9e71627d551}}, + {{0x583b04bfacad8ea2, 0x29b743e8148be884, 0x2b1e583b0810c5db, 0x2b5449e58eb3bbaa}}, + {{0x789814d26adc9cfe, 0x3c1bab3f8b48dd0b, 0xda0fe1fff979c60a, 0x4468de2d7c2dd693}}}, +{{{0x51bb355e9419469e, 0x33e6dc4c23ddc754, 0x93a5b6d6447f9962, 0x6cce7c6ffb44bd63}}, + {{0x4b9ad8c6f86307ce, 0x21113531435d0c28, 0xd4a866c5657a772c, 0x5da6427e63247352}}, + {{0x1a94c688deac22ca, 0xb9066ef7bbae1ff8, 0x88ad8c388d59580f, 0x58f29abfe79f2ca8}}}, 
+{{{0xe90ecfab8de73e68, 0x54036f9f377e76a5, 0xf0495b0bbe015982, 0x577629c4a7f41e36}}, + {{0x4b5a64bf710ecdf6, 0xb14ce538462c293c, 0x3643d056d50b3ab9, 0x6af93724185b4870}}, + {{0x3220024509c6a888, 0xd2e036134b558973, 0x83e236233c33289f, 0x701f25bb0caec18f}}}, +{{{0xc3a8b0f8e4616ced, 0xf700660e9e25a87d, 0x61e3061ff4bca59c, 0x2e0c92bfbdc40be9}}, + {{0x9d18f6d97cbec113, 0x844a06e674bfdbe4, 0x20f5b522ac4e60d6, 0x720a5bc050955e51}}, + {{0x0c3f09439b805a35, 0xe84e8b376242abfc, 0x691417f35c229346, 0x0e9b9cbb144ef0ec}}}, +{{{0xfbbad48ffb5720ad, 0xee81916bdbf90d0e, 0xd4813152635543bf, 0x221104eb3f337bd8}}, + {{0x8dee9bd55db1beee, 0xc9c3ab370a723fb9, 0x44a8f1bf1c68d791, 0x366d44191cfd3cde}}, + {{0x9e3c1743f2bc8c14, 0x2eda26fcb5856c3b, 0xccb82f0e68a7fb97, 0x4167a4e6bc593244}}}, +{{{0x643b9d2876f62700, 0x5d1d9d400e7668eb, 0x1b4b430321fc0684, 0x7938bb7e2255246a}}, + {{0xc2be2665f8ce8fee, 0xe967ff14e880d62c, 0xf12e6e7e2f364eee, 0x34b33370cb7ed2f6}}, + {{0xcdc591ee8681d6cc, 0xce02109ced85a753, 0xed7485c158808883, 0x1176fc6e2dfe65e4}}}, +{{{0xb4af6cd05b9c619b, 0x2ddfc9f4b2a58480, 0x3d4fa502ebe94dc4, 0x08fc3a4c677d5f34}}, + {{0xdb90e28949770eb8, 0x98fbcc2aacf440a3, 0x21354ffeded7879b, 0x1f6a3e54f26906b6}}, + {{0x60a4c199d30734ea, 0x40c085b631165cd6, 0xe2333e23f7598295, 0x4f2fad0116b900d1}}}, +{{{0x44beb24194ae4e54, 0x5f541c511857ef6c, 0xa61e6b2d368d0498, 0x445484a4972ef7ab}}, + {{0x962cd91db73bb638, 0xe60577aafc129c08, 0x6f619b39f3b61689, 0x3451995f2944ee81}}, + {{0x9152fcd09fea7d7c, 0x4a816c94b0935cf6, 0x258e9aaa47285c40, 0x10b89ca6042893b7}}}, +{{{0x9b2a426e3b646025, 0x32127190385ce4cf, 0xa25cffc2dd6dea45, 0x06409010bea8de75}}, + {{0xd67cded679d34aa0, 0xcc0b9ec0cc4db39f, 0xa535a456e35d190f, 0x2e05d9eaf61f6fef}}, + {{0xc447901ad61beb59, 0x661f19bce5dc880a, 0x24685482b7ca6827, 0x293c778cefe07f26}}}, +{{{0x86809e7007069096, 0xaad75b15e4e50189, 0x07f35715a21a0147, 0x0487f3f112815d5e}}, + {{0x16c795d6a11ff200, 0xcb70d0e2b15815c9, 0x89f293209b5395b5, 0x50b8c2d031e47b4f}}, + {{0x48350c08068a4962, 0x6ffdd05351092c9a, 0x17af4f4aaf6fc8dd, 0x4b0553b53cdba58b}}}, +{{{0x9c65fcbe1b32ff79, 0xeb75ea9f03b50f9b, 0xfced2a6c6c07e606, 0x35106cd551717908}}, + {{0xbf05211b27c152d4, 0x5ec26849bd1af639, 0x5e0b2caa8e6fab98, 0x054c8bdd50bd0840}}, + {{0x38a0b12f1dcf073d, 0x4b60a8a3b7f6a276, 0xfed5ac25d3404f9a, 0x72e82d5e5505c229}}}, +{{{0x6b0b697ff0d844c8, 0xbb12f85cd979cb49, 0xd2a541c6c1da0f1f, 0x7b7c242958ce7211}}, + {{0x00d9cdfd69771d02, 0x410276cd6cfbf17e, 0x4c45306c1cb12ec7, 0x2857bf1627500861}}, + {{0x9f21903f0101689e, 0xd779dfd3bf861005, 0xa122ee5f3deb0f1b, 0x510df84b485a00d4}}}, +{{{0xa54133bb9277a1fa, 0x74ec3b6263991237, 0x1a3c54dc35d2f15a, 0x2d347144e482ba3a}}, + {{0x24b3c887c70ac15e, 0xb0f3a557fb81b732, 0x9b2cde2fe578cc1b, 0x4cf7ed0703b54f8e}}, + {{0x6bd47c6598fbee0f, 0x9e4733e2ab55be2d, 0x1093f624127610c5, 0x4e05e26ad0a1eaa4}}}, +{{{0xda9b6b624b531f20, 0x429a760e77509abb, 0xdbe9f522e823cb80, 0x618f1856880c8f82}}, + {{0x1833c773e18fe6c0, 0xe3c4711ad3c87265, 0x3bfd3c4f0116b283, 0x1955875eb4cd4db8}}, + {{0x6da6de8f0e399799, 0x7ad61aa440fda178, 0xb32cd8105e3563dd, 0x15f6beae2ae340ae}}}, +{{{0x862bcb0c31ec3a62, 0x810e2b451138f3c2, 0x788ec4b839dac2a4, 0x28f76867ae2a9281}}, + {{0xba9a0f7b9245e215, 0xf368612dd98c0dbb, 0x2e84e4cbf220b020, 0x6ba92fe962d90eda}}, + {{0x3e4df9655884e2aa, 0xbd62fbdbdbd465a5, 0xd7596caa0de9e524, 0x6e8042ccb2b1b3d7}}}, +{{{0xf10d3c29ce28ca6e, 0xbad34540fcb6093d, 0xe7426ed7a2ea2d3f, 0x08af9d4e4ff298b9}}, + {{0x1530653616521f7e, 0x660d06b896203dba, 0x2d3989bc545f0879, 0x4b5303af78ebd7b0}}, + 
{{0x72f8a6c3bebcbde8, 0x4f0fca4adc3a8e89, 0x6fa9d4e8c7bfdf7a, 0x0dcf2d679b624eb7}}}, +{{{0x3d5947499718289c, 0x12ebf8c524533f26, 0x0262bfcb14c3ef15, 0x20b878d577b7518e}}, + {{0x753941be5a45f06e, 0xd07caeed6d9c5f65, 0x11776b9c72ff51b6, 0x17d2d1d9ef0d4da9}}, + {{0x27f2af18073f3e6a, 0xfd3fe519d7521069, 0x22e3b72c3ca60022, 0x72214f63cc65c6a7}}}, +{{{0xb4e37f405307a693, 0xaba714d72f336795, 0xd6fbd0a773761099, 0x5fdf48c58171cbc9}}, + {{0x1d9db7b9f43b29c9, 0xd605824a4f518f75, 0xf2c072bd312f9dc4, 0x1f24ac855a1545b0}}, + {{0x24d608328e9505aa, 0x4748c1d10c1420ee, 0xc7ffe45c06fb25a2, 0x00ba739e2ae395e6}}}, +{{{0x592e98de5c8790d6, 0xe5bfb7d345c2a2df, 0x115a3b60f9b49922, 0x03283a3e67ad78f3}}, + {{0xae4426f5ea88bb26, 0x360679d984973bfb, 0x5c9f030c26694e50, 0x72297de7d518d226}}, + {{0x48241dc7be0cb939, 0x32f19b4d8b633080, 0xd3dfc90d02289308, 0x05e1296846271945}}}, +{{{0xba82eeb32d9c495a, 0xceefc8fcf12bb97c, 0xb02dabae93b5d1e0, 0x39c00c9c13698d9b}}, + {{0xadbfbbc8242c4550, 0xbcc80cecd03081d9, 0x843566a6f5c8df92, 0x78cf25d38258ce4c}}, + {{0x15ae6b8e31489d68, 0xaa851cab9c2bf087, 0xc9a75a97f04efa05, 0x006b52076b3ff832}}}, +{{{0x29e0cfe19d95781c, 0xb681df18966310e2, 0x57df39d370516b39, 0x4d57e3443bc76122}}, + {{0xf5cb7e16b9ce082d, 0x3407f14c417abc29, 0xd4b36bce2bf4a7ab, 0x7de2e9561a9f75ce}}, + {{0xde70d4f4b6a55ecb, 0x4801527f5d85db99, 0xdbc9c440d3ee9a81, 0x6b2a90af1a6029ed}}}, +{{{0x6923f4fc9ae61e97, 0x5735281de03f5fd1, 0xa764ae43e6edd12d, 0x5fd8f4e9d12d3e4a}}, + {{0x77ebf3245bb2d80a, 0xd8301b472fb9079b, 0xc647e6f24cee7333, 0x465812c8276c2109}}, + {{0x4d43beb22a1062d9, 0x7065fb753831dc16, 0x180d4a7bde2968d7, 0x05b32c2b1cb16790}}}, +{{{0xc8c05eccd24da8fd, 0xa1cf1aac05dfef83, 0xdbbeeff27df9cd61, 0x3b5556a37b471e99}}, + {{0xf7fca42c7ad58195, 0x3214286e4333f3cc, 0xb6c29d0d340b979d, 0x31771a48567307e1}}, + {{0x32b0c524e14dd482, 0xedb351541a2ba4b6, 0xa3d16048282b5af3, 0x4fc079d27a7336eb}}}, +{{{0x51c938b089bf2f7f, 0x2497bd6502dfe9a7, 0xffffc09c7880e453, 0x124567cecaf98e92}}, + {{0xdc348b440c86c50d, 0x1337cbc9cc94e651, 0x6422f74d643e3cb9, 0x241170c2bae3cd08}}, + {{0x3ff9ab860ac473b4, 0xf0911dee0113e435, 0x4ae75060ebc6c4af, 0x3f8612966c87000d}}}, +{{{0x0c9c5303f7957be4, 0xa3c31a20e085c145, 0xb0721d71d0850050, 0x0aba390eab0bf2da}}, + {{0x529fdffe638c7bf3, 0xdf2b9e60388b4995, 0xe027b34f1bad0249, 0x7bc92fc9b9fa74ed}}, + {{0x9f97ef2e801ad9f9, 0x83697d5479afda3a, 0xe906b3ffbd596b50, 0x02672b37dd3fb8e0}}}, +{{{0x48b2ca8b260885e4, 0xa4286bec82b34c1c, 0x937e1a2617f58f74, 0x741d1fcbab2ca2a5}}, + {{0xee9ba729398ca7f5, 0xeb9ca6257a4849db, 0x29eb29ce7ec544e1, 0x232ca21ef736e2c8}}, + {{0xbf61423d253fcb17, 0x08803ceafa39eb14, 0xf18602df9851c7af, 0x0400f3a049e3414b}}}, +{{{0xabce0476ba61c55b, 0x36a3d6d7c4d39716, 0x6eb259d5e8d82d09, 0x0c9176e984d756fb}}, + {{0x2efba412a06e7b06, 0x146785452c8d2560, 0xdf9713ebd67a91c7, 0x32830ac7157eadf3}}, + {{0x0e782a7ab73769e8, 0x04a05d7875b18e2c, 0x29525226ebcceae1, 0x0d794f8383eba820}}}, +{{{0xff35f5cb9e1516f4, 0xee805bcf648aae45, 0xf0d73c2bb93a9ef3, 0x097b0bf22092a6c2}}, + {{0x7be44ce7a7a2e1ac, 0x411fd93efad1b8b7, 0x1734a1d70d5f7c9b, 0x0d6592233127db16}}, + {{0xc48bab1521a9d733, 0xa6c2eaead61abb25, 0x625c6c1cc6cb4305, 0x7fc90fea93eb3a67}}}, +{{{0x0408f1fe1f5c5926, 0x1a8f2f5e3b258bf4, 0x40a951a2fdc71669, 0x6598ee93c98b577e}}, + {{0xc527deb59c7cb23d, 0x955391695328404e, 0xd64392817ccf2c7a, 0x6ce97dabf7d8fa11}}, + {{0x25b5a8e50ef7c48f, 0xeb6034116f2ce532, 0xc5e75173e53de537, 0x73119fa08c12bb03}}}, +{{{0xed30129453f1a4cb, 0xbce621c9c8f53787, 0xfacb2b1338bee7b9, 0x3025798a9ea8428c}}, + 
{{0x7845b94d21f4774d, 0xbf62f16c7897b727, 0x671857c03c56522b, 0x3cd6a85295621212}}, + {{0x3fecde923aeca999, 0xbdaa5b0062e8c12f, 0x67b99dfc96988ade, 0x3f52c02852661036}}}, +{{{0xffeaa48e2a1351c6, 0x28624754fa7f53d7, 0x0b5ba9e57582ddf1, 0x60c0104ba696ac59}}, + {{0x9258bf99eec416c6, 0xac8a5017a9d2f671, 0x629549ab16dea4ab, 0x05d0e85c99091569}}, + {{0x051de020de9cbe97, 0xfa07fc56b50bcf74, 0x378cec9f0f11df65, 0x36853c69ab96de4d}}}, +{{{0x36d9b8de78f39b2d, 0x7f42ed71a847b9ec, 0x241cd1d679bd3fde, 0x6a704fec92fbce6b}}, + {{0x4433c0b0fac5e7be, 0x724bae854c08dcbe, 0xf1f24cc446978f9b, 0x4a0aff6d62825fc8}}, + {{0xe917fb9e61095301, 0xc102df9402a092f8, 0xbf09e2f5fa66190b, 0x681109bee0dcfe37}}}, +{{{0x559a0cc9782a0dde, 0x551dcdb2ea718385, 0x7f62865b31ef238c, 0x504aa7767973613d}}, + {{0x9c18fcfa36048d13, 0x29159db373899ddd, 0xdc9f350b9f92d0aa, 0x26f57eee878a19d4}}, + {{0x0cab2cd55687efb1, 0x5180d162247af17b, 0x85c15a344f5a2467, 0x4041943d9dba3069}}}, +{{{0xc3c0eeba43ebcc96, 0x8d749c9c26ea9caf, 0xd9fa95ee1c77ccc6, 0x1420a1d97684340f}}, + {{0x4b217743a26caadd, 0x47a6b424648ab7ce, 0xcb1d4f7a03fbc9e3, 0x12d931429800d019}}, + {{0x00c67799d337594f, 0x5e3c5140b23aa47b, 0x44182854e35ff395, 0x1b4f92314359a012}}}, +{{{0x3e5c109d89150951, 0x39cefa912de9696a, 0x20eae43f975f3020, 0x239b572a7f132dae}}, + {{0x33cf3030a49866b1, 0x251f73d2215f4859, 0xab82aa4051def4f6, 0x5ff191d56f9a23f6}}, + {{0x819ed433ac2d9068, 0x2883ab795fc98523, 0xef4572805593eb3d, 0x020c526a758f36cb}}}, +{{{0x779834f89ed8dbbc, 0xc8f2aaf9dc7ca46c, 0xa9524cdca3e1b074, 0x02aacc4615313877}}, + {{0xe931ef59f042cc89, 0x2c589c9d8e124bb6, 0xadc8e18aaec75997, 0x452cfe0a5602c50c}}, + {{0x86a0f7a0647877df, 0xbbc464270e607c9f, 0xab17ea25f1fb11c9, 0x4cfb7d7b304b877b}}}, +{{{0x72b43d6cb89b75fe, 0x54c694d99c6adc80, 0xb8c3aa373ee34c9f, 0x14b4622b39075364}}, + {{0xe28699c29789ef12, 0x2b6ecd71df57190d, 0xc343c857ecc970d0, 0x5b1d4cbc434d3ac5}}, + {{0xb6fb2615cc0a9f26, 0x3a4f0e2bb88dcce5, 0x1301498b3369a705, 0x2f98f71258592dd1}}}, +{{{0x0c94a74cb50f9e56, 0x5b1ff4a98e8e1320, 0x9a2acc2182300f67, 0x3a6ae249d806aaf9}}, + {{0x2e12ae444f54a701, 0xfcfe3ef0a9cbd7de, 0xcebf890d75835de0, 0x1d8062e9e7614554}}, + {{0x657ada85a9907c5a, 0x1a0ea8b591b90f62, 0x8d0e1dfbdf34b4e9, 0x298b8ce8aef25ff3}}}, +{{{0x2a927953eff70cb2, 0x4b89c92a79157076, 0x9418457a30a7cf6a, 0x34b8a8404d5ce485}}, + {{0x837a72ea0a2165de, 0x3fab07b40bcf79f6, 0x521636c77738ae70, 0x6ba6271803a7d7dc}}, + {{0xc26eecb583693335, 0xd5a813df63b5fefd, 0xa293aa9aa4b22573, 0x71d62bdd465e1c6a}}}, +{{{0x6533cc28d378df80, 0xf6db43790a0fa4b4, 0xe3645ff9f701da5a, 0x74d5f317f3172ba4}}, + {{0xcd2db5dab1f75ef5, 0xd77f95cf16b065f5, 0x14571fea3f49f085, 0x1c333621262b2b3d}}, + {{0xa86fe55467d9ca81, 0x398b7c752b298c37, 0xda6d0892e3ac623b, 0x4aebcc4547e9d98c}}}, +{{{0x53175a7205d21a77, 0xb0c04422d3b934d4, 0xadd9f24bdd5deadc, 0x074f46e69f10ff8c}}, + {{0x0de9b204a059a445, 0xe15cb4aa4b17ad0f, 0xe1bbec521f79c557, 0x2633f1b9d071081b}}, + {{0xc1fb4177018b9910, 0xa6ea20dc6c0fe140, 0xd661f3e74354c6ff, 0x5ecb72e6f1a3407a}}}, +{{{0xa515a31b2259fb4e, 0x0960f3972bcac52f, 0xedb52fec8d3454cb, 0x382e2720c476c019}}, + {{0xfeeae106e8e86997, 0x9863337f98d09383, 0x9470480eaa06ebef, 0x038b6898d4c5c2d0}}, + {{0xf391c51d8ace50a6, 0x3142d0b9ae2d2948, 0xdb4d5a1a7f24ca80, 0x21aeba8b59250ea8}}}, +{{{0x24f13b34cf405530, 0x3c44ea4a43088af7, 0x5dd5c5170006a482, 0x118eb8f8890b086d}}, + {{0x53853600f0087f23, 0x4c461879da7d5784, 0x6af303deb41f6860, 0x0a3c16c5c27c18ed}}, + {{0x17e49c17cc947f3d, 0xccc6eda6aac1d27b, 0xdf6092ceb0f08e56, 0x4909b3e22c67c36b}}}, 
+{{{0x9c9c85ea63fe2e89, 0xbe1baf910e9412ec, 0x8f7baa8a86fbfe7b, 0x0fb17f9fef968b6c}}, + {{0x59a16676706ff64e, 0x10b953dd0d86a53d, 0x5848e1e6ce5c0b96, 0x2d8b78e712780c68}}, + {{0x79d5c62eafc3902b, 0x773a215289e80728, 0xc38ae640e10120b9, 0x09ae23717b2b1a6d}}}, +{{{0xbb6a192a4e4d083c, 0x34ace0630029e192, 0x98245a59aafabaeb, 0x6d9c8a9ada97faac}}, + {{0x10ab8fa1ad32b1d0, 0xe9aced1be2778b24, 0xa8856bc0373de90f, 0x66f35ddddda53996}}, + {{0xd27d9afb24997323, 0x1bb7e07ef6f01d2e, 0x2ba7472df52ecc7f, 0x03019b4f646f9dc8}}}, +{{{0x04a186b5565345cd, 0xeee76610bcc4116a, 0x689c73b478fb2a45, 0x387dcbff65697512}}, + {{0xaf09b214e6b3dc6b, 0x3f7573b5ad7d2f65, 0xd019d988100a23b0, 0x392b63a58b5c35f7}}, + {{0x4093addc9c07c205, 0xc565be15f532c37e, 0x63dbecfd1583402a, 0x61722b4aef2e032e}}}, +{{{0x0012aafeecbd47af, 0x55a266fb1cd46309, 0xf203eb680967c72c, 0x39633944ca3c1429}}, + {{0xd6b07a5581cb0e3c, 0x290ff006d9444969, 0x08680b6a16dcda1f, 0x5568d2b75a06de59}}, + {{0x8d0cb88c1b37cfe1, 0x05b6a5a3053818f3, 0xf2e9bc04b787d959, 0x6beba1249add7f64}}}, +{{{0x1d06005ca5b1b143, 0x6d4c6bb87fd1cda2, 0x6ef5967653fcffe7, 0x097c29e8c1ce1ea5}}, + {{0x5c3cecb943f5a53b, 0x9cc9a61d06c08df2, 0xcfba639a85895447, 0x5a845ae80df09fd5}}, + {{0x4ce97dbe5deb94ca, 0x38d0a4388c709c48, 0xc43eced4a169d097, 0x0a1249fff7e587c3}}}, +{{{0x12f0071b276d01c9, 0xe7b8bac586c48c70, 0x5308129b71d6fba9, 0x5d88fbf95a3db792}}, + {{0x0b408d9e7354b610, 0x806b32535ba85b6e, 0xdbe63a034a58a207, 0x173bd9ddc9a1df2c}}, + {{0x2b500f1efe5872df, 0x58d6582ed43918c1, 0xe6ed278ec9673ae0, 0x06e1cd13b19ea319}}}, +{{{0x40d0ad516f166f23, 0x118e32931fab6abe, 0x3fe35e14a04d088e, 0x3080603526e16266}}, + {{0x472baf629e5b0353, 0x3baa0b90278d0447, 0x0c785f469643bf27, 0x7f3a6a1a8d837b13}}, + {{0xf7e644395d3d800b, 0x95a8d555c901edf6, 0x68cd7830592c6339, 0x30d0fded2e51307e}}}, +{{{0xe0594d1af21233b3, 0x1bdbe78ef0cc4d9c, 0x6965187f8f499a77, 0x0a9214202c099868}}, + {{0x9cb4971e68b84750, 0xa09572296664bbcf, 0x5c8de72672fa412b, 0x4615084351c589d9}}, + {{0xbc9019c0aeb9a02e, 0x55c7110d16034cae, 0x0e6df501659932ec, 0x3bca0d2895ca5dfe}}}, +{{{0x40f031bc3c5d62a4, 0x19fc8b3ecff07a60, 0x98183da2130fb545, 0x5631deddae8f13cd}}, + {{0x9c688eb69ecc01bf, 0xf0bc83ada644896f, 0xca2d955f5f7a9fe2, 0x4ea8b4038df28241}}, + {{0x2aed460af1cad202, 0x46305305a48cee83, 0x9121774549f11a5f, 0x24ce0930542ca463}}}, +{{{0x1fe890f5fd06c106, 0xb5c468355d8810f2, 0x827808fe6e8caf3e, 0x41d4e3c28a06d74b}}, + {{0x3fcfa155fdf30b85, 0xd2f7168e36372ea4, 0xb2e064de6492f844, 0x549928a7324f4280}}, + {{0xf26e32a763ee1a2e, 0xae91e4b7d25ffdea, 0xbc3bd33bd17f4d69, 0x491b66dec0dcff6a}}}, +{{{0x98f5b13dc7ea32a7, 0xe3d5f8cc7e16db98, 0xac0abf52cbf8d947, 0x08f338d0c85ee4ac}}, + {{0x75f04a8ed0da64a1, 0xed222caf67e2284b, 0x8234a3791f7b7ba4, 0x4cf6b8b0b7018b67}}, + {{0xc383a821991a73bd, 0xab27bc01df320c7a, 0xc13d331b84777063, 0x530d4a82eb078a99}}}, +{{{0x004c3630e1f94825, 0x7e2d78268cab535a, 0xc7482323cc84ff8b, 0x65ea753f101770b9}}, + {{0x6d6973456c9abf9e, 0x257fb2fc4900a880, 0x2bacf412c8cfb850, 0x0db3e7e00cbfbd5b}}, + {{0x3d66fc3ee2096363, 0x81d62c7f61b5cb6b, 0x0fbe044213443b1a, 0x02a4ec1921e1a1db}}}, +{{{0x5ce6259a3b24b8a2, 0xb8577acc45afa0b8, 0xcccbe6e88ba07037, 0x3d143c51127809bf}}, + {{0xf5c86162f1cf795f, 0x118c861926ee57f2, 0x172124851c063578, 0x36d12b5dec067fcf}}, + {{0x126d279179154557, 0xd5e48f5cfc783a0a, 0x36bdb6e8df179bac, 0x2ef517885ba82859}}}, +{{{0x88bd438cd11e0d4a, 0x30cb610d43ccf308, 0xe09a0e3791937bcc, 0x4559135b25b1720c}}, + {{0x1ea436837c6da1e9, 0xf9c189af1fb9bdbe, 0x303001fcce5dd155, 0x28a7c99ebc57be52}}, + 
{{0xb8fd9399e8d19e9d, 0x908191cb962423ff, 0xb2b948d747c742a3, 0x37f33226d7fb44c4}}}, +{{{0x0dae8767b55f6e08, 0x4a43b3b35b203a02, 0xe3725a6e80af8c79, 0x0f7a7fd1705fa7a3}}, + {{0x33912553c821b11d, 0x66ed42c241e301df, 0x066fcc11104222fd, 0x307a3b41c192168f}}, + {{0x8eeb5d076eb55ce0, 0x2fc536bfaa0d925a, 0xbe81830fdcb6c6e8, 0x556c7045827baf52}}}, +{{{0x8e2b517302e9d8b7, 0xe3e52269248714e8, 0xbd4fbd774ca960b5, 0x6f4b4199c5ecada9}}, + {{0xb94b90022bf44406, 0xabd4237eff90b534, 0x7600a960faf86d3a, 0x2f45abdac2322ee3}}, + {{0x61af4912c8ef8a6a, 0xe58fa4fe43fb6e5e, 0xb5afcc5d6fd427cf, 0x6a5393281e1e11eb}}}, +{{{0xf3da5139a5d1ee89, 0x8145457cff936988, 0x3f622fed00e188c4, 0x0f513815db8b5a3d}}, + {{0x0fff04fe149443cf, 0x53cac6d9865cddd7, 0x31385b03531ed1b7, 0x5846a27cacd1039d}}, + {{0x4ff5cdac1eb08717, 0x67e8b29590f2e9bc, 0x44093b5e237afa99, 0x0d414bed8708b8b2}}}, +{{{0xcfb68265fd0e75f6, 0xe45b3e28bb90e707, 0x7242a8de9ff92c7a, 0x685b3201933202dd}}, + {{0x81886a92294ac9e8, 0x23162b45d55547be, 0x94cfbc4403715983, 0x50eb8fdb134bc401}}, + {{0xc0b73ec6d6b330cd, 0x84e44807132faff1, 0x732b7352c4a5dee1, 0x5d7c7cf1aa7cd2d2}}}, +{{{0xaf3b46bf7a4aafa2, 0xb78705ec4d40d411, 0x114f0c6aca7c15e3, 0x3f364faaa9489d4d}}, + {{0x33d1013e9b73a562, 0x925cef5748ec26e1, 0xa7fce614dd468058, 0x78b0fad41e9aa438}}, + {{0xbf56a431ed05b488, 0xa533e66c9c495c7e, 0xe8652baf87f3651a, 0x0241800059d66c33}}}, +{{{0xceb077fea37a5be4, 0xdb642f02e5a5eeb7, 0xc2e6d0c5471270b8, 0x4771b65538e4529c}}, + {{0x28350c7dcf38ea01, 0x7c6cdbc0b2917ab6, 0xace7cfbe857082f7, 0x4d2845aba2d9a1e0}}, + {{0xbb537fe0447070de, 0xcba744436dd557df, 0xd3b5a3473600dbcb, 0x4aeabbe6f9ffd7f8}}}, +{{{0x4630119e40d8f78c, 0xa01a9bc53c710e11, 0x486d2b258910dd79, 0x1e6c47b3db0324e5}}, + {{0x6a2134bcc4a9c8f2, 0xfbf8fd1c8ace2e37, 0x000ae3049911a0ba, 0x046e3a616bc89b9e}}, + {{0x14e65442f03906be, 0x4a019d54e362be2a, 0x68ccdfec8dc230c7, 0x7cfb7e3faf6b861c}}}, +{{{0x4637974e8c58aedc, 0xb9ef22fbabf041a4, 0xe185d956e980718a, 0x2f1b78fab143a8a6}}, + {{0x96eebffb305b2f51, 0xd3f938ad889596b8, 0xf0f52dc746d5dd25, 0x57968290bb3a0095}}, + {{0xf71ab8430a20e101, 0xf393658d24f0ec47, 0xcf7509a86ee2eed1, 0x7dc43e35dc2aa3e1}}}, +{{{0x85966665887dd9c3, 0xc90f9b314bb05355, 0xc6e08df8ef2079b1, 0x7ef72016758cc12f}}, + {{0x5a782a5c273e9718, 0x3576c6995e4efd94, 0x0f2ed8051f237d3e, 0x044fb81d82d50a99}}, + {{0xc1df18c5a907e3d9, 0x57b3371dce4c6359, 0xca704534b201bb49, 0x7f79823f9c30dd2e}}}, +{{{0x8334d239a3b513e8, 0xc13670d4b91fa8d8, 0x12b54136f590bd33, 0x0a4e0373d784d9b4}}, + {{0x6a9c1ff068f587ba, 0x0827894e0050c8de, 0x3cbf99557ded5be7, 0x64a9b0431c06d6f0}}, + {{0x2eb3d6a15b7d2919, 0xb0b4f6a0d53a8235, 0x7156ce4389a45d47, 0x071a7d0ace18346c}}}, +{{{0xd3072daac887ba0b, 0x01262905bfa562ee, 0xcf543002c0ef768b, 0x2c3bcc7146ea7e9c}}, + {{0xcc0c355220e14431, 0x0d65950709b15141, 0x9af5621b209d5f36, 0x7c69bcf7617755d3}}, + {{0x07f0d7eb04e8295f, 0x10db18252f50f37d, 0xe951a9a3171798d7, 0x6f5a9a7322aca51d}}}, +{{{0x8ba1000c2f41c6c5, 0xc49f79c10cfefb9b, 0x4efa47703cc51c9f, 0x494e21a2e147afca}}, + {{0xe729d4eba3d944be, 0x8d9e09408078af9e, 0x4525567a47869c03, 0x02ab9680ee8d3b24}}, + {{0xefa48a85dde50d9a, 0x219a224e0fb9a249, 0xfa091f1dd91ef6d9, 0x6b5d76cbea46bb34}}}, +{{{0x8857556cec0cd994, 0x6472dc6f5cd01dba, 0xaf0169148f42b477, 0x0ae333f685277354}}, + {{0xe0f941171e782522, 0xf1e6ae74036936d3, 0x408b3ea2d0fcc746, 0x16fb869c03dd313e}}, + {{0x288e199733b60962, 0x24fc72b4d8abe133, 0x4811f7ed0991d03e, 0x3f81e38b8f70d075}}}, +{{{0x7f910fcc7ed9affe, 0x545cb8a12465874b, 0xa8397ed24b0c4704, 0x50510fc104f50993}}, + 
{{0x0adb7f355f17c824, 0x74b923c3d74299a4, 0xd57c3e8bcbf8eaf7, 0x0ad3e2d34cdedc3d}}, + {{0x6f0c0fc5336e249d, 0x745ede19c331cfd9, 0xf2d6fd0009eefe1c, 0x127c158bf0fa1ebe}}}, +{{{0xf6197c422e9879a2, 0xa44addd452ca3647, 0x9b413fc14b4eaccb, 0x354ef87d07ef4f68}}, + {{0xdea28fc4ae51b974, 0x1d9973d3744dfe96, 0x6240680b873848a8, 0x4ed82479d167df95}}, + {{0xfee3b52260c5d975, 0x50352efceb41b0b8, 0x8808ac30a9f6653c, 0x302d92d20539236d}}}, +{{{0x4c59023fcb3efb7c, 0x6c2fcb99c63c2a94, 0xba4190e2c3c7e084, 0x0e545daea51874d9}}, + {{0x957b8b8b0df53c30, 0x2a1c770a8e60f098, 0xbbc7a670345796de, 0x22a48f9a90c99bc9}}, + {{0x6b7dc0dc8d3fac58, 0x5497cd6ce6e42bfd, 0x542f7d1bf400d305, 0x4159f47f048d9136}}}, +{{{0x20ad660839e31e32, 0xf81e1bd58405be50, 0xf8064056f4dabc69, 0x14d23dd4ce71b975}}, + {{0x748515a8bbd24839, 0x77128347afb02b55, 0x50ba2ac649a2a17f, 0x060525513ad730f1}}, + {{0xf2398e098aa27f82, 0x6d7982bb89a1b024, 0xfa694084214dd24c, 0x71ab966fa32301c3}}}, +{{{0x2dcbd8e34ded02fc, 0x1151f3ec596f22aa, 0xbca255434e0328da, 0x35768fbe92411b22}}, + {{0xb1088a0702809955, 0x43b273ea0b43c391, 0xca9b67aefe0686ed, 0x605eecbf8335f4ed}}, + {{0x83200a656c340431, 0x9fcd71678ee59c2f, 0x75d4613f71300f8a, 0x7a912faf60f542f9}}}, +{{{0xb204585e5edc1a43, 0x9f0e16ee5897c73c, 0x5b82c0ae4e70483c, 0x624a170e2bddf9be}}, + {{0x253f4f8dfa2d5597, 0x25e49c405477130c, 0x00c052e5996b1102, 0x33cb966e33bb6c4a}}, + {{0x597028047f116909, 0x828ac41c1e564467, 0x70417dbde6217387, 0x721627aefbac4384}}}, +{{{0x97d03bc38736add5, 0x2f1422afc532b130, 0x3aa68a057101bbc4, 0x4c946cf7e74f9fa7}}, + {{0xfd3097bc410b2f22, 0xf1a05da7b5cfa844, 0x61289a1def57ca74, 0x245ea199bb821902}}, + {{0xaedca66978d477f8, 0x1898ba3c29117fe1, 0xcf73f983720cbd58, 0x67da12e6b8b56351}}}, +{{{0x7067e187b4bd6e07, 0x6e8f0203c7d1fe74, 0x93c6aa2f38c85a30, 0x76297d1f3d75a78a}}, + {{0x2b7ef3d38ec8308c, 0x828fd7ec71eb94ab, 0x807c3b36c5062abd, 0x0cb64cb831a94141}}, + {{0x3030fc33534c6378, 0xb9635c5ce541e861, 0x15d9a9bed9b2c728, 0x49233ea3f3775dcb}}}, +{{{0x629398fa8dbffc3a, 0xe12fe52dd54db455, 0xf3be11dfdaf25295, 0x628b140dce5e7b51}}, + {{0x7b3985fe1c9f249b, 0x4fd6b2d5a1233293, 0xceb345941adf4d62, 0x6987ff6f542de50c}}, + {{0x47e241428f83753c, 0x6317bebc866af997, 0xdabb5b433d1a9829, 0x074d8d245287fb2d}}}, +{{{0x8337d9cd440bfc31, 0x729d2ca1af318fd7, 0xa040a4a4772c2070, 0x46002ef03a7349be}}, + {{0x481875c6c0e31488, 0x219429b2e22034b4, 0x7223c98a31283b65, 0x3420d60b342277f9}}, + {{0xfaa23adeaffe65f7, 0x78261ed45be0764c, 0x441c0a1e2f164403, 0x5aea8e567a87d395}}}, +{{{0x7813c1a2bca4283d, 0xed62f091a1863dd9, 0xaec7bcb8c268fa86, 0x10e5d3b76f1cae4c}}, + {{0x2dbc6fb6e4e0f177, 0x04e1bf29a4bd6a93, 0x5e1966d4787af6e8, 0x0edc5f5eb426d060}}, + {{0x5453bfd653da8e67, 0xe9dc1eec24a9f641, 0xbf87263b03578a23, 0x45b46c51361cba72}}}, +{{{0xa9402abf314f7fa1, 0xe257f1dc8e8cf450, 0x1dbbd54b23a8be84, 0x2177bfa36dcb713b}}, + {{0xce9d4ddd8a7fe3e4, 0xab13645676620e30, 0x4b594f7bb30e9958, 0x5c1c0aef321229df}}, + {{0x37081bbcfa79db8f, 0x6048811ec25f59b3, 0x087a76659c832487, 0x4ae619387d8ab5bb}}}, +{{{0x8ddbf6aa5344a32e, 0x7d88eab4b41b4078, 0x5eb0eb974a130d60, 0x1a00d91b17bf3e03}}, + {{0x61117e44985bfb83, 0xfce0462a71963136, 0x83ac3448d425904b, 0x75685abe5ba43d64}}, + {{0x6e960933eb61f2b2, 0x543d0fa8c9ff4952, 0xdf7275107af66569, 0x135529b623b0e6aa}}}, +{{{0x18f0dbd7add1d518, 0x979f7888cfc11f11, 0x8732e1f07114759b, 0x79b5b81a65ca3a01}}, + {{0xf5c716bce22e83fe, 0xb42beb19e80985c1, 0xec9da63714254aae, 0x5972ea051590a613}}, + {{0x0fd4ac20dc8f7811, 0x9a9ad294ac4d4fa8, 0xc01b2d64b3360434, 0x4f7e9c95905f3bdb}}}, 
+{{{0x62674bbc5781302e, 0xd8520f3989addc0f, 0x8c2999ae53fbd9c6, 0x31993ad92e638e4c}}, + {{0x71c8443d355299fe, 0x8bcd3b1cdbebead7, 0x8092499ef1a49466, 0x1942eec4a144adc8}}, + {{0x7dac5319ae234992, 0x2c1b3d910cea3e92, 0x553ce494253c1122, 0x2a0a65314ef9ca75}}}, +{{{0x2db7937ff7f927c2, 0xdb741f0617d0a635, 0x5982f3a21155af76, 0x4cf6e218647c2ded}}, + {{0xcf361acd3c1c793a, 0x2f9ebcac5a35bc3b, 0x60e860e9a8cda6ab, 0x055dc39b6dea1a13}}, + {{0xb119227cc28d5bb6, 0x07e24ebc774dffab, 0xa83c78cee4a32c89, 0x121a307710aa24b6}}}, +{{{0xe4db5d5e9f034a97, 0xe153fc093034bc2d, 0x460546919551d3b1, 0x333fc76c7a40e52d}}, + {{0xd659713ec77483c9, 0x88bfe077b82b96af, 0x289e28231097bcd3, 0x527bb94a6ced3a9b}}, + {{0x563d992a995b482e, 0x3405d07c6e383801, 0x485035de2f64d8e5, 0x6b89069b20a7a9f7}}}, +{{{0x812aa0416270220d, 0x995a89faf9245b4e, 0xffadc4ce5072ef05, 0x23bc2103aa73eb73}}, + {{0x4082fa8cb5c7db77, 0x068686f8c734c155, 0x29e6c8d9f6e7a57e, 0x0473d308a7639bcf}}, + {{0xcaee792603589e05, 0x2b4b421246dcc492, 0x02a1ef74e601a94f, 0x102f73bfde04341a}}}, +{{{0xb5a2d50c7ec20d3e, 0xc64bdd6ea0c97263, 0x56e89052c1ff734d, 0x4929c6f72b2ffaba}}, + {{0x358ecba293a36247, 0xaf8f9862b268fd65, 0x412f7e9968a01c89, 0x5786f312cd754524}}, + {{0x337788ffca14032c, 0xf3921028447f1ee3, 0x8b14071f231bccad, 0x4c817b4bf2344783}}}, +{{{0x0ff853852871b96e, 0xe13e9fab60c3f1bb, 0xeefd595325344402, 0x0a37c37075b7744b}}, + {{0x413ba057a40b4484, 0xba4c2e1a4f5f6a43, 0x614ba0a5aee1d61c, 0x78a1531a8b05dc53}}, + {{0x6cbdf1703ad0562b, 0x8ecf4830c92521a3, 0xdaebd303fd8424e7, 0x72ad82a42e5ec56f}}}, +{{{0x3f9e8e35bafb65f6, 0x39d69ec8f27293a1, 0x6cb8cd958cf6a3d0, 0x1734778173adae6d}}, + {{0xc368939167024bc3, 0x8e69d16d49502fda, 0xfcf2ec3ce45f4b29, 0x065f669ea3b4cbc4}}, + {{0x8a00aec75532db4d, 0xb869a4e443e31bb1, 0x4a0f8552d3a7f515, 0x19adeb7c303d7c08}}}, +{{{0xc720cb6153ead9a3, 0x55b2c97f512b636e, 0xb1e35b5fd40290b1, 0x2fd9ccf13b530ee2}}, + {{0x9d05ba7d43c31794, 0x2470c8ff93322526, 0x8323dec816197438, 0x2852709881569b53}}, + {{0x07bd475b47f796b8, 0xd2c7b013542c8f54, 0x2dbd23f43b24f87e, 0x6551afd77b0901d6}}}, +{{{0x4546baaf54aac27f, 0xf6f66fecb2a45a28, 0x582d1b5b562bcfe8, 0x44b123f3920f785f}}, + {{0x68a24ce3a1d5c9ac, 0xbb77a33d10ff6461, 0x0f86ce4425d3166e, 0x56507c0950b9623b}}, + {{0x1206f0b7d1713e63, 0x353fe3d915bafc74, 0x194ceb970ad9d94d, 0x62fadd7cf9d03ad3}}}, +{{{0xc6b5967b5598a074, 0x5efe91ce8e493e25, 0xd4b72c4549280888, 0x20ef1149a26740c2}}, + {{0x3cd7bc61e7ce4594, 0xcd6b35a9b7dd267e, 0xa080abc84366ef27, 0x6ec7c46f59c79711}}, + {{0x2f07ad636f09a8a2, 0x8697e6ce24205e7d, 0xc0aefc05ee35a139, 0x15e80958b5f9d897}}}, +{{{0x25a5ef7d0c3e235b, 0x6c39c17fbe134ee7, 0xc774e1342dc5c327, 0x021354b892021f39}}, + {{0x4dd1ed355bb061c4, 0x42dc0cef941c0700, 0x61305dc1fd86340e, 0x56b2cc930e55a443}}, + {{0x1df79da6a6bfc5a2, 0x02f3a2749fde4369, 0xb323d9f2cda390a7, 0x7be0847b8774d363}}}, +{{{0x8c99cc5a8b3f55c3, 0x0611d7253fded2a0, 0xed2995ff36b70a36, 0x1f699a54d78a2619}}, + {{0x1466f5af5307fa11, 0x817fcc7ded6c0af2, 0x0a6de44ec3a4a3fb, 0x74071475bc927d0b}}, + {{0xe77292f373e7ea8a, 0x296537d2cb045a31, 0x1bd0653ed3274fde, 0x2f9a2c4476bd2966}}}, +{{{0xeb18b9ab7f5745c6, 0x023a8aee5787c690, 0xb72712da2df7afa9, 0x36597d25ea5c013d}}, + {{0xa2b4dae0b5511c9a, 0x7ac860292bffff06, 0x981f375df5504234, 0x3f6bd725da4ea12d}}, + {{0x734d8d7b106058ac, 0xd940579e6fc6905f, 0x6466f8f99202932d, 0x7b7ecc19da60d6d0}}}, +{{{0x78c2373c695c690d, 0xdd252e660642906e, 0x951d44444ae12bd2, 0x4235ad7601743956}}, + {{0x6dae4a51a77cfa9b, 0x82263654e7a38650, 0x09bbffcd8f2d82db, 0x03bedc661bf5caba}}, + 
{{0x6258cb0d078975f5, 0x492942549189f298, 0xa0cab423e2e36ee4, 0x0e7ce2b0cdf066a1}}}, +{{{0xc494643ac48c85a3, 0xfd361df43c6139ad, 0x09db17dd3ae94d48, 0x666e0a5d8fb4674a}}, + {{0xfea6fedfd94b70f9, 0xf130c051c1fcba2d, 0x4882d47e7f2fab89, 0x615256138aeceeb5}}, + {{0x2abbf64e4870cb0d, 0xcd65bcf0aa458b6b, 0x9abe4eba75e8985d, 0x7f0bc810d514dee4}}}, +{{{0xb9006ba426f4136f, 0x8d67369e57e03035, 0xcbc8dfd94f463c28, 0x0d1f8dbcf8eedbf5}}, + {{0x83ac9dad737213a0, 0x9ff6f8ba2ef72e98, 0x311e2edd43ec6957, 0x1d3a907ddec5ab75}}, + {{0xba1693313ed081dc, 0x29329fad851b3480, 0x0128013c030321cb, 0x00011b44a31bfde3}}}, +{{{0x3fdfa06c3fc66c0c, 0x5d40e38e4dd60dd2, 0x7ae38b38268e4d71, 0x3ac48d916e8357e1}}, + {{0x16561f696a0aa75c, 0xc1bf725c5852bd6a, 0x11a8dd7f9a7966ad, 0x63d988a2d2851026}}, + {{0x00120753afbd232e, 0xe92bceb8fdd8f683, 0xf81669b384e72b91, 0x33fad52b2368a066}}}, +{{{0x540649c6c5e41e16, 0x0af86430333f7735, 0xb2acfcd2f305e746, 0x16c0f429a256dca7}}, + {{0x8d2cc8d0c422cfe8, 0x072b4f7b05a13acb, 0xa3feb6e6ecf6a56f, 0x3cc355ccb90a71e2}}, + {{0xe9b69443903e9131, 0xb8a494cb7a5637ce, 0xc87cd1a4baba9244, 0x631eaf426bae7568}}}, +{{{0xb3e90410da66fe9f, 0x85dd4b526c16e5a6, 0xbc3d97611ef9bf83, 0x5599648b1ea919b5}}, + {{0x47d975b9a3700de8, 0x7280c5fbe2f80552, 0x53658f2732e45de1, 0x431f2c7f665f80b5}}, + {{0xd6026344858f7b19, 0x14ab352fa1ea514a, 0x8900441a2090a9d7, 0x7b04715f91253b26}}}, +{{{0x83edbd28acf6ae43, 0x86357c8b7d5c7ab4, 0xc0404769b7eb2c44, 0x59b37bf5c2f6583f}}, + {{0xb376c280c4e6bac6, 0x970ed3dd6d1d9b0b, 0xb09a9558450bf944, 0x48d0acfa57cde223}}, + {{0xb60f26e47dabe671, 0xf1d1a197622f3a37, 0x4208ce7ee9960394, 0x16234191336d3bdb}}}, +{{{0xf19aeac733a63aef, 0x2c7fba5d4442454e, 0x5da87aa04795e441, 0x413051e1a4e0b0f5}}, + {{0x852dd1fd3d578bbe, 0x2b65ce72c3286108, 0x658c07f4eace2273, 0x0933f804ec38ab40}}, + {{0xa7ab69798d496476, 0x8121aadefcb5abc8, 0xa5dc12ef7b539472, 0x07fd47065e45351a}}}, +{{{0xc8583c3d258d2bcd, 0x17029a4daf60b73f, 0xfa0fc9d6416a3781, 0x1c1e5fba38b3fb23}}, + {{0x304211559ae8e7c3, 0xf281b229944882a5, 0x8a13ac2e378250e4, 0x014afa0954ba48f4}}, + {{0xcb3197001bb3666c, 0x330060524bffecb9, 0x293711991a88233c, 0x291884363d4ed364}}}, +{{{0x033c6805dc4babfa, 0x2c15bf5e5596ecc1, 0x1bc70624b59b1d3b, 0x3ede9850a19f0ec5}}, + {{0xfb9d37c3bc1ab6eb, 0x02be14534d57a240, 0xf4d73415f8a5e1f6, 0x5964f4300ccc8188}}, + {{0xe44a23152d096800, 0x5c08c55970866996, 0xdf2db60a46affb6e, 0x579155c1f856fd89}}}, +{{{0x96324edd12e0c9ef, 0x468b878df2420297, 0x199a3776a4f573be, 0x1e7fbcf18e91e92a}}, + {{0xb5f16b630817e7a6, 0x808c69233c351026, 0x324a983b54cef201, 0x53c092084a485345}}, + {{0xd2d41481f1cbafbf, 0x231d2db6716174e5, 0x0b7d7656e2a55c98, 0x3e955cd82aa495f6}}}, +{{{0xe48f535e3ed15433, 0xd075692a0d7270a3, 0x40fbd21daade6387, 0x14264887cf4495f5}}, + {{0xab39f3ef61bb3a3f, 0x8eb400652eb9193e, 0xb5de6ecc38c11f74, 0x654d7e9626f3c49f}}, + {{0xe564cfdd5c7d2ceb, 0x82eeafded737ccb9, 0x6107db62d1f9b0ab, 0x0b6baac3b4358dbb}}}, +{{{0x7ae62bcb8622fe98, 0x47762256ceb891af, 0x1a5a92bcf2e406b4, 0x7d29401784e41501}}, + {{0x204abad63700a93b, 0xbe0023d3da779373, 0xd85f0346633ab709, 0x00496dc490820412}}, + {{0x1c74b88dc27e6360, 0x074854268d14850c, 0xa145fb7b3e0dcb30, 0x10843f1b43803b23}}}, +{{{0xc5f90455376276dd, 0xce59158dd7645cd9, 0x92f65d511d366b39, 0x11574b6e526996c4}}, + {{0xd56f672de324689b, 0xd1da8aedb394a981, 0xdd7b58fe9168cfed, 0x7ce246cd4d56c1e8}}, + {{0xb8f4308e7f80be53, 0x5f3cb8cb34a9d397, 0x18a961bd33cc2b2c, 0x710045fb3a9af671}}}, +{{{0x73f93d36101b95eb, 0xfaef33794f6f4486, 0x5651735f8f15e562, 0x7fa3f19058b40da1}}, + 
{{0xa03fc862059d699e, 0x2370cfa19a619e69, 0xc4fe3b122f823deb, 0x1d1b056fa7f0844e}}, + {{0x1bc64631e56bf61f, 0xd379ab106e5382a3, 0x4d58c57e0540168d, 0x566256628442d8e4}}}, +{{{0xb9e499def6267ff6, 0x7772ca7b742c0843, 0x23a0153fe9a4f2b1, 0x2cdfdfecd5d05006}}, + {{0xdd499cd61ff38640, 0x29cd9bc3063625a0, 0x51e2d8023dd73dc3, 0x4a25707a203b9231}}, + {{0x2ab7668a53f6ed6a, 0x304242581dd170a1, 0x4000144c3ae20161, 0x5721896d248e49fc}}}, +{{{0x0b6e5517fd181bae, 0x9022629f2bb963b4, 0x5509bce932064625, 0x578edd74f63c13da}}, + {{0x285d5091a1d0da4e, 0x4baa6fa7b5fe3e08, 0x63e5177ce19393b3, 0x03c935afc4b030fd}}, + {{0x997276c6492b0c3d, 0x47ccc2c4dfe205fc, 0xdcd29b84dd623a3c, 0x3ec2ab590288c7a2}}}, +{{{0xa1a0d27be4d87bb9, 0xa98b4deb61391aed, 0x99a0ddd073cb9b83, 0x2dd5c25a200fcace}}, + {{0xa7213a09ae32d1cb, 0x0f2b87df40f5c2d5, 0x0baea4c6e81eab29, 0x0e1bf66c6adbac5e}}, + {{0xe2abd5e9792c887e, 0x1a020018cb926d5d, 0xbfba69cdbaae5f1e, 0x730548b35ae88f5f}}}, +{{{0xc43551a3cba8b8ee, 0x65a26f1db2115f16, 0x760f4f52ab8c3850, 0x3043443b411db8ca}}, + {{0x805b094ba1d6e334, 0xbf3ef17709353f19, 0x423f06cb0622702b, 0x585a2277d87845dd}}, + {{0xa18a5f8233d48962, 0x6698c4b5ec78257f, 0xa78e6fa5373e41ff, 0x7656278950ef981f}}}, +{{{0x38c3cf59d51fc8c0, 0x9bedd2fd0506b6f2, 0x26bf109fab570e8f, 0x3f4160a8c1b846a6}}, + {{0xe17073a3ea86cf9d, 0x3a8cfbb707155fdc, 0x4853e7fc31838a8e, 0x28bbf484b613f616}}, + {{0xf2612f5c6f136c7c, 0xafead107f6dd11be, 0x527e9ad213de6f33, 0x1e79cb358188f75d}}}, +{{{0x013436c3eef7e3f1, 0x828b6a7ffe9e10f8, 0x7ff908e5bcf9defc, 0x65d7951b3a3b3831}}, + {{0x77e953d8f5e08181, 0x84a50c44299dded9, 0xdc6c2d0c864525e5, 0x478ab52d39d1f2f4}}, + {{0x66a6a4d39252d159, 0xe5dde1bc871ac807, 0xb82c6b40a6c1c96f, 0x16d87a411a212214}}}, +{{{0xb3bd7e5a42066215, 0x879be3cd0c5a24c1, 0x57c05db1d6f994b7, 0x28f87c8165f38ca6}}, + {{0xfba4d5e2d54e0583, 0xe21fafd72ebd99fa, 0x497ac2736ee9778f, 0x1f990b577a5a6dde}}, + {{0xa3344ead1be8f7d6, 0x7d1e50ebacea798f, 0x77c6569e520de052, 0x45882fe1534d6d3e}}}, +{{{0x6669345d757983d6, 0x62b6ed1117aa11a6, 0x7ddd1857985e128f, 0x688fe5b8f626f6dd}}, + {{0xd8ac9929943c6fe4, 0xb5f9f161a38392a2, 0x2699db13bec89af3, 0x7dcf843ce405f074}}, + {{0x6c90d6484a4732c0, 0xd52143fdca563299, 0xb3be28c3915dc6e1, 0x6739687e7327191b}}}, +{{{0x9f65c5ea200814cf, 0x840536e169a31740, 0x8b0ed13925c8b4ad, 0x0080dbafe936361d}}, + {{0x8ce5aad0c9cb971f, 0x1156aaa99fd54a29, 0x41f7247015af9b78, 0x1fe8cca8420f49aa}}, + {{0x72a1848f3c0cc82a, 0x38c560c2877c9e54, 0x5004e228ce554140, 0x042418a103429d71}}}, +{{{0x899dea51abf3ff5f, 0x9b93a8672fc2d8ba, 0x2c38cb97be6ebd5c, 0x114d578497263b5d}}, + {{0x58e84c6f20816247, 0x8db2b2b6e36fd793, 0x977182561d484d85, 0x0822024f8632abd7}}, + {{0xb301bb7c6b1beca3, 0x55393f6dc6eb1375, 0x910d281097b6e4eb, 0x1ad4548d9d479ea3}}}, +{{{0xcd5a7da0389a48fd, 0xb38fa4aa9a78371e, 0xc6d9761b2cdb8e6c, 0x35cf51dbc97e1443}}, + {{0xa06fe66d0fe9fed3, 0xa8733a401c587909, 0x30d14d800df98953, 0x41ce5876c7b30258}}, + {{0x59ac3bc5d670c022, 0xeae67c109b119406, 0x9798bdf0b3782fda, 0x651e3201fd074092}}}, +{{{0xd63d8483ef30c5cf, 0x4cd4b4962361cc0c, 0xee90e500a48426ac, 0x0af51d7d18c14eeb}}, + {{0xa57ba4a01efcae9e, 0x769f4beedc308a94, 0xd1f10eeb3603cb2e, 0x4099ce5e7e441278}}, + {{0x1ac98e4f8a5121e9, 0x7dae9544dbfa2fe0, 0x8320aa0dd6430df9, 0x667282652c4a2fb5}}}, +{{{0x874621f4d86bc9ab, 0xb54c7bbe56fe6fea, 0x077a24257fadc22c, 0x1ab53be419b90d39}}, + {{0xada8b6e02946db23, 0x1c0ce51a7b253ab7, 0x8448c85a66dd485b, 0x7f1fc025d0675adf}}, + {{0xd8ee1b18319ea6aa, 0x004d88083a21f0da, 0x3bd6aa1d883a4f4b, 0x4db9a3a6dfd9fd14}}}, 
+{{{0x8ce7b23bb99c0755, 0x35c5d6edc4f50f7a, 0x7e1e2ed2ed9b50c3, 0x36305f16e8934da1}}, + {{0xd95b00bbcbb77c68, 0xddbc846a91f17849, 0x7cf700aebe28d9b3, 0x5ce1285c85d31f3e}}, + {{0x31b6972d98b0bde8, 0x7d920706aca6de5b, 0xe67310f8908a659f, 0x50fac2a6efdf0235}}}, +{{{0xf3d3a9f35b880f5a, 0xedec050cdb03e7c2, 0xa896981ff9f0b1a2, 0x49a4ae2bac5e34a4}}, + {{0x295b1c86f6f449bc, 0x51b2e84a1f0ab4dd, 0xc001cb30aa8e551d, 0x6a28d35944f43662}}, + {{0x28bb12ee04a740e0, 0x14313bbd9bce8174, 0x72f5b5e4e8c10c40, 0x7cbfb19936adcd5b}}}, +{{{0xa311ddc26b89792d, 0x1b30b4c6da512664, 0x0ca77b4ccf150859, 0x1de443df1b009408}}, + {{0x8e793a7acc36e6e0, 0xf9fab7a37d586eed, 0x3a4f9692bae1f4e4, 0x1c14b03eff5f447e}}, + {{0x19647bd114a85291, 0x57b76cb21034d3af, 0x6329db440f9d6dfa, 0x5ef43e586a571493}}}, +{{{0xef782014385675a6, 0xa2649f30aafda9e8, 0x4cd1eb505cdfa8cb, 0x46115aba1d4dc0b3}}, + {{0xa66dcc9dc80c1ac0, 0x97a05cf41b38a436, 0xa7ebf3be95dbd7c6, 0x7da0b8f68d7e7dab}}, + {{0xd40f1953c3b5da76, 0x1dac6f7321119e9b, 0x03cc6021feb25960, 0x5a5f887e83674b4b}}}, +{{{0x8f6301cf70a13d11, 0xcfceb815350dd0c4, 0xf70297d4a4bca47e, 0x3669b656e44d1434}}, + {{0x9e9628d3a0a643b9, 0xb5c3cb00e6c32064, 0x9b5302897c2dec32, 0x43e37ae2d5d1c70c}}, + {{0x387e3f06eda6e133, 0x67301d5199a13ac0, 0xbd5ad8f836263811, 0x6a21e6cd4fd5e9be}}}, +{{{0xf1c6170a3046e65f, 0x58712a2a00d23524, 0x69dbbd3c8c82b755, 0x586bf9f1a195ff57}}, + {{0xef4129126699b2e3, 0x71d30847708d1301, 0x325432d01182b0bd, 0x45371b07001e8b36}}, + {{0xa6db088d5ef8790b, 0x5278f0dc610937e5, 0xac0349d261a16eb8, 0x0eafb03790e52179}}}, +{{{0x960555c13748042f, 0x219a41e6820baa11, 0x1c81f73873486d0c, 0x309acc675a02c661}}, + {{0x5140805e0f75ae1d, 0xec02fbe32662cc30, 0x2cebdf1eea92396d, 0x44ae3344c5435bb3}}, + {{0x9cf289b9bba543ee, 0xf3760e9d5ac97142, 0x1d82e5c64f9360aa, 0x62d5221b7f94678f}}}, +{{{0x524c299c18d0936d, 0xc86bb56c8a0c1a0c, 0xa375052edb4a8631, 0x5c0efde4bc754562}}, + {{0x7585d4263af77a3c, 0xdfae7b11fee9144d, 0xa506708059f7193d, 0x14f29a5383922037}}, + {{0xdf717edc25b2d7f5, 0x21f970db99b53040, 0xda9234b7c3ed4c62, 0x5e72365c7bee093e}}}, +{{{0x575bfc074571217f, 0x3779675d0694d95b, 0x9a0a37bbf4191e33, 0x77f1104c47b4eabc}}, + {{0x7d9339062f08b33e, 0x5b9659e5df9f32be, 0xacff3dad1f9ebdfd, 0x70b20555cb7349b7}}, + {{0xbe5113c555112c4c, 0x6688423a9a881fcd, 0x446677855e503b47, 0x0e34398f4a06404a}}}, +{{{0xb67d22d93ecebde8, 0x09b3e84127822f07, 0x743fa61fb05b6d8d, 0x5e5405368a362372}}, + {{0x18930b093e4b1928, 0x7de3e10e73f3f640, 0xf43217da73395d6f, 0x6f8aded6ca379c3e}}, + {{0xe340123dfdb7b29a, 0x487b97e1a21ab291, 0xf9967d02fde6949e, 0x780de72ec8d3de97}}}, +{{{0x0ae28545089ae7bc, 0x388ddecf1c7f4d06, 0x38ac15510a4811b8, 0x0eb28bf671928ce4}}, + {{0x671feaf300f42772, 0x8f72eb2a2a8c41aa, 0x29a17fd797373292, 0x1defc6ad32b587a6}}, + {{0xaf5bbe1aef5195a7, 0x148c1277917b15ed, 0x2991f7fb7ae5da2e, 0x467d201bf8dd2867}}}, +{{{0x7906ee72f7bd2e6b, 0x05d270d6109abf4e, 0x8d5cfe45b941a8a4, 0x44c218671c974287}}, + {{0x745f9d56296bc318, 0x993580d4d8152e65, 0xb0e5b13f5839e9ce, 0x51fc2b28d43921c0}}, + {{0x1b8fd11795e2a98c, 0x1c4e5ee12b6b6291, 0x5b30e7107424b572, 0x6e6b9de84c4f4ac6}}}, +{{{0xdff25fce4b1de151, 0xd841c0c7e11c4025, 0x2554b3c854749c87, 0x2d292459908e0df9}}, + {{0x6b7c5f10f80cb088, 0x736b54dc56e42151, 0xc2b620a5c6ef99c4, 0x5f4c802cc3a06f42}}, + {{0x9b65c8f17d0752da, 0x881ce338c77ee800, 0xc3b514f05b62f9e3, 0x66ed5dd5bec10d48}}}, +{{{0x7d38a1c20bb2089d, 0x808334e196ccd412, 0xc4a70b8c6c97d313, 0x2eacf8bc03007f20}}, + {{0xf0adf3c9cbca047d, 0x81c3b2cbf4552f6b, 0xcfda112d44735f93, 0x1f23a0c77e20048c}}, + 
{{0xf235467be5bc1570, 0x03d2d9020dbab38c, 0x27529aa2fcf9e09e, 0x0840bef29d34bc50}}}, +{{{0x796dfb35dc10b287, 0x27176bcd5c7ff29d, 0x7f3d43e8c7b24905, 0x0304f5a191c54276}}, + {{0xcd54e06b7f37e4eb, 0x8cc15f87f5e96cca, 0xb8248bb0d3597dce, 0x246affa06074400c}}, + {{0x37d88e68fbe45321, 0x86097548c0d75032, 0x4e9b13ef894a0d35, 0x25a83cac5753d325}}}, +{{{0x10222f48eed8165e, 0x623fc1234b8bcf3a, 0x1e145c09c221e8f0, 0x7ccfa59fca782630}}, + {{0x9f0f66293952b6e2, 0x33db5e0e0934267b, 0xff45252bd609fedc, 0x06be10f5c506e0c9}}, + {{0x1a9615a9b62a345f, 0x22050c564a52fecc, 0xa7a2788528bc0dfe, 0x5e82770a1a1ee71d}}}, +{{{0x35425183ad896a5c, 0xe8673afbe78d52f6, 0x2c66f25f92a35f64, 0x09d04f3b3b86b102}}, + {{0xe802e80a42339c74, 0x34175166a7fffae5, 0x34865d1f1c408cae, 0x2cca982c605bc5ee}}, + {{0xfd2d5d35197dbe6e, 0x207c2eea8be4ffa3, 0x2613d8db325ae918, 0x7a325d1727741d3e}}}, +{{{0xd036b9bbd16dfde2, 0xa2055757c497a829, 0x8e6cc966a7f12667, 0x4d3b1a791239c180}}, + {{0xecd27d017e2a076a, 0xd788689f1636495e, 0x52a61af0919233e5, 0x2a479df17bb1ae64}}, + {{0x9e5eee8e33db2710, 0x189854ded6c43ca5, 0xa41c22c592718138, 0x27ad5538a43a5e9b}}}, +{{{0x2746dd4b15350d61, 0xd03fcbc8ee9521b7, 0xe86e365a138672ca, 0x510e987f7e7d89e2}}, + {{0xcb5a7d638e47077c, 0x8db7536120a1c059, 0x549e1e4d8bedfdcc, 0x080153b7503b179d}}, + {{0xdda69d930a3ed3e3, 0x3d386ef1cd60a722, 0xc817ad58bdaa4ee6, 0x23be8d554fe7372a}}}, +{{{0x95fe919a74ef4fad, 0x3a827becf6a308a2, 0x964e01d309a47b01, 0x71c43c4f5ba3c797}}, + {{0xbc1ef4bd567ae7a9, 0x3f624cb2d64498bd, 0xe41064d22c1f4ec8, 0x2ef9c5a5ba384001}}, + {{0xb6fd6df6fa9e74cd, 0xf18278bce4af267a, 0x8255b3d0f1ef990e, 0x5a758ca390c5f293}}}, +{{{0xa2b72710d9462495, 0x3aa8c6d2d57d5003, 0xe3d400bfa0b487ca, 0x2dbae244b3eb72ec}}, + {{0x8ce0918b1d61dc94, 0x8ded36469a813066, 0xd4e6a829afe8aad3, 0x0a738027f639d43f}}, + {{0x980f4a2f57ffe1cc, 0x00670d0de1839843, 0x105c3f4a49fb15fd, 0x2698ca635126a69c}}}, +{{{0xe765318832b0ba78, 0x381831f7925cff8b, 0x08a81b91a0291fcc, 0x1fb43dcc49caeb07}}, + {{0x2e3d702f5e3dd90e, 0x9e3f0918e4d25386, 0x5e773ef6024da96a, 0x3c004b0c4afa3332}}, + {{0x9aa946ac06f4b82b, 0x1ca284a5a806c4f3, 0x3ed3265fc6cd4787, 0x6b43fd01cd1fd217}}}, +{{{0xc7a75d4b4697c544, 0x15fdf848df0fffbf, 0x2868b9ebaa46785a, 0x5a68d7105b52f714}}, + {{0xb5c742583e760ef3, 0x75dc52b9ee0ab990, 0xbf1427c2072b923f, 0x73420b2d6ff0d9f0}}, + {{0xaf2cf6cb9e851e06, 0x8f593913c62238c4, 0xda8ab89699fbf373, 0x3db5632fea34bc9e}}}, +{{{0xf46eee2bf75dd9d8, 0x0d17b1f6396759a5, 0x1bf2d131499e7273, 0x04321adf49d75f13}}, + {{0x2e4990b1829825d5, 0xedeaeb873e9a8991, 0xeef03d394c704af8, 0x59197ea495df2b0e}}, + {{0x04e16019e4e55aae, 0xe77b437a7e2f92e9, 0xc7ce2dc16f159aa4, 0x45eafdc1f4d70cc0}}}, +{{{0x698401858045d72b, 0x4c22faa2cf2f0651, 0x941a36656b222dc6, 0x5a5eebc80362dade}}, + {{0xb60e4624cfccb1ed, 0x59dbc292bd5c0395, 0x31a09d1ddc0481c9, 0x3f73ceea5d56d940}}, + {{0xb7a7bfd10a4e8dc6, 0xbe57007e44c9b339, 0x60c1207f1557aefa, 0x26058891266218db}}}, +{{{0x59f704a68360ff04, 0xc3d93fde7661e6f4, 0x831b2a7312873551, 0x54ad0c2e4e615d57}}, + {{0x4c818e3cc676e542, 0x5e422c9303ceccad, 0xec07cccab4129f08, 0x0dedfa10b24443b8}}, + {{0xee3b67d5b82b522a, 0x36f163469fa5c1eb, 0xa5b4d2f26ec19fd3, 0x62ecb2baa77a9408}}}, +{{{0xe5ed795261152b3d, 0x4962357d0eddd7d1, 0x7482c8d0b96b4c71, 0x2e59f919a966d8be}}, + {{0x92072836afb62874, 0x5fcd5e8579e104a5, 0x5aad01adc630a14a, 0x61913d5075663f98}}, + {{0x0dc62d361a3231da, 0xfa47583294200270, 0x02d801513f9594ce, 0x3ddbc2a131c05d5c}}}, +{{{0x3f50a50a4ffb81ef, 0xb1e035093bf420bf, 0x9baa8e1cc6aa2cd0, 0x32239861fa237a40}}, + 
{{0xfb735ac2004a35d1, 0x31de0f433a6607c3, 0x7b8591bfc528d599, 0x55be9a25f5bb050c}}, + {{0x0d005acd33db3dbf, 0x0111b37c80ac35e2, 0x4892d66c6f88ebeb, 0x770eadb16508fbcd}}}, +{{{0x8451f9e05e4e89dd, 0xc06302ffbc793937, 0x5d22749556a6495c, 0x09a6755ca05603fb}}, + {{0xf1d3b681a05071b9, 0x2207659a3592ff3a, 0x5f0169297881e40e, 0x16bedd0e86ba374e}}, + {{0x5ecccc4f2c2737b5, 0x43b79e0c2dccb703, 0x33e008bc4ec43df3, 0x06c1b840f07566c0}}}, +{{{0x7688a5c6a388f877, 0x02a96c14deb2b6ac, 0x64c9f3431b8c2af8, 0x3628435554a1eed6}}, + {{0x69ee9e7f9b02805c, 0xcbff828a547d1640, 0x3d93a869b2430968, 0x46b7b8cd3fe26972}}, + {{0xe9812086fe7eebe0, 0x4cba6be72f515437, 0x1d04168b516efae9, 0x5ea1391043982cb9}}}, +{{{0x49125c9cf4702ee1, 0x4520b71f8b25b32d, 0x33193026501fef7e, 0x656d8997c8d2eb2b}}, + {{0x6f2b3be4d5d3b002, 0xafec33d96a09c880, 0x035f73a4a8bcc4cc, 0x22c5b9284662198b}}, + {{0xcb58c8fe433d8939, 0x89a0cb2e6a8d7e50, 0x79ca955309fbbe5a, 0x0c626616cd7fc106}}}, +{{{0x1ffeb80a4879b61f, 0x6396726e4ada21ed, 0x33c7b093368025ba, 0x471aa0c6f3c31788}}, + {{0x8fdfc379fbf454b1, 0x45a5a970f1a4b771, 0xac921ef7bad35915, 0x42d088dca81c2192}}, + {{0x8fda0f37a0165199, 0x0adadb77c8a0e343, 0x20fbfdfcc875e820, 0x1cf2bea80c2206e7}}}, +{{{0xc2ddf1deb36202ac, 0x92a5fe09d2e27aa5, 0x7d1648f6fc09f1d3, 0x74c2cc0513bc4959}}, + {{0x982d6e1a02c0412f, 0x90fa4c83db58e8fe, 0x01c2f5bcdcb18bc0, 0x686e0c90216abc66}}, + {{0x1fadbadba54395a7, 0xb41a02a0ae0da66a, 0xbf19f598bba37c07, 0x6a12b8acde48430d}}}, +{{{0xf8daea1f39d495d9, 0x592c190e525f1dfc, 0xdb8cbd04c9991d1b, 0x11f7fda3d88f0cb7}}, + {{0x793bdd801aaeeb5f, 0x00a2a0aac1518871, 0xe8a373a31f2136b4, 0x48aab888fc91ef19}}, + {{0x041f7e925830f40e, 0x002d6ca979661c06, 0x86dc9ff92b046a2e, 0x760360928b0493d1}}}, +{{{0x21bb41c6120cf9c6, 0xeab2aa12decda59b, 0xc1a72d020aa48b34, 0x215d4d27e87d3b68}}, + {{0xb43108e5695a0b05, 0x6cb00ee8ad37a38b, 0x5edad6eea3537381, 0x3f2602d4b6dc3224}}, + {{0xc8b247b65bcaf19c, 0x49779dc3b1b2c652, 0x89a180bbd5ece2e2, 0x13f098a3cec8e039}}}, +{{{0x9adc0ff9ce5ec54b, 0x039c2a6b8c2f130d, 0x028007c7f0f89515, 0x78968314ac04b36b}}, + {{0xf3aa57a22796bb14, 0x883abab79b07da21, 0xe54be21831a0391c, 0x5ee7fb38d83205f9}}, + {{0x538dfdcb41446a8e, 0xa5acfda9434937f9, 0x46af908d263c8c78, 0x61d0633c9bca0d09}}}, +{{{0x63744935ffdb2566, 0xc5bd6b89780b68bb, 0x6f1b3280553eec03, 0x6e965fd847aed7f5}}, + {{0xada328bcf8fc73df, 0xee84695da6f037fc, 0x637fb4db38c2a909, 0x5b23ac2df8067bdc}}, + {{0x9ad2b953ee80527b, 0xe88f19aafade6d8d, 0x0e711704150e82cf, 0x79b9bbb9dd95dedc}}}, +{{{0xebb355406a3126c2, 0xd26383a868c8c393, 0x6c0c6429e5b97a82, 0x5065f158c9fd2147}}, + {{0xd1997dae8e9f7374, 0xa032a2f8cfbb0816, 0xcd6cba126d445f0a, 0x1ba811460accb834}}, + {{0x708169fb0c429954, 0xe14600acd76ecf67, 0x2eaab98a70e645ba, 0x3981f39e58a4faf2}}}, +{{{0x18fb8a7559230a93, 0x1d168f6960e6f45d, 0x3a85a94514a93cb5, 0x38dc083705acd0fd}}, + {{0xc845dfa56de66fde, 0xe152a5002c40483a, 0xe9d2e163c7b4f632, 0x30f4452edcbc1b65}}, + {{0x856d2782c5759740, 0xfa134569f99cbecc, 0x8844fc73c0ea4e71, 0x632d9a1a593f2469}}}, +{{{0xf6bb6b15b807cba6, 0x1823c7dfbc54f0d7, 0xbb1d97036e29670b, 0x0b24f48847ed4a57}}, + {{0xbf09fd11ed0c84a7, 0x63f071810d9f693a, 0x21908c2d57cf8779, 0x3a5a7df28af64ba2}}, + {{0xdcdad4be511beac7, 0xa4538075ed26ccf2, 0xe19cff9f005f9a65, 0x34fcf74475481f63}}}, +{{{0xc197e04c789767ca, 0xb8714dcb38d9467d, 0x55de888283f95fa8, 0x3d3bdc164dfa63f7}}, + {{0xa5bb1dab78cfaa98, 0x5ceda267190b72f2, 0x9309c9110a92608e, 0x0119a3042fb374b0}}, + {{0x67a2d89ce8c2177d, 0x669da5f66895d0c1, 0xf56598e5b282a2b0, 0x56c088f1ede20a73}}}, 
+{{{0x336d3d1110a86e17, 0xd7f388320b75b2fa, 0xf915337625072988, 0x09674c6b99108b87}}, + {{0x581b5fac24f38f02, 0xa90be9febae30cbd, 0x9a2169028acf92f0, 0x038b7ea48359038f}}, + {{0x9f4ef82199316ff8, 0x2f49d282eaa78d4f, 0x0971a5ab5aef3174, 0x6e5e31025969eb65}}}, +{{{0xb16c62f587e593fb, 0x4999eddeca5d3e71, 0xb491c1e014cc3e6d, 0x08f5114789a8dba8}}, + {{0x3304fb0e63066222, 0xfb35068987acba3f, 0xbd1924778c1061a3, 0x3058ad43d1838620}}, + {{0x323c0ffde57663d0, 0x05c3df38a22ea610, 0xbdc78abdac994f9a, 0x26549fa4efe3dc99}}}, +{{{0x738b38d787ce8f89, 0xb62658e24179a88d, 0x30738c9cf151316d, 0x49128c7f727275c9}}, + {{0x04dbbc17f75396b9, 0x69e6a2d7d2f86746, 0xc6409d99f53eabc6, 0x606175f6332e25d2}}, + {{0x4021370ef540e7dd, 0x0910d6f5a1f1d0a5, 0x4634aacd5b06b807, 0x6a39e6356944f235}}}, +{{{0x96cd5640df90f3e7, 0x6c3a760edbfa25ea, 0x24f3ef0959e33cc4, 0x42889e7e530d2e58}}, + {{0x1da1965774049e9d, 0xfbcd6ea198fe352b, 0xb1cbcd50cc5236a6, 0x1f5ec83d3f9846e2}}, + {{0x8efb23c3328ccb75, 0xaf42a207dd876ee9, 0x20fbdadc5dfae796, 0x241e246b06bf9f51}}}, +{{{0x29e68e57ad6e98f6, 0x4c9260c80b462065, 0x3f00862ea51ebb4b, 0x5bc2c77fb38d9097}}, + {{0x7eaafc9a6280bbb8, 0x22a70f12f403d809, 0x31ce40bb1bfc8d20, 0x2bc65635e8bd53ee}}, + {{0xe8d5dc9fa96bad93, 0xe58fb17dde1947dc, 0x681532ea65185fa3, 0x1fdd6c3b034a7830}}}, +{{{0x0a64e28c55dc18fe, 0xe3df9e993399ebdd, 0x79ac432370e2e652, 0x35ff7fc33ae4cc0e}}, + {{0x9c13a6a52dd8f7a9, 0x2dbb1f8c3efdcabf, 0x961e32405e08f7b5, 0x48c8a121bbe6c9e5}}, + {{0xfc415a7c59646445, 0xd224b2d7c128b615, 0x6035c9c905fbb912, 0x42d7a91274429fab}}}, +{{{0x4e6213e3eaf72ed3, 0x6794981a43acd4e7, 0xff547cde6eb508cb, 0x6fed19dd10fcb532}}, + {{0xa9a48947933da5bc, 0x4a58920ec2e979ec, 0x96d8800013e5ac4c, 0x453692d74b48b147}}, + {{0xdd775d99a8559c6f, 0xf42a2140df003e24, 0x5223e229da928a66, 0x063f46ba6d38f22c}}}, +{{{0xd2d242895f536694, 0xca33a2c542939b2c, 0x986fada6c7ddb95c, 0x5a152c042f712d5d}}, + {{0x39843cb737346921, 0xa747fb0738c89447, 0xcb8d8031a245307e, 0x67810f8e6d82f068}}, + {{0x3eeb8fbcd2287db4, 0x72c7d3a301a03e93, 0x5473e88cbd98265a, 0x7324aa515921b403}}}, +{{{0x857942f46c3cbe8e, 0xa1d364b14730c046, 0x1c8ed914d23c41bf, 0x0838e161eef6d5d2}}, + {{0xad23f6dae82354cb, 0x6962502ab6571a6d, 0x9b651636e38e37d1, 0x5cac5005d1a3312f}}, + {{0x8cc154cce9e39904, 0x5b3a040b84de6846, 0xc4d8a61cb1be5d6e, 0x40fb897bd8861f02}}}, +{{{0x84c5aa9062de37a1, 0x421da5000d1d96e1, 0x788286306a9242d9, 0x3c5e464a690d10da}}, + {{0xe57ed8475ab10761, 0x71435e206fd13746, 0x342f824ecd025632, 0x4b16281ea8791e7b}}, + {{0xd1c101d50b813381, 0xdee60f1176ee6828, 0x0cb68893383f6409, 0x6183c565f6ff484a}}}, +{{{0x741d5a461e6bf9d6, 0x2305b3fc7777a581, 0xd45574a26474d3d9, 0x1926e1dc6401e0ff}}, + {{0xdb468549af3f666e, 0xd77fcf04f14a0ea5, 0x3df23ff7a4ba0c47, 0x3a10dfe132ce3c85}}, + {{0xe07f4e8aea17cea0, 0x2fd515463a1fc1fd, 0x175322fd31f2c0f1, 0x1fa1d01d861e5d15}}}, +{{{0xcc8055947d599832, 0x1e4656da37f15520, 0x99f6f7744e059320, 0x773563bc6a75cf33}}, + {{0x38dcac00d1df94ab, 0x2e712bddd1080de9, 0x7f13e93efdd5e262, 0x73fced18ee9a01e5}}, + {{0x06b1e90863139cb3, 0xa493da67c5a03ecd, 0x8d77cec8ad638932, 0x1f426b701b864f44}}}, +{{{0xefc9264c41911c01, 0xf1a3b7b817a22c25, 0x5875da6bf30f1447, 0x4e1af5271d31b090}}, + {{0xf17e35c891a12552, 0xb76b8153575e9c76, 0xfa83406f0d9b723e, 0x0b76bb1b3fa7e438}}, + {{0x08b8c1f97f92939b, 0xbe6771cbd444ab6e, 0x22e5646399bb8017, 0x7b6dd61eb772a955}}}, +{{{0xb7adc1e850f33d92, 0x7998fa4f608cd5cf, 0xad962dbd8dfc5bdb, 0x703e9bceaf1d2f4f}}, + {{0x5730abf9ab01d2c7, 0x16fb76dc40143b18, 0x866cbe65a0cbb281, 0x53fa9b659bff6afe}}, + 
{{0x6c14c8e994885455, 0x843a5d6665aed4e5, 0x181bb73ebcd65af1, 0x398d93e5c4c61f50}}}, +{{{0x1c4bd16733e248f3, 0xbd9e128715bf0a5f, 0xd43f8cf0a10b0376, 0x53b09b5ddf191b13}}, + {{0xc3877c60d2e7e3f2, 0x3b34aaa030828bb1, 0x283e26e7739ef138, 0x699c9c9002c30577}}, + {{0xf306a7235946f1cc, 0x921718b5cce5d97d, 0x28cdd24781b4e975, 0x51caf30c6fcdd907}}}, +{{{0xa60ba7427674e00a, 0x630e8570a17a7bf3, 0x3758563dcf3324cc, 0x5504aa292383fdaa}}, + {{0x737af99a18ac54c7, 0x903378dcc51cb30f, 0x2b89bc334ce10cc7, 0x12ae29c189f8e99a}}, + {{0xa99ec0cb1f0d01cf, 0x0dd1efcc3a34f7ae, 0x55ca7521d09c4e22, 0x5fd14fe958eba5ea}}}, +{{{0xb5dc2ddf2845ab2c, 0x069491b10a7fe993, 0x4daaf3d64002e346, 0x093ff26e586474d1}}, + {{0x3c42fe5ebf93cb8e, 0xbedfa85136d4565f, 0xe0f0859e884220e8, 0x7dd73f960725d128}}, + {{0xb10d24fe68059829, 0x75730672dbaf23e5, 0x1367253ab457ac29, 0x2f59bcbc86b470a4}}}, +{{{0x83847d429917135f, 0xad1b911f567d03d7, 0x7e7748d9be77aad1, 0x5458b42e2e51af4a}}, + {{0x7041d560b691c301, 0x85201b3fadd7e71e, 0x16c2e16311335585, 0x2aa55e3d010828b1}}, + {{0xed5192e60c07444f, 0x42c54e2d74421d10, 0x352b4c82fdb5c864, 0x13e9004a8a768664}}}, +{{{0xcbb5b5556c032bff, 0xdf7191b729297a3a, 0xc1ff7326aded81bb, 0x71ade8bb68be03f5}}, + {{0x1e6284c5806b467c, 0xc5f6997be75d607b, 0x8b67d958b378d262, 0x3d88d66a81cd8b70}}, + {{0x8b767a93204ed789, 0x762fcacb9fa0ae2a, 0x771febcc6dce4887, 0x343062158ff05fb3}}}, +{{{0xe05da1a7e1f5bf49, 0x26457d6dd4736092, 0x77dcb07773cc32f6, 0x0a5d94969cdd5fcd}}, + {{0xfce219072a7b31b4, 0x4d7adc75aa578016, 0x0ec276a687479324, 0x6d6d9d5d1fda4beb}}, + {{0x22b1a58ae9b08183, 0xfd95d071c15c388b, 0xa9812376850a0517, 0x33384cbabb7f335e}}}, +{{{0x3c6fa2680ca2c7b5, 0x1b5082046fb64fda, 0xeb53349c5431d6de, 0x5278b38f6b879c89}}, + {{0x33bc627a26218b8d, 0xea80b21fc7a80c61, 0x9458b12b173e9ee6, 0x076247be0e2f3059}}, + {{0x52e105f61416375a, 0xec97af3685abeba4, 0x26e6b50623a67c36, 0x5cf0e856f3d4fb01}}}, +{{{0xf6c968731ae8cab4, 0x5e20741ecb4f92c5, 0x2da53be58ccdbc3e, 0x2dddfea269970df7}}, + {{0xbeaece313db342a8, 0xcba3635b842db7ee, 0xe88c6620817f13ef, 0x1b9438aa4e76d5c6}}, + {{0x8a50777e166f031a, 0x067b39f10fb7a328, 0x1925c9a6010fbd76, 0x6df9b575cc740905}}}, +{{{0x42c1192927f6bdcf, 0x8f91917a403d61ca, 0xdc1c5a668b9e1f61, 0x1596047804ec0f8d}}, + {{0xecdfc35b48cade41, 0x6a88471fb2328270, 0x740a4a2440a01b6a, 0x471e5796003b5f29}}, + {{0xda96bbb3aced37ac, 0x7a2423b5e9208cea, 0x24cc5c3038aebae2, 0x50c356afdc5dae2f}}}, +{{{0x09dcbf4341c30318, 0xeeba061183181dce, 0xc179c0cedc1e29a1, 0x1dbf7b89073f35b0}}, + {{0xcfed9cdf1b31b964, 0xf486a9858ca51af3, 0x14897265ea8c1f84, 0x784a53dd932acc00}}, + {{0x2d99f9df14fc4920, 0x76ccb60cc4499fe5, 0xa4132cbbe5cf0003, 0x3f93d82354f000ea}}}, +{{{0x8183e7689e04ce85, 0x678fb71e04465341, 0xad92058f6688edac, 0x5da350d3532b099a}}, + {{0xeaac12d179e14978, 0xff923ff3bbebff5e, 0x4af663e40663ce27, 0x0fd381a811a5f5ff}}, + {{0xf256aceca436df54, 0x108b6168ae69d6e8, 0x20d986cb6b5d036c, 0x655957b9fee2af50}}}, +{{{0xaea8b07fa902030f, 0xf88c766af463d143, 0x15b083663c787a60, 0x08eab1148267a4a8}}, + {{0xbdc1409bd002d0ac, 0x66660245b5ccd9a6, 0x82317dc4fade85ec, 0x02fe934b6ad7df0d}}, + {{0xef5cf100cfb7ea74, 0x22897633a1cb42ac, 0xd4ce0c54cef285e2, 0x30408c048a146a55}}}, +{{{0x739d8845832fcedb, 0xfa38d6c9ae6bf863, 0x32bc0dcab74ffef7, 0x73937e8814bce45e}}, + {{0xbb2e00c9193b877f, 0xece3a890e0dc506b, 0xecf3b7c036de649f, 0x5f46040898de9e1a}}, + {{0xb9037116297bf48d, 0xa9d13b22d4f06834, 0xe19715574696bdc6, 0x2cf8a4e891d5e835}}}, +{{{0x6d93fd8707110f67, 0xdd4c09d37c38b549, 0x7cb16a4cc2736a86, 0x2049bd6e58252a09}}, + 
{{0x2cb5487e17d06ba2, 0x24d2381c3950196b, 0xd7659c8185978a30, 0x7a6f7f2891d6a4f6}}, + {{0x7d09fd8d6a9aef49, 0xf0ee60be5b3db90b, 0x4c21b52c519ebfd4, 0x6011aadfc545941d}}}, +{{{0x5f67926dcf95f83c, 0x7c7e856171289071, 0xd6a1e7f3998f7a5b, 0x6fc5cc1b0b62f9e0}}, + {{0x63ded0c802cbf890, 0xfbd098ca0dff6aaa, 0x624d0afdb9b6ed99, 0x69ce18b779340b1e}}, + {{0xd1ef5528b29879cb, 0xdd1aae3cd47e9092, 0x127e0442189f2352, 0x15596b3ae57101f1}}}, +{{{0x462739d23f9179a2, 0xff83123197d6ddcf, 0x1307deb553f2148a, 0x0d2237687b5f4dda}}, + {{0x09ff31167e5124ca, 0x0be4158bd9c745df, 0x292b7d227ef556e5, 0x3aa4e241afb6d138}}, + {{0x2cc138bf2a3305f5, 0x48583f8fa2e926c3, 0x083ab1a25549d2eb, 0x32fcaa6e4687a36c}}}, +{{{0x7bc56e8dc57d9af5, 0x3e0bd2ed9df0bdf2, 0xaac014de22efe4a3, 0x4627e9cefebd6a5c}}, + {{0x3207a4732787ccdf, 0x17e31908f213e3f8, 0xd5b2ecd7f60d964e, 0x746f6336c2600be9}}, + {{0x3f4af345ab6c971c, 0xe288eb729943731f, 0x33596a8a0344186d, 0x7b4917007ed66293}}}, +{{{0x2d85fb5cab84b064, 0x497810d289f3bc14, 0x476adc447b15ce0c, 0x122ba376f844fd7b}}, + {{0x54341b28dd53a2dd, 0xaa17905bdf42fc3f, 0x0ff592d94dd2f8f4, 0x1d03620fe08cd37d}}, + {{0xc20232cda2b4e554, 0x9ed0fd42115d187f, 0x2eabb4be7dd479d9, 0x02c70bf52b68ec4c}}}, +{{{0xa287ec4b5d0b2fbb, 0x415c5790074882ca, 0xe044a61ec1d0815c, 0x26334f0a409ef5e0}}, + {{0xace532bf458d72e1, 0x5be768e07cb73cb5, 0x56cf7d94ee8bbde7, 0x6b0697e3feb43a03}}, + {{0xb6c8f04adf62a3c0, 0x3ef000ef076da45d, 0x9c9cb95849f0d2a9, 0x1cc37f43441b2fae}}}, +{{{0x508f565a5cc7324f, 0xd061c4c0e506a922, 0xfb18abdb5c45ac19, 0x6c6809c10380314a}}, + {{0xd76656f1c9ceaeb9, 0x1c5b15f818e5656a, 0x26e72832844c2334, 0x3a346f772f196838}}, + {{0xd2d55112e2da6ac8, 0xe9bd0331b1e851ed, 0x960746dd8ec67262, 0x05911b9f6ef7c5d0}}}, +{{{0xe9dcd756b637ff2d, 0xec4c348fc987f0c4, 0xced59285f3fbc7b7, 0x3305354793e1ea87}}, + {{0x01c18980c5fe9f94, 0xcd656769716fd5c8, 0x816045c3d195a086, 0x6e2b7f3266cc7982}}, + {{0xcc802468f7c3568f, 0x9de9ba8219974cb3, 0xabb7229cb5b81360, 0x44e2017a6fbeba62}}}, +{{{0xc4c2a74354dab774, 0x8e5d4c3c4eaf031a, 0xb76c23d242838f17, 0x749a098f68dce4ea}}, + {{0x87f82cf3b6ca6ecd, 0x580f893e18f4a0c2, 0x058930072604e557, 0x6cab6ac256d19c1d}}, + {{0xdcdfe0a02cc1de60, 0x032665ff51c5575b, 0x2c0c32f1073abeeb, 0x6a882014cd7b8606}}}, +{{{0xa52a92fea4747fb5, 0xdc12a4491fa5ab89, 0xd82da94bb847a4ce, 0x4d77edce9512cc4e}}, + {{0xd111d17caf4feb6e, 0x050bba42b33aa4a3, 0x17514c3ceeb46c30, 0x54bedb8b1bc27d75}}, + {{0x77c8e14577e2189c, 0xa3e46f6aff99c445, 0x3144dfc86d335343, 0x3a96559e7c4216a9}}}, +{{{0x12550d37f42ad2ee, 0x8b78e00498a1fbf5, 0x5d53078233894cb2, 0x02c84e4e3e498d0c}}, + {{0x4493896880baaa52, 0x4c98afc4f285940e, 0xef4aa79ba45448b6, 0x5278c510a57aae7f}}, + {{0xa54dd074294c0b94, 0xf55d46b8df18ffb6, 0xf06fecc58dae8366, 0x588657668190d165}}}, +{{{0xd47712311aef7117, 0x50343101229e92c7, 0x7a95e1849d159b97, 0x2449959b8b5d29c9}}, + {{0xbf5834f03de25cc3, 0xb887c8aed6815496, 0x5105221a9481e892, 0x6760ed19f7723f93}}, + {{0x669ba3b7ac35e160, 0x2eccf73fba842056, 0x1aec1f17c0804f07, 0x0d96bc031856f4e7}}}, +{{{0x3318be7775c52d82, 0x4cb764b554d0aab9, 0xabcf3d27cc773d91, 0x3bf4d1848123288a}}, + {{0xb1d534b0cc7505e1, 0x32cd003416c35288, 0xcb36a5800762c29d, 0x5bfe69b9237a0bf8}}, + {{0x183eab7e78a151ab, 0xbbe990c999093763, 0xff717d6e4ac7e335, 0x4c5cddb325f39f88}}}, +{{{0xc0f6b74d6190a6eb, 0x20ea81a42db8f4e4, 0xa8bd6f7d97315760, 0x33b1d60262ac7c21}}, + {{0x57750967e7a9f902, 0x2c37fdfc4f5b467e, 0xb261663a3177ba46, 0x3a375e78dc2d532b}}, + {{0x8141e72f2d4dddea, 0xe6eafe9862c607c8, 0x23c28458573cafd0, 0x46b9476f4ff97346}}}, 
+{{{0x0c1ffea44f901e5c, 0x2b0b6fb72184b782, 0xe587ff910114db88, 0x37130f364785a142}}, + {{0x1215505c0d58359f, 0x2a2013c7fc28c46b, 0x24a0a1af89ea664e, 0x4400b638a1130e1f}}, + {{0x3a01b76496ed19c3, 0x31e00ab0ed327230, 0x520a885783ca15b1, 0x06aab9875accbec7}}}, +{{{0xc1339983f5df0ebb, 0xc0f3758f512c4cac, 0x2cf1130a0bb398e1, 0x6b3cecf9aa270c62}}, + {{0x5349acf3512eeaef, 0x20c141d31cc1cb49, 0x24180c07a99a688d, 0x555ef9d1c64b2d17}}, + {{0x36a770ba3b73bd08, 0x624aef08a3afbf0c, 0x5737ff98b40946f2, 0x675f4de13381749d}}}, +{{{0x0e2c52036b1782fc, 0x64816c816cad83b4, 0xd0dcbdd96964073e, 0x13d99df70164c520}}, + {{0xa12ff6d93bdab31d, 0x0725d80f9d652dfe, 0x019c4ff39abe9487, 0x60f450b882cd3c43}}, + {{0x014b5ec321e5c0ca, 0x4fcb69c9d719bfa2, 0x4e5f1c18750023a0, 0x1c06de9e55edac80}}}, +{{{0x990f7ad6a33ec4e2, 0x6608f938be2ee08e, 0x9ca143c563284515, 0x4cf38a1fec2db60d}}, + {{0xffd52b40ff6d69aa, 0x34530b18dc4049bb, 0x5e4a5c2fa34d9897, 0x78096f8e7d32ba2d}}, + {{0xa0aaaa650dfa5ce7, 0xf9c49e2a48b5478c, 0x4f09cc7d7003725b, 0x373cad3a26091abe}}}, +{{{0xb294634d82c9f57c, 0x1fcbfde124934536, 0x9e9c4db3418cdb5a, 0x0040f3d9454419fc}}, + {{0xf1bea8fb89ddbbad, 0x3bcb2cbc61aeaecb, 0x8f58a7bb1f9b8d9d, 0x21547eda5112a686}}, + {{0xdefde939fd5986d3, 0xf4272c89510a380c, 0xb72ba407bb3119b9, 0x63550a334a254df4}}}, +{{{0x6507d6edb569cf37, 0x178429b00ca52ee1, 0xea7c0090eb6bd65d, 0x3eea62c7daf78f51}}, + {{0x9bba584572547b49, 0xf305c6fae2c408e0, 0x60e8fa69c734f18d, 0x39a92bafaa7d767a}}, + {{0x9d24c713e693274e, 0x5f63857768dbd375, 0x70525560eb8ab39a, 0x68436a0665c9c4cd}}}, +{{{0xbc0235e8202f3f27, 0xc75c00e264f975b0, 0x91a4e9d5a38c2416, 0x17b6e7f68ab789f9}}, + {{0x1e56d317e820107c, 0xc5266844840ae965, 0xc1e0a1c6320ffc7a, 0x5373669c91611472}}, + {{0x5d2814ab9a0e5257, 0x908f2084c9cab3fc, 0xafcaf5885b2d1eca, 0x1cb4b5a678f87d11}}}, +{{{0xb664c06b394afc6c, 0x0c88de2498da5fb1, 0x4f8d03164bcad834, 0x330bca78de7434a2}}, + {{0x6b74aa62a2a007e7, 0xf311e0b0f071c7b1, 0x5707e438000be223, 0x2dc0fd2d82ef6eac}}, + {{0x982eff841119744e, 0xf9695e962b074724, 0xc58ac14fbfc953fb, 0x3c31be1b369f1cf5}}}, +{{{0xb0f4864d08948aee, 0x07dc19ee91ba1c6f, 0x7975cdaea6aca158, 0x330b61134262d4bb}}, + {{0xc168bc93f9cb4272, 0xaeb8711fc7cedb98, 0x7f0e52aa34ac8d7a, 0x41cec1097e7d55bb}}, + {{0xf79619d7a26d808a, 0xbb1fd49e1d9e156d, 0x73d7c36cdba1df27, 0x26b44cd91f28777d}}}, +{{{0x300a9035393aa6d8, 0x2b501131a12bb1cd, 0x7b1ff677f093c222, 0x4309c1f8cab82bad}}, + {{0xaf44842db0285f37, 0x8753189047efc8df, 0x9574e091f820979a, 0x0e378d6069615579}}, + {{0xd9fa917183075a55, 0x4bdb5ad26b009fdc, 0x7829ad2cd63def0e, 0x078fc54975fd3877}}}, +{{{0x87dfbd1428878f2d, 0x134636dd1e9421a1, 0x4f17c951257341a3, 0x5df98d4bad296cb8}}, + {{0xe2004b5bb833a98a, 0x44775dec2d4c3330, 0x3aa244067eace913, 0x272630e3d58e00a9}}, + {{0xf3678fd0ecc90b54, 0xf001459b12043599, 0x26725fbc3758b89b, 0x4325e4aa73a719ae}}}, +{{{0x657dc6ef433c3493, 0x65375e9f80dbf8c3, 0x47fd2d465b372dae, 0x4966ab79796e7947}}, + {{0xed24629acf69f59d, 0x2a4a1ccedd5abbf4, 0x3535ca1f56b2d67b, 0x5d8c68d043b1b42d}}, + {{0xee332d4de3b42b0a, 0xd84e5a2b16a4601c, 0x78243877078ba3e4, 0x77ed1eb4184ee437}}}, +{{{0xbfd4e13f201839a0, 0xaeefffe23e3df161, 0xb65b04f06b5d1fe3, 0x52e085fb2b62fbc0}}, + {{0x185d43f89e92ed1a, 0xb04a1eeafe4719c6, 0x499fbe88a6f03f4f, 0x5d8b0d2f3c859bdd}}, + {{0x124079eaa54cf2ba, 0xd72465eb001b26e7, 0x6843bcfdc97af7fd, 0x0524b42b55eacd02}}}, +{{{0xfd0d5dbee45447b0, 0x6cec351a092005ee, 0x99a47844567579cb, 0x59d242a216e7fa45}}, + {{0xbc18dcad9b829eac, 0x23ae7d28b5f579d0, 0xc346122a69384233, 0x1a6110b2e7d4ac89}}, + 
{{0x4f833f6ae66997ac, 0x6849762a361839a4, 0x6985dec1970ab525, 0x53045e89dcb1f546}}}, +{{{0xcb8bb346d75353db, 0xfcfcb24bae511e22, 0xcba48d40d50ae6ef, 0x26e3bae5f4f7cb5d}}, + {{0x84da3cde8d45fe12, 0xbd42c218e444e2d2, 0xa85196781f7e3598, 0x7642c93f5616e2b2}}, + {{0x2323daa74595f8e4, 0xde688c8b857abeb4, 0x3fc48e961c59326e, 0x0b2e73ca15c9b8ba}}}, +{{{0xd6bb4428c17f5026, 0x9eb27223fb5a9ca7, 0xe37ba5031919c644, 0x21ce380db59a6602}}, + {{0x0e3fbfaf79c03a55, 0x3077af054cbb5acf, 0xd5c55245db3de39f, 0x015e68c1476a4af7}}, + {{0xc1d5285220066a38, 0x95603e523570aef3, 0x832659a7226b8a4d, 0x5dd689091f8eedc9}}}, +{{{0xcbac84debfd3c856, 0x1624c348b35ff244, 0xb7f88dca5d9cad07, 0x3b0e574da2c2ebe8}}, + {{0x1d022591a5313084, 0xca2d4aaed6270872, 0x86a12b852f0bfd20, 0x56e6c439ad7da748}}, + {{0xc704ff4942bdbae6, 0x5e21ade2b2de1f79, 0xe95db3f35652fad8, 0x0822b5378f08ebc1}}}, +{{{0x51f048478f387475, 0xb25dbcf49cbecb3c, 0x9aab1244d99f2055, 0x2c709e6c1c10a5d6}}, + {{0xe1b7f29362730383, 0x4b5279ffebca8a2c, 0xdafc778abfd41314, 0x7deb10149c72610f}}, + {{0xcb62af6a8766ee7a, 0x66cbec045553cd0e, 0x588001380f0be4b5, 0x08e68e9ff62ce2ea}}}, +{{{0x34ad500a4bc130ad, 0x8d38db493d0bd49c, 0xa25c3d98500a89be, 0x2f1f3f87eeba3b09}}, + {{0x2f2d09d50ab8f2f9, 0xacb9218dc55923df, 0x4a8f342673766cb9, 0x4cb13bd738f719f5}}, + {{0xf7848c75e515b64a, 0xa59501badb4a9038, 0xc20d313f3f751b50, 0x19a1e353c0ae2ee8}}}, +{{{0x7d1c7560bafa05c3, 0xb3e1a0a0c6e55e61, 0xe3529718c0d66473, 0x41546b11c20c3486}}, + {{0xb42172cdd596bdbd, 0x93e0454398eefc40, 0x9fb15347b44109b5, 0x736bd3990266ae34}}, + {{0x85532d509334b3b4, 0x46fd114b60816573, 0xcc5f5f30425c8375, 0x412295a2b87fab5c}}}, +{{{0x19c99b88f57ed6e9, 0x5393cb266df8c825, 0x5cee3213b30ad273, 0x14e153ebb52d2e34}}, + {{0x2e655261e293eac6, 0x845a92032133acdb, 0x460975cb7900996b, 0x0760bb8d195add80}}, + {{0x413e1a17cde6818a, 0x57156da9ed69a084, 0x2cbf268f46caccb1, 0x6b34be9bc33ac5f2}}}, +{{{0xf3df2f643a78c0b2, 0x4c3e971ef22e027c, 0xec7d1c5e49c1b5a3, 0x2012c18f0922dd2d}}, + {{0x11fc69656571f2d3, 0xc6c9e845530e737a, 0xe33ae7a2d4fe5035, 0x01b9c7b62e6dd30b}}, + {{0x880b55e55ac89d29, 0x1483241f45a0a763, 0x3d36efdfc2e76c1f, 0x08af5b784e4bade8}}}, +{{{0x283499dc881f2533, 0x9d0525da779323b6, 0x897addfb673441f4, 0x32b79d71163a168d}}, + {{0xe27314d289cc2c4b, 0x4be4bd11a287178d, 0x18d528d6fa3364ce, 0x6423c1d5afd9826e}}, + {{0xcc85f8d9edfcb36a, 0x22bcc28f3746e5f9, 0xe49de338f9e5d3cd, 0x480a5efbc13e2dcc}}}, +{{{0x0b51e70b01622071, 0x06b505cf8b1dafc5, 0x2c6bb061ef5aabcd, 0x47aa27600cb7bf31}}, + {{0xb6614ce442ce221f, 0x6e199dcc4c053928, 0x663fb4a4dc1cbe03, 0x24b31d47691c8e06}}, + {{0x2a541eedc015f8c3, 0x11a4fe7e7c693f7c, 0xf0af66134ea278d6, 0x545b585d14dda094}}}, +{{{0x67bf275ea0d43a0f, 0xade68e34089beebe, 0x4289134cd479e72e, 0x0f62f9c332ba5454}}, + {{0x6204e4d0e3b321e1, 0x3baa637a28ff1e95, 0x0b0ccffd5b99bd9e, 0x4d22dc3e64c8d071}}, + {{0xfcb46589d63b5f39, 0x5cae6a3f57cbcf61, 0xfebac2d2953afa05, 0x1c0fa01a36371436}}}, +{{{0xe7547449bc7cd692, 0x0f9abeaae6f73ddf, 0x4af01ca700837e29, 0x63ab1b5d3f1bc183}}, + {{0xc11ee5e854c53fae, 0x6a0b06c12b4f3ff4, 0x33540f80e0b67a72, 0x15f18fc3cd07e3ef}}, + {{0x32750763b028f48c, 0x06020740556a065f, 0xd53bd812c3495b58, 0x08706c9b865f508d}}}, +{{{0xf37ca2ab3d343dff, 0x1a8c6a2d80abc617, 0x8e49e035d4ccffca, 0x48b46beebaa1d1b9}}, + {{0xcc991b4138b41246, 0x243b9c526f9ac26b, 0xb9ef494db7cbabbd, 0x5fba433dd082ed00}}, + {{0x9c49e355c9941ad0, 0xb9734ade74498f84, 0x41c3fed066663e5c, 0x0ecfedf8e8e710b3}}}, +{{{0x76430f9f9cd470d9, 0xb62acc9ba42f6008, 0x1898297c59adad5e, 0x7789dd2db78c5080}}, + 
{{0x744f7463e9403762, 0xf79a8dee8dfcc9c9, 0x163a649655e4cde3, 0x3b61788db284f435}}, + {{0xb22228190d6ef6b2, 0xa94a66b246ce4bfa, 0x46c1a77a4f0b6cc7, 0x4236ccffeb7338cf}}}, +{{{0x8497404d0d55e274, 0x6c6663d9c4ad2b53, 0xec2fb0d9ada95734, 0x2617e120cdb8f73c}}, + {{0x3bd82dbfda777df6, 0x71b177cc0b98369e, 0x1d0e8463850c3699, 0x5a71945b48e2d1f1}}, + {{0x6f203dd5405b4b42, 0x327ec60410b24509, 0x9c347230ac2a8846, 0x77de29fc11ffeb6a}}}, +{{{0xb0ac57c983b778a8, 0x53cdcca9d7fe912c, 0x61c2b854ff1f59dc, 0x3a1a2cf0f0de7dac}}, + {{0x835e138fecced2ca, 0x8c9eaf13ea963b9a, 0xc95fbfc0b2160ea6, 0x575e66f3ad877892}}, + {{0x99803a27c88fcb3a, 0x345a6789275ec0b0, 0x459789d0ff6c2be5, 0x62f882651e70a8b2}}}, +{{{0x085ae2c759ff1be4, 0x149145c93b0e40b7, 0xc467e7fa7ff27379, 0x4eeecf0ad5c73a95}}, + {{0x6d822986698a19e0, 0xdc9821e174d78a71, 0x41a85f31f6cb1f47, 0x352721c2bcda9c51}}, + {{0x48329952213fc985, 0x1087cf0d368a1746, 0x8e5261b166c15aa5, 0x2d5b2d842ed24c21}}}, +{{{0x02cfebd9ebd3ded1, 0xd45b217739021974, 0x7576f813fe30a1b7, 0x5691b6f9a34ef6c2}}, + {{0x5eb7d13d196ac533, 0x377234ecdb80be2b, 0xe144cffc7cf5ae24, 0x5226bcf9c441acec}}, + {{0x79ee6c7223e5b547, 0x6f5f50768330d679, 0xed73e1e96d8adce9, 0x27c3da1e1d8ccc03}}}, +{{{0x7eb9efb23fe24c74, 0x3e50f49f1651be01, 0x3ea732dc21858dea, 0x17377bd75bb810f9}}, + {{0x28302e71630ef9f6, 0xc2d4a2032b64cee0, 0x090820304b6292be, 0x5fca747aa82adf18}}, + {{0x232a03c35c258ea5, 0x86f23a2c6bcb0cf1, 0x3dad8d0d2e442166, 0x04a8933cab76862b}}}, +{{{0xd2c604b622943dff, 0xbc8cbece44cfb3a0, 0x5d254ff397808678, 0x0fa3614f3b1ca6bf}}, + {{0x69082b0e8c936a50, 0xf9c9a035c1dac5b6, 0x6fb73e54c4dfb634, 0x4005419b1d2bc140}}, + {{0xa003febdb9be82f0, 0x2089c1af3a44ac90, 0xf8499f911954fa8e, 0x1fba218aef40ab42}}}, +{{{0xab549448fac8f53e, 0x81f6e89a7ba63741, 0x74fd6c7d6c2b5e01, 0x392e3acaa8c86e42}}, + {{0x4f3e57043e7b0194, 0xa81d3eee08daaf7f, 0xc839c6ab99dcdef1, 0x6c535d13ff7761d5}}, + {{0x4cbd34e93e8a35af, 0x2e0781445887e816, 0x19319c76f29ab0ab, 0x25e17fe4d50ac13b}}}, +{{{0x0a289bd71e04f676, 0x208e1c52d6420f95, 0x5186d8b034691fab, 0x255751442a9fb351}}, + {{0x915f7ff576f121a7, 0xc34a32272fcd87e3, 0xccba2fde4d1be526, 0x6bba828f8969899b}}, + {{0xe2d1bc6690fe3901, 0x4cb54a18a0997ad5, 0x971d6914af8460d4, 0x559d504f7f6b7be4}}}, +{{{0xa7738378b3eb54d5, 0x1d69d366a5553c7c, 0x0a26cf62f92800ba, 0x01ab12d5807e3217}}, + {{0x9c4891e7f6d266fd, 0x0744a19b0307781b, 0x88388f1d6061e23b, 0x123ea6a3354bd50e}}, + {{0x118d189041e32d96, 0xb9ede3c2d8315848, 0x1eab4271d83245d9, 0x4a3961e2c918a154}}}, +{{{0x71dc3be0f8e6bba0, 0xd6cef8347effe30a, 0xa992425fe13a476a, 0x2cd6bce3fb1db763}}, + {{0x0327d644f3233f1e, 0x499a260e34fcf016, 0x83b5a716f2dab979, 0x68aceead9bd4111f}}, + {{0x38b4c90ef3d7c210, 0x308e6e24b7ad040c, 0x3860d9f1b7e73e23, 0x595760d5b508f597}}}, +{{{0x6129bfe104aa6397, 0x8f960008a4a7fccb, 0x3f8bc0897d909458, 0x709fa43edcb291a9}}, + {{0x882acbebfd022790, 0x89af3305c4115760, 0x65f492e37d3473f4, 0x2cb2c5df54515a2b}}, + {{0xeb0a5d8c63fd2aca, 0xd22bc1662e694eff, 0x2723f36ef8cbb03a, 0x70f029ecf0c8131f}}}, +{{{0x461307b32eed3e33, 0xae042f33a45581e7, 0xc94449d3195f0366, 0x0b7d5d8a6c314858}}, + {{0x2a6aafaa5e10b0b9, 0x78f0a370ef041aa9, 0x773efb77aa3ad61f, 0x44eca5a2a74bd9e1}}, + {{0x25d448327b95d543, 0x70d38300a3340f1d, 0xde1c531c60e1c52b, 0x272224512c7de9e4}}}, +{{{0x1abc92af49c5342e, 0xffeed811b2e6fad0, 0xefa28c8dfcc84e29, 0x11b5df18a44cc543}}, + {{0xbf7bbb8a42a975fc, 0x8c5c397796ada358, 0xe27fc76fcdedaa48, 0x19735fd7f6bc20a6}}, + {{0xe3ab90d042c84266, 0xeb848e0f7f19547e, 0x2503a1d065a497b9, 0x0fef911191df895f}}}, 
+{{{0xb1507ca1ab1c6eb9, 0xbd448f3e16b687b3, 0x3455fb7f2c7a91ab, 0x7579229e2f2adec1}}, + {{0x6ab5dcb85b1c16b7, 0x94c0fce83c7b27a5, 0xa4b11c1a735517be, 0x499238d0ba0eafaa}}, + {{0xecf46e527aba8b57, 0x15a08c478bd1647b, 0x7af1c6a65f706fef, 0x6345fa78f03a30d5}}}, +{{{0xdf02f95f1015e7a1, 0x790ec41da9b40263, 0x4d3a0ea133ea1107, 0x54f70be7e33af8c9}}, + {{0x93d3cbe9bdd8f0a4, 0xdb152c1bfd177302, 0x7dbddc6d7f17a875, 0x3e1a71cc8f426efe}}, + {{0xc83ca3e390babd62, 0x80ede3670291c833, 0xc88038ccd37900c4, 0x2c5fc0231ec31fa1}}}, +{{{0xfeba911717038b4f, 0xe5123721c9deef81, 0x1c97e4e75d0d8834, 0x68afae7a23dc3bc6}}, + {{0xc422e4d102456e65, 0x87414ac1cad47b91, 0x1592e2bba2b6ffdd, 0x75d9d2bff5c2100f}}, + {{0x5bd9b4763626e81c, 0x89966936bca02edd, 0x0a41193d61f077b3, 0x3097a24200ce5471}}}, +{{{0x57427734c7f8b84c, 0xf141a13e01b270e9, 0x02d1adfeb4e564a6, 0x4bb23d92ce83bd48}}, + {{0xa162e7246695c486, 0x131d633435a89607, 0x30521561a0d12a37, 0x56704bada6afb363}}, + {{0xaf6c4aa752f912b9, 0x5e665f6cd86770c8, 0x4c35ac83a3c8cd58, 0x2b7a29c010a58a7e}}}, +{{{0xc4007f77d0c1cec3, 0x8d1020b6bac492f8, 0x32ec29d57e69daaf, 0x599408759d95fce0}}, + {{0x33810a23bf00086e, 0xafce925ee736ff7c, 0x3d60e670e24922d4, 0x11ce9e714f96061b}}, + {{0x219ef713d815bac1, 0xf141465d485be25c, 0x6d5447cc4e513c51, 0x174926be5ef44393}}}, +{{{0xb5deb2f9fc5bd5bb, 0x92daa72ae1d810e1, 0xafc4cfdcb72a1c59, 0x497d78813fc22a24}}, + {{0x3ef5d41593ea022e, 0x5cbcc1a20ed0eed6, 0x8fd24ecf07382c8c, 0x6fa42ead06d8e1ad}}, + {{0xe276824a1f73371f, 0x7f7cf01c4f5b6736, 0x7e201fe304fa46e7, 0x785a36a357808c96}}}, +{{{0x825fbdfd63014d2b, 0xc852369c6ca7578b, 0x5b2fcd285c0b5df0, 0x12ab214c58048c8f}}, + {{0x070442985d517bc3, 0x6acd56c7ae653678, 0x00a27983985a7763, 0x5167effae512662b}}, + {{0xbd4ea9e10f53c4b6, 0x1673dc5f8ac91a14, 0xa8f81a4e2acc1aba, 0x33a92a7924332a25}}}, +{{{0x9dd1f49927996c02, 0x0cb3b058e04d1752, 0x1f7e88967fd02c3e, 0x2f964268cb8b3eb1}}, + {{0x7ba95ba0218f2ada, 0xcff42287330fb9ca, 0xdada496d56c6d907, 0x5380c296f4beee54}}, + {{0x9d4f270466898d0a, 0x3d0987990aff3f7a, 0xd09ef36267daba45, 0x7761455e7b1c669c}}}
\ No newline at end of file diff --git a/ext/ed25519-amd64-asm/ge25519_base_niels_smalltables.data b/ext/ed25519-amd64-asm/ge25519_base_niels_smalltables.data new file mode 100644 index 00000000..a31f6f2f --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_base_niels_smalltables.data @@ -0,0 +1,768 @@ +{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}}, + {{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}}, + {{0xdbbd15674b6fbb59, 0x41e13f00eea2a5ea, 0xcdd49d1cc957c6fa, 0x4f0ebe1faf16ecca}}}, +{{{0x8a99a56042b4d5a8, 0x8f2b810c4e60acf6, 0xe09e236bb16e37aa, 0x6bb595a669c92555}}, + {{0x9224e7fc933c71d7, 0x9f469d967a0ff5b5, 0x5aa69a65e1d60702, 0x590c063fa87d2e2e}}, + {{0x6e347eaadad36802, 0xbaf3599383ee4805, 0x3bcabe10e6076826, 0x49314f0a165ed1b8}}}, +{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}}, + {{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}}, + {{0x9bf211f4f1674834, 0xb84e6b17f62df895, 0xd7de6f075b722a4e, 0x549a04b963bb2a21}}}, +{{{0x95fe050a056818bf, 0x327e89715660faa9, 0xc3e8e3cd06a05073, 0x27933f4c7445a49a}}, + {{0x287351b98efc099f, 0x6765c6f47dfd2538, 0xca348d3dfb0a9265, 0x680e910321e58727}}, + {{0xbf1e45ece51426b0, 0xe32bc63d6dba0f94, 0xe42974d58cf852c0, 0x44f079b1b0e64c18}}}, +{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}}, + {{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}}, + {{0xc832a179e7d003b3, 0x5f729d0a00124d7e, 0x62c1d4a10e6d8ff3, 0x68b8ac5938b27a98}}}, +{{{0x499806b67b7d8ca4, 0x575be28427d22739, 0xbb085ce7204553b9, 0x38b64c41ae417884}}, + {{0x3a0ceeeb77157131, 0x9b27158900c8af88, 0x8065b668da59a736, 0x51e57bb6a2cc38bd}}, + {{0x8f9dad91689de3a4, 0x175f2428f8fb9137, 0x050ab5329fcfb988, 0x7865dfa21354c09f}}}, +{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}}, + {{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}}, + {{0x217a8aacab0fda36, 0xa528c6543d3549c8, 0x37d05b8b13ab7568, 0x233cef623a2cbc37}}}, +{{{0xe2a75dedf39234d9, 0x963d7680e1b558f9, 0x2c2741ac6e3c23fb, 0x3a9024a1320e01c3}}, + {{0x59b7596604dd3e8f, 0x6cb30377e288702c, 0xb1339c665ed9c323, 0x0915e76061bce52f}}, + {{0xdf7de835a834a37e, 0x8be19cda689857ea, 0x2c1185367167b326, 0x589eb3d9dbefd5c2}}}, +{{{0xed5b635449aa515e, 0xa865c49f0bc6823a, 0x850c1fe95b42d1c4, 0x30d76d6f03d315b9}}, + {{0x2eccdd0e632f9c1d, 0x51d0b69676893115, 0x52dfb76ba8637a58, 0x6dd37d49a00eef39}}, + {{0x6c4444172106e4c7, 0xfb53d680928d7f69, 0xb4739ea4694d3f26, 0x10c697112e864bb0}}}, +{{{0x6493c4277dbe5fde, 0x265d4fad19ad7ea2, 0x0e00dfc846304590, 0x25e61cabed66fe09}}, + {{0x0ca62aa08358c805, 0x6a3d4ae37a204247, 0x7464d3a63b11eddc, 0x03bf9baf550806ef}}, + {{0x3f13e128cc586604, 0x6f5873ecb459747e, 0xa0b63dedcc1268f5, 0x566d78634586e22c}}}, +{{{0x1637a49f9cc10834, 0xbc8e56d5a89bc451, 0x1cb5ec0f7f7fd2db, 0x33975bca5ecc35d9}}, + {{0xa1054285c65a2fd0, 0x6c64112af31667c3, 0x680ae240731aee58, 0x14fba5f34793b22a}}, + {{0x3cd746166985f7d4, 0x593e5e84c9c80057, 0x2fc3f2b67b61131e, 0x14829cea83fc526c}}}, +{{{0xff437b8497dd95c2, 0x6c744e30aa4eb5a7, 0x9e0c5d613c85e88b, 0x2fd9c71e5f758173}}, + {{0x21e70b2f4e71ecb8, 0xe656ddb940a477e3, 0xbf6556cece1d4f80, 0x05fc3bc4535d7b7e}}, + {{0x24b8b3ae52afdedd, 0x3495638ced3b30cf, 0x33a4bc83a9be8195, 0x373767475c651f04}}}, +{{{0x2fba99fd40d1add9, 0xb307166f96f4d027, 0x4363f05215f03bae, 0x1fbea56c3b18f999}}, + {{0x634095cb14246590, 0xef12144016c15535, 
0x9e38140c8910bc60, 0x6bf5905730907c8c}}, + {{0x0fa778f1e1415b8a, 0x06409ff7bac3a77e, 0x6f52d7b89aa29a50, 0x02521cf67a635a56}}}, +{{{0x513fee0b0a9d5294, 0x8f98e75c0fdf5a66, 0xd4618688bfe107ce, 0x3fa00a7e71382ced}}, + {{0xb1146720772f5ee4, 0xe8f894b196079ace, 0x4af8224d00ac824a, 0x001753d9f7cd6cc4}}, + {{0x3c69232d963ddb34, 0x1dde87dab4973858, 0xaad7d1f9a091f285, 0x12b5fe2fa048edb6}}}, +{{{0x71f0fbc496fce34d, 0x73b9826badf35bed, 0xd2047261ff28c561, 0x749b76f96fb1206f}}, + {{0xdf2b7c26ad6f1e92, 0x4b66d323504b8913, 0x8c409dc0751c8bc3, 0x6f7e93c20796c7b8}}, + {{0x1f5af604aea6ae05, 0xc12351f1bee49c99, 0x61a808b5eeff6b66, 0x0fcec10f01e02151}}}, +{{{0x644d58a649fe1e44, 0x21fcaea231ad777e, 0x02441c5a887fd0d2, 0x4901aa7183c511f3}}, + {{0x3df2d29dc4244e45, 0x2b020e7493d8de0a, 0x6cc8067e820c214d, 0x413779166feab90a}}, + {{0x08b1b7548c1af8f0, 0xce0f7a7c246299b4, 0xf760b0f91e06d939, 0x41bb887b726d1213}}}, +{{{0x40e87d44744346be, 0x1d48dad415b52b25, 0x7c3a8a18a13b603e, 0x4eb728c12fcdbdf7}}, + {{0x7e234c597c6691ae, 0x64889d3d0a85b4c8, 0xdae2c90c354afae7, 0x0a871e070c6a9e1d}}, + {{0x3301b5994bbc8989, 0x736bae3a5bdd4260, 0x0d61ade219d59e3c, 0x3ee7300f2685d464}}}, +{{{0xf5d255e49e7dd6b7, 0x8016115c610b1eac, 0x3c99975d92e187ca, 0x13815762979125c2}}, + {{0x43fa7947841e7518, 0xe5c6fa59639c46d7, 0xa1065e1de3052b74, 0x7d47c6a2cfb89030}}, + {{0x3fdad0148ef0d6e0, 0x9d3e749a91546f3c, 0x71ec621026bb8157, 0x148cf58d34c9ec80}}}, +{{{0x46a492f67934f027, 0x469984bef6840aa9, 0x5ca1bc2a89611854, 0x3ff2fa1ebd5dbbd4}}, + {{0xe2572f7d9ae4756d, 0x56c345bb88f3487f, 0x9fd10b6d6960a88d, 0x278febad4eaea1b9}}, + {{0xb1aa681f8c933966, 0x8c21949c20290c98, 0x39115291219d3c52, 0x4104dd02fe9c677b}}}, +{{{0x72b2bf5e1124422a, 0xa1fa0c3398a33ab5, 0x94cb6101fa52b666, 0x2c863b00afaf53d5}}, + {{0x81214e06db096ab8, 0x21a8b6c90ce44f35, 0x6524c12a409e2af5, 0x0165b5a48efca481}}, + {{0xf190a474a0846a76, 0x12eff984cd2f7cc0, 0x695e290658aa2b8f, 0x591b67d9bffec8b8}}}, +{{{0x312f0d1c80b49bfa, 0x5979515eabf3ec8a, 0x727033c09ef01c88, 0x3de02ec7ca8f7bcb}}, + {{0x99b9b3719f18b55d, 0xe465e5faa18c641e, 0x61081136c29f05ed, 0x489b4f867030128b}}, + {{0xd232102d3aeb92ef, 0xe16253b46116a861, 0x3d7eabe7190baa24, 0x49f5fbba496cbebf}}}, +{{{0x30949a108a5bcfd4, 0xdc40dd70bc6473eb, 0x92c294c1307c0d1c, 0x5604a86dcbfa6e74}}, + {{0x155d628c1e9c572e, 0x8a4d86acc5884741, 0x91a352f6515763eb, 0x06a1a6c28867515b}}, + {{0x7288d1d47c1764b6, 0x72541140e0418b51, 0x9f031a6018acf6d1, 0x20989e89fe2742c6}}}, +{{{0x499777fd3a2dcc7f, 0x32857c2ca54fd892, 0xa279d864d207e3a0, 0x0403ed1d0ca67e29}}, + {{0x1674278b85eaec2e, 0x5621dc077acb2bdf, 0x640a4c1661cbf45a, 0x730b9950f70595d3}}, + {{0xc94b2d35874ec552, 0xc5e6c8cf98246f8d, 0xf7cb46fa16c035ce, 0x5bd7454308303dcc}}}, +{{{0x7f9ad19528b24cc2, 0x7f6b54656335c181, 0x66b8b66e4fc07236, 0x133a78007380ad83}}, + {{0x85c4932115e7792a, 0xc64c89a2bdcdddc9, 0x9d1e3da8ada3d762, 0x5bb7db123067f82c}}, + {{0x0961f467c6ca62be, 0x04ec21d6211952ee, 0x182360779bd54770, 0x740dca6d58f0e0d2}}}, +{{{0xdf48ee0752cfce4e, 0xc3fffaf306ec08b7, 0x05710b2ab95459c4, 0x161d25fa963ea38d}}, + {{0x231a8c570478433c, 0xb7b5270ec281439d, 0xdbaa99eae3d9079f, 0x2c03f5256c2b03d9}}, + {{0x790f18757b53a47d, 0x307b0130cf0c5879, 0x31903d77257ef7f9, 0x699468bdbd96bbaf}}}, +{{{0xbd1f2f46f4dafecf, 0x7cef0114a47fd6f7, 0xd31ffdda4a47b37f, 0x525219a473905785}}, + {{0xd8dd3de66aa91948, 0x485064c22fc0d2cc, 0x9b48246634fdea2f, 0x293e1c4e6c4a2e3a}}, + {{0x376e134b925112e1, 0x703778b5dca15da0, 0xb04589af461c3111, 0x5b605c447f032823}}}, +{{{0xb965805920c47c89, 0xe7f0100c923b8fcc, 
0x0001256502e2ef77, 0x24a76dcea8aeb3ee}}, + {{0x3be9fec6f0e7f04c, 0x866a579e75e34962, 0x5542ef161e1de61a, 0x2f12fef4cc5abdd5}}, + {{0x0a4522b2dfc0c740, 0x10d06e7f40c9a407, 0xc6cf144178cff668, 0x5e607b2518a43790}}}, +{{{0x58b31d8f6cdf1818, 0x35cfa74fc36258a2, 0xe1b3ff4f66e61d6e, 0x5067acab6ccdd5f7}}, + {{0xa02c431ca596cf14, 0xe3c42d40aed3e400, 0xd24526802e0f26db, 0x201f33139e457068}}, + {{0xfd527f6b08039d51, 0x18b14964017c0006, 0xd5220eb02e25a4a8, 0x397cba8862460375}}}, +{{{0x30c13093f05959b2, 0xe23aa18de9a97976, 0x222fd491721d5e26, 0x2339d320766e6c3a}}, + {{0x7815c3fbc81379e7, 0xa6619420dde12af1, 0xffa9c0f885a8fdd5, 0x771b4022c1e1c252}}, + {{0xd87dd986513a2fa7, 0xf5ac9b71f9d4cf08, 0xd06bc31b1ea283b3, 0x331a189219971a76}}}, +{{{0xf5166f45fb4f80c6, 0x9c36c7de61c775cf, 0xe3d4e81b9041d91c, 0x31167c6b83bdfe21}}, + {{0x26512f3a9d7572af, 0x5bcbe28868074a9e, 0x84edc1c11180f7c4, 0x1ac9619ff649a67b}}, + {{0xf22b3842524b1068, 0x5068343bee9ce987, 0xfc9d71844a6250c8, 0x612436341f08b111}}}, +{{{0xd99d41db874e898d, 0x09fea5f16c07dc20, 0x793d2c67d00f9bbc, 0x46ebe2309e5eff40}}, + {{0x8b6349e31a2d2638, 0x9ddfb7009bd3fd35, 0x7f8bf1b8a3a06ba4, 0x1522aa3178d90445}}, + {{0x2c382f5369614938, 0xdafe409ab72d6d10, 0xe8c83391b646f227, 0x45fe70f50524306c}}}, +{{{0xda4875a6960c0b8c, 0x5b68d076ef0e2f20, 0x07fb51cf3d0b8fd4, 0x428d1623a0e392d4}}, + {{0x62f24920c8951491, 0x05f007c83f630ca2, 0x6fbb45d2f5c9d4b8, 0x16619f6db57a2245}}, + {{0x084f4a4401a308fd, 0xa82219c376a5caac, 0xdeb8de4643d1bc7d, 0x1d81592d60bd38c6}}}, +{{{0x61368756a60dac5f, 0x17e02f6aebabdc57, 0x7f193f2d4cce0f7d, 0x20234a7789ecdcf0}}, + {{0x8765b69f7b85c5e8, 0x6ff0678bd168bab2, 0x3a70e77c1d330f9b, 0x3a5f6d51b0af8e7c}}, + {{0x76d20db67178b252, 0x071c34f9d51ed160, 0xf62a4a20b3e41170, 0x7cd682353cffe366}}}, +{{{0x0be1a45bd887fab6, 0x2a846a32ba403b6e, 0xd9921012e96e6000, 0x2838c8863bdc0943}}, + {{0xa665cd6068acf4f3, 0x42d92d183cd7e3d3, 0x5759389d336025d9, 0x3ef0253b2b2cd8ff}}, + {{0xd16bb0cf4a465030, 0xfa496b4115c577ab, 0x82cfae8af4ab419d, 0x21dcb8a606a82812}}}, +{{{0x5c6004468c9d9fc8, 0x2540096ed42aa3cb, 0x125b4d4c12ee2f9c, 0x0bc3d08194a31dab}}, + {{0x9a8d00fabe7731ba, 0x8203607e629e1889, 0xb2cc023743f3d97f, 0x5d840dbf6c6f678b}}, + {{0x706e380d309fe18b, 0x6eb02da6b9e165c7, 0x57bbba997dae20ab, 0x3a4276232ac196dd}}}, +{{{0x4b42432c8a7084fa, 0x898a19e3dfb9e545, 0xbe9f00219c58e45d, 0x1ff177cea16debd1}}, + {{0x3bf8c172db447ecb, 0x5fcfc41fc6282dbd, 0x80acffc075aa15fe, 0x0770c9e824e1a9f9}}, + {{0xcf61d99a45b5b5fd, 0x860984e91b3a7924, 0xe7300919303e3e89, 0x39f264fd41500b1e}}}, +{{{0xa7ad3417dbe7e29c, 0xbd94376a2b9c139c, 0xa0e91b8e93597ba9, 0x1712d73468889840}}, + {{0xd19b4aabfe097be1, 0xa46dfce1dfe01929, 0xc3c908942ca6f1ff, 0x65c621272c35f14e}}, + {{0xe72b89f8ce3193dd, 0x4d103356a125c0bb, 0x0419a93d2e1cfe83, 0x22f9800ab19ce272}}}, +{{{0x605a368a3e9ef8cb, 0xe3e9c022a5504715, 0x553d48b05f24248f, 0x13f416cd647626e5}}, + {{0x42029fdd9a6efdac, 0xb912cebe34a54941, 0x640f64b987bdf37b, 0x4171a4d38598cab4}}, + {{0xfa2758aa99c94c8c, 0x23006f6fb000b807, 0xfbd291ddadda5392, 0x508214fa574bd1ab}}}, +{{{0xc20269153ed6fe4b, 0xa65a6739511d77c4, 0xcbde26462c14af94, 0x22f960ec6faba74b}}, + {{0x461a15bb53d003d6, 0xb2102888bcf3c965, 0x27c576756c683a5a, 0x3a7758a4c86cb447}}, + {{0x548111f693ae5076, 0x1dae21df1dfd54a6, 0x12248c90f3115e65, 0x5d9fd15f8de7f494}}}, +{{{0x031408d36d63727f, 0x6a379aefd7c7b533, 0xa9e18fc5ccaee24b, 0x332f35914f8fbed3}}, + {{0x3f244d2aeed7521e, 0x8e3a9028432e9615, 0xe164ba772e9c16d4, 0x3bc187fa47eb98d8}}, + {{0x6d470115ea86c20c, 0x998ab7cb6c46d125, 
0xd77832b53a660188, 0x450d81ce906fba03}}}, +{{{0x6e7bb6a1a6205275, 0xaa4f21d7413c8e83, 0x6f56d155e88f5cb2, 0x2de25d4ba6345be1}}, + {{0xd074d8961cae743f, 0xf86d18f5ee1c63ed, 0x97bdc55be7f4ed29, 0x4cbad279663ab108}}, + {{0x80d19024a0d71fcd, 0xc525c20afb288af8, 0xb1a3974b5f3a6419, 0x7d7fbcefe2007233}}}, +{{{0xfaef1e6a266b2801, 0x866c68c4d5739f16, 0xf68a2fbc1b03762c, 0x5975435e87b75a8d}}, + {{0xcd7c5dc5f3c29094, 0xc781a29a2a9105ab, 0x80c61d36421c3058, 0x4f9cd196dcd8d4d7}}, + {{0x199297d86a7b3768, 0xd0d058241ad17a63, 0xba029cad5c1c0c17, 0x7ccdd084387a0307}}}, +{{{0xdca6422c6d260417, 0xae153d50948240bd, 0xa9c0c1b4fb68c677, 0x428bd0ed61d0cf53}}, + {{0x9b0c84186760cc93, 0xcdae007a1ab32a99, 0xa88dec86620bda18, 0x3593ca848190ca44}}, + {{0x9213189a5e849aa7, 0xd4d8c33565d8facd, 0x8c52545b53fdbbd1, 0x27398308da2d63e6}}}, +{{{0x42c38d28435ed413, 0xbd50f3603278ccc9, 0xbb07ab1a79da03ef, 0x269597aebe8c3355}}, + {{0xb9a10e4c0a702453, 0x0fa25866d57d1bde, 0xffb9d9b5cd27daf7, 0x572c2945492c33fd}}, + {{0xc77fc745d6cd30be, 0xe4dfe8d3e3baaefb, 0xa22c8830aa5dda0c, 0x7f985498c05bca80}}}, +{{{0x3849ce889f0be117, 0x8005ad1b7b54a288, 0x3da3c39f23fc921c, 0x76c2ec470a31f304}}, + {{0xd35615520fbf6363, 0x08045a45cf4dfba6, 0xeec24fbc873fa0c2, 0x30f2653cd69b12e7}}, + {{0x8a08c938aac10c85, 0x46179b60db276bcb, 0xa920c01e0e6fac70, 0x2f1273f1596473da}}}, +{{{0x4739fc7c8ae01e11, 0xfd5274904a6aab9f, 0x41d98a8287728f2e, 0x5d9e572ad85b69f2}}, + {{0x30488bd755a70bc0, 0x06d6b5a4f1d442e7, 0xead1a69ebc596162, 0x38ac1997edc5f784}}, + {{0x0666b517a751b13b, 0x747d06867e9b858c, 0xacacc011454dde49, 0x22dfcd9cbfe9e69c}}}, +{{{0x8ddbd2e0c30d0cd9, 0xad8e665facbb4333, 0x8f6b258c322a961f, 0x6b2916c05448c1c7}}, + {{0x56ec59b4103be0a1, 0x2ee3baecd259f969, 0x797cb29413f5cd32, 0x0fe9877824cde472}}, + {{0x7edb34d10aba913b, 0x4ea3cd822e6dac0e, 0x66083dff6578f815, 0x4c303f307ff00a17}}}, +{{{0xd30a3bd617b28c85, 0xc5d377b739773bea, 0xc6c6e78c1e6a5cbf, 0x0d61b8f78b2ab7c4}}, + {{0x29fc03580dd94500, 0xecd27aa46fbbec93, 0x130a155fc2e2a7f8, 0x416b151ab706a1d5}}, + {{0x56a8d7efe9c136b0, 0xbd07e5cd58e44b20, 0xafe62fda1b57e0ab, 0x191a2af74277e8d2}}}, +{{{0xce16f74bc53c1431, 0x2b9725ce2072edde, 0xb8b9c36fb5b23ee7, 0x7e2e0e450b5cc908}}, + {{0x9fe62b434f460efb, 0xded303d4a63607d6, 0xf052210eb7a0da24, 0x237e7dbe00545b93}}, + {{0x013575ed6701b430, 0x231094e69f0bfd10, 0x75320f1583e47f22, 0x71afa699b11155e3}}}, +{{{0x65ce6f9b3953b61d, 0xc65839eaafa141e6, 0x0f435ffda9f759fe, 0x021142e9c2b1c28e}}, + {{0xea423c1c473b50d6, 0x51e87a1f3b38ef10, 0x9b84bf5fb2c9be95, 0x00731fbc78f89a1c}}, + {{0xe430c71848f81880, 0xbf960c225ecec119, 0xb6dae0836bba15e3, 0x4c4d6f3347e15808}}}, +{{{0x18f7eccfc17d1fc9, 0x6c75f5a651403c14, 0xdbde712bf7ee0cdf, 0x193fddaaa7e47a22}}, + {{0x2f0cddfc988f1970, 0x6b916227b0b9f51b, 0x6ec7b6c4779176be, 0x38bf9500a88f9fa8}}, + {{0x1fd2c93c37e8876f, 0xa2f61e5a18d1462c, 0x5080f58239241276, 0x6a6fb99ebf0d4969}}}, +{{{0x6a46c1bb560855eb, 0x2416bb38f893f09d, 0xd71d11378f71acc1, 0x75f76914a31896ea}}, + {{0xeeb122b5b6e423c6, 0x939d7010f286ff8e, 0x90a92a831dcf5d8c, 0x136fda9f42c5eb10}}, + {{0xf94cdfb1a305bdd1, 0x0f364b9d9ff82c08, 0x2a87d8a5c3bb588a, 0x022183510be8dcba}}}, +{{{0x4af766385ead2d14, 0xa08ed880ca7c5830, 0x0d13a6e610211e3d, 0x6a071ce17b806c03}}, + {{0x9d5a710143307a7f, 0xb063de9ec47da45f, 0x22bbfe52be927ad3, 0x1387c441fd40426c}}, + {{0xb5d3c3d187978af8, 0x722b5a3d7f0e4413, 0x0d7b4848bb477ca0, 0x3171b26aaf1edc92}}}, +{{{0xa92f319097564ca8, 0xff7bb84c2275e119, 0x4f55fe37a4875150, 0x221fd4873cf0835a}}, + {{0xa60db7d8b28a47d1, 0xa6bf14d61770a4f1, 
0xd4a1f89353ddbd58, 0x6c514a63344243e9}}, + {{0x2322204f3a156341, 0xfb73e0e9ba0a032d, 0xfce0dd4c410f030e, 0x48daa596fb924aaa}}}, +{{{0x6eca8e665ca59cc7, 0xa847254b2e38aca0, 0x31afc708d21e17ce, 0x676dd6fccad84af7}}, + {{0x14f61d5dc84c9793, 0x9941f9e3ef418206, 0xcdf5b88f346277ac, 0x58c837fa0e8a79a9}}, + {{0x0cf9688596fc9058, 0x1ddcbbf37b56a01b, 0xdcc2e77d4935d66a, 0x1c4f73f2c6a57f0a}}}, +{{{0x0e7a4fbd305fa0bb, 0x829d4ce054c663ad, 0xf421c3832fe33848, 0x795ac80d1bf64c42}}, + {{0xb36e706efc7c3484, 0x73dfc9b4c3c1cf61, 0xeb1d79c9781cc7e5, 0x70459adb7daf675c}}, + {{0x1b91db4991b42bb3, 0x572696234b02dcca, 0x9fdf9ee51f8c78dc, 0x5fe162848ce21fd3}}}, +{{{0x4e59214fe194961a, 0x49be7dc70d71cd4f, 0x9300cfd23b50f22d, 0x4789d446fc917232}}, + {{0x2879852d5d7cb208, 0xb8dedd70687df2e7, 0xdc0bffab21687891, 0x2b44c043677daa35}}, + {{0x1a1c87ab074eb78e, 0xfac6d18e99daf467, 0x3eacbbcd484f9067, 0x60c52eef2bb9a4e4}}}, +{{{0x0b5d89bc3bfd8bf1, 0xb06b9237c9f3551a, 0x0e4c16b0d53028f5, 0x10bc9c312ccfcaab}}, + {{0x702bc5c27cae6d11, 0x44c7699b54a48cab, 0xefbc4056ba492eb2, 0x70d77248d9b6676d}}, + {{0xaa8ae84b3ec2a05b, 0x98699ef4ed1781e0, 0x794513e4708e85d1, 0x63755bd3a976f413}}}, +{{{0xb55fa03e2ad10853, 0x356f75909ee63569, 0x9ff9f1fdbe69b890, 0x0d8cc1c48bc16f84}}, + {{0x3dc7101897f1acb7, 0x5dda7d5ec165bbd8, 0x508e5b9c0fa1020f, 0x2763751737c52a56}}, + {{0x029402d36eb419a9, 0xf0b44e7e77b460a5, 0xcfa86230d43c4956, 0x70c2dd8a7ad166e7}}}, +{{{0x656194509f6fec0e, 0xee2e7ea946c6518d, 0x9733c1f367e09b5c, 0x2e0fac6363948495}}, + {{0x91d4967db8ed7e13, 0x74252f0ad776817a, 0xe40982e00d852564, 0x32b8613816a53ce5}}, + {{0x79e7f7bee448cd64, 0x6ac83a67087886d0, 0xf89fd4d9a0e4db2e, 0x4179215c735a4f41}}}, +{{{0x8c7094e7d7dced2a, 0x97fb8ac347d39c70, 0xe13be033a906d902, 0x700344a30cd99d76}}, + {{0xe4ae33b9286bcd34, 0xb7ef7eb6559dd6dc, 0x278b141fb3d38e1f, 0x31fa85662241c286}}, + {{0xaf826c422e3622f4, 0xc12029879833502d, 0x9bc1b7e12b389123, 0x24bb2312a9952489}}}, +{{{0xb1a8ed1732de67c3, 0x3cb49418461b4948, 0x8ebd434376cfbcd2, 0x0fee3e871e188008}}, + {{0x41f80c2af5f85c6b, 0x687284c304fa6794, 0x8945df99a3ba1bad, 0x0d1d2af9ffeb5d16}}, + {{0xa9da8aa132621edf, 0x30b822a159226579, 0x4004197ba79ac193, 0x16acd79718531d76}}}, +{{{0x72df72af2d9b1d3d, 0x63462a36a432245a, 0x3ecea07916b39637, 0x123e0ef6b9302309}}, + {{0xc959c6c57887b6ad, 0x94e19ead5f90feba, 0x16e24e62a342f504, 0x164ed34b18161700}}, + {{0x487ed94c192fe69a, 0x61ae2cea3a911513, 0x877bf6d3b9a4de27, 0x78da0fc61073f3eb}}}, +{{{0x5bf15d28e52bc66a, 0x2c47e31870f01a8e, 0x2419afbc06c28bdd, 0x2d25deeb256b173a}}, + {{0xa29f80f1680c3a94, 0x71f77e151ae9e7e6, 0x1100f15848017973, 0x054aa4b316b38ddd}}, + {{0xdfc8468d19267cb8, 0x0b28789c66e54daf, 0x2aeb1d2a666eec17, 0x134610a6ab7da760}}}, +{{{0x51138ec78df6b0fe, 0x5397da89e575f51b, 0x09207a1d717af1b9, 0x2102fdba2b20d650}}, + {{0xcd2a65e777d1f515, 0x548991878faa60f1, 0xb1b73bbcdabc06e5, 0x654878cba97cc9fb}}, + {{0x969ee405055ce6a1, 0x36bca7681251ad29, 0x3a1af517aa7da415, 0x0ad725db29ecb2ba}}}, +{{{0xdc4267b1834e2457, 0xb67544b570ce1bc5, 0x1af07a0bf7d15ed7, 0x4aefcffb71a03650}}, + {{0xfec7bc0c9b056f85, 0x537d5268e7f5ffd7, 0x77afc6624312aefa, 0x4f675f5302399fd9}}, + {{0xc32d36360415171e, 0xcd2bef118998483b, 0x870a6eadd0945110, 0x0bccbb72a2a86561}}}, +{{{0x185e962feab1a9c8, 0x86e7e63565147dcd, 0xb092e031bb5b6df2, 0x4024f0ab59d6b73e}}, + {{0x186d5e4c50fe1296, 0xe0397b82fee89f7e, 0x3bc7f6c5507031b0, 0x6678fd69108f37c2}}, + {{0x1586fa31636863c2, 0x07f68c48572d33f2, 0x4f73cc9f789eaefc, 0x2d42e2108ead4701}}}, +{{{0x97f5131594dfd29b, 0x6155985d313f4c6a, 
0xeba13f0708455010, 0x676b2608b8d2d322}}, + {{0x21717b0d0f537593, 0x914e690b131e064c, 0x1bb687ae752ae09f, 0x420bf3a79b423c6e}}, + {{0x8138ba651c5b2b47, 0x8671b6ec311b1b80, 0x7bff0cb1bc3135b0, 0x745d2ffa9c0cf1e0}}}, +{{{0xbf525a1e2bc9c8bd, 0xea5b260826479d81, 0xd511c70edf0155db, 0x1ae23ceb960cf5d0}}, + {{0x6036df5721d34e6a, 0xb1db8827997bb3d0, 0xd3c209c3c8756afa, 0x06e15be54c1dc839}}, + {{0x5b725d871932994a, 0x32351cb5ceb1dab0, 0x7dc41549dab7ca05, 0x58ded861278ec1f7}}}, +{{{0xd8173793f266c55c, 0xc8c976c5cc454e49, 0x5ce382f8bc26c3a8, 0x2ff39de85485f6f9}}, + {{0x2dfb5ba8b6c2c9a8, 0x48eeef8ef52c598c, 0x33809107f12d1573, 0x08ba696b531d5bd8}}, + {{0x77ed3eeec3efc57a, 0x04e05517d4ff4811, 0xea3d7a3ff1a671cb, 0x120633b4947cfe54}}}, +{{{0x0b94987891610042, 0x4ee7b13cecebfae8, 0x70be739594f0a4c0, 0x35d30a99b4d59185}}, + {{0x82bd31474912100a, 0xde237b6d7e6fbe06, 0xe11e761911ea79c6, 0x07433be3cb393bde}}, + {{0xff7944c05ce997f4, 0x575d3de4b05c51a3, 0x583381fd5a76847c, 0x2d873ede7af6da9f}}}, +{{{0x157a316443373409, 0xfab8b7eef4aa81d9, 0xb093fee6f5a64806, 0x2e773654707fa7b6}}, + {{0xaa6202e14e5df981, 0xa20d59175015e1f5, 0x18a275d3bae21d6c, 0x0543618a01600253}}, + {{0x0deabdf4974c23c1, 0xaa6f0a259dce4693, 0x04202cb8a29aba2c, 0x4b1443362d07960d}}}, +{{{0xccc4b7c7b66e1f7a, 0x44157e25f50c2f7e, 0x3ef06dfc713eaf1c, 0x582f446752da63f7}}, + {{0x967c54e91c529ccb, 0x30f6269264c635fb, 0x2747aff478121965, 0x17038418eaf66f5c}}, + {{0xc6317bd320324ce4, 0xa81042e8a4488bc4, 0xb21ef18b4e5a1364, 0x0c2a1c4bcda28dc9}}}, +{{{0xd24dc7d06f1f0447, 0xb2269e3edb87c059, 0xd15b0272fbb2d28f, 0x7c558bd1c6f64877}}, + {{0xedc4814869bd6945, 0x0d6d907dbe1c8d22, 0xc63bd212d55cc5ab, 0x5a6a9b30a314dc83}}, + {{0xd0ec1524d396463d, 0x12bb628ac35a24f0, 0xa50c3a791cbc5fa4, 0x0404a5ca0afbafc3}}}, +{{{0x8c1f40070aa743d6, 0xccbad0cb5b265ee8, 0x574b046b668fd2de, 0x46395bfdcadd9633}}, + {{0x62bc9e1b2a416fd1, 0xb5c6f728e350598b, 0x04343fd83d5d6967, 0x39527516e7f8ee98}}, + {{0x117fdb2d1a5d9a9c, 0x9c7745bcd1005c2a, 0xefd4bef154d56fea, 0x76579a29e822d016}}}, +{{{0x45b68e7e49c02a17, 0x23cd51a2bca9a37f, 0x3ed65f11ec224c1b, 0x43a384dc9e05bdb1}}, + {{0x333cb51352b434f2, 0xd832284993de80e1, 0xb5512887750d35ce, 0x02c514bb2a2777c1}}, + {{0x684bd5da8bf1b645, 0xfb8bd37ef6b54b53, 0x313916d7a9b0d253, 0x1160920961548059}}}, +{{{0xb44d166929dacfaa, 0xda529f4c8413598f, 0xe9ef63ca453d5559, 0x351e125bc5698e0b}}, + {{0x7a385616369b4dcd, 0x75c02ca7655c3563, 0x7dc21bf9d4f18021, 0x2f637d7491e6e042}}, + {{0xd4b49b461af67bbe, 0xd603037ac8ab8961, 0x71dee19ff9a699fb, 0x7f182d06e7ce2a9a}}}, +{{{0x7a7c8e64ab0168ec, 0xcb5a4a5515edc543, 0x095519d347cd0eda, 0x67d4ac8c343e93b0}}, + {{0x09454b728e217522, 0xaa58e8f4d484b8d8, 0xd358254d7f46903c, 0x44acc043241c5217}}, + {{0x1c7d6bbb4f7a5777, 0x8b35fed4918313e1, 0x4adca1c6c96b4684, 0x556d1c8312ad71bd}}}, +{{{0x17ef40e30c8d3982, 0x31f7073e15a3fa34, 0x4f21f3cb0773646e, 0x746c6c6d1d824eff}}, + {{0x81f06756b11be821, 0x0faff82310a3f3dd, 0xf8b2d0556a99465d, 0x097abe38cc8c7f05}}, + {{0x0c49c9877ea52da4, 0x4c4369559bdc1d43, 0x022c3809f7ccebd2, 0x577e14a34bee84bd}}}, +{{{0xf0e268ac61a73b0a, 0xf2fafa103791a5f5, 0xc1e13e826b6d00e9, 0x60fa7ee96fd78f42}}, + {{0x94fecebebd4dd72b, 0xf46a4fda060f2211, 0x124a5977c0c8d1ff, 0x705304b8fb009295}}, + {{0xb63d1d354d296ec6, 0xf3c3053e5fad31d8, 0x670b958cb4bd42ec, 0x21398e0ca16353fd}}}, +{{{0x89f5058a382b33f3, 0x5ae2ba0bad48c0b4, 0x8f93b503a53db36e, 0x5aa3ed9d95a232e6}}, + {{0x2798aaf9b4b75601, 0x5eac72135c8dad72, 0xd2ceaa6161b7a023, 0x1bbfb284e98f7d4e}}, + {{0x656777e9c7d96561, 0xcb2b125472c78036, 
0x65053299d9506eee, 0x4a07e14e5e8957cc}}}, +{{{0x4ee412cb980df999, 0xa315d76f3c6ec771, 0xbba5edde925c77fd, 0x3f0bac391d313402}}, + {{0x240b58cdc477a49b, 0xfd38dade6447f017, 0x19928d32a7c86aad, 0x50af7aed84afa081}}, + {{0x6e4fde0115f65be5, 0x29982621216109b2, 0x780205810badd6d9, 0x1921a316baebd006}}}, +{{{0x89422f7edfb870fc, 0x2c296beb4f76b3bd, 0x0738f1d436c24df7, 0x6458df41e273aeb0}}, + {{0xd75aad9ad9f3c18b, 0x566a0eef60b1c19c, 0x3e9a0bac255c0ed9, 0x7b049deca062c7f5}}, + {{0xdccbe37a35444483, 0x758879330fedbe93, 0x786004c312c5dd87, 0x6093dccbc2950e64}}}, +{{{0x1ff39a8585e0706d, 0x36d0a5d8b3e73933, 0x43b9f2e1718f453b, 0x57d1ea084827a97c}}, + {{0x6bdeeebe6084034b, 0x3199c2b6780fb854, 0x973376abb62d0695, 0x6e3180c98b647d90}}, + {{0xee7ab6e7a128b071, 0xa4c1596d93a88baa, 0xf7b4de82b2216130, 0x363e999ddd97bd18}}}, +{{{0x96a843c135ee1fc4, 0x976eb35508e4c8cf, 0xb42f6801b58cd330, 0x48ee9b78693a052b}}, + {{0x2f1848dce24baec6, 0x769b7255babcaf60, 0x90cb3c6e3cefe931, 0x231f979bc6f9b355}}, + {{0x5c31de4bcc2af3c6, 0xb04bb030fe208d1f, 0xb78d7009c14fb466, 0x079bfa9b08792413}}}, +{{{0xe3903a51da300df4, 0x843964233da95ab0, 0xed3cf12d0b356480, 0x038c77f684817194}}, + {{0xf3c9ed80a2d54245, 0x0aa08b7877f63952, 0xd76dac63d1085475, 0x1ef4fb159470636b}}, + {{0x854e5ee65b167bec, 0x59590a4296d0cdc2, 0x72b2df3498102199, 0x575ee92a4a0bff56}}}, +{{{0xd4c080908a182fcf, 0x30e170c299489dbd, 0x05babd5752f733de, 0x43d4e7112cd3fd00}}, + {{0x5d46bc450aa4d801, 0xc3af1227a533b9d8, 0x389e3b262b8906c2, 0x200a1e7e382f581b}}, + {{0x518db967eaf93ac5, 0x71bc989b056652c0, 0xfe2b85d9567197f5, 0x050eca52651e4e38}}}, +{{{0xc3431ade453f0c9c, 0xe9f5045eff703b9b, 0xfcd97ac9ed847b3d, 0x4b0ee6c21c58f4c6}}, + {{0x97ac397660e668ea, 0x9b19bbfe153ab497, 0x4cb179b534eca79f, 0x6151c09fa131ae57}}, + {{0x3af55c0dfdf05d96, 0xdd262ee02ab4ee7a, 0x11b2bb8712171709, 0x1fef24fa800f030b}}}, +{{{0x37d653fb1aa73196, 0x0f9495303fd76418, 0xad200b09fb3a17b2, 0x544d49292fc8613e}}, + {{0x22d2aff530976b86, 0x8d90b806c2d24604, 0xdca1896c4de5bae5, 0x28005fe6c8340c17}}, + {{0x6aefba9f34528688, 0x5c1bff9425107da1, 0xf75bbbcd66d94b36, 0x72e472930f316dfa}}}, +{{{0x2695208c9781084f, 0xb1502a0b23450ee1, 0xfd9daea603efde02, 0x5a9d2e8c2733a34c}}, + {{0x07f3f635d32a7627, 0x7aaa4d865f6566f0, 0x3c85e79728d04450, 0x1fee7f000fe06438}}, + {{0x765305da03dbf7e5, 0xa4daf2491434cdbd, 0x7b4ad5cdd24a88ec, 0x00f94051ee040543}}}, +{{{0x8d356b23c3d330b2, 0xf21c8b9bb0471b06, 0xb36c316c6e42b83c, 0x07d79c7e8beab10d}}, + {{0xd7ef93bb07af9753, 0x583ed0cf3db766a7, 0xce6998bf6e0b1ec5, 0x47b7ffd25dd40452}}, + {{0x87fbfb9cbc08dd12, 0x8a066b3ae1eec29b, 0x0d57242bdb1fc1bf, 0x1c3520a35ea64bb6}}}, +{{{0x80d253a6bccba34a, 0x3e61c3a13838219b, 0x90c3b6019882e396, 0x1c3d05775d0ee66f}}, + {{0xcda86f40216bc059, 0x1fbb231d12bcd87e, 0xb4956a9e17c70990, 0x38750c3b66d12e55}}, + {{0x692ef1409422e51a, 0xcbc0c73c2b5df671, 0x21014fe7744ce029, 0x0621e2c7d330487c}}}, +{{{0xaf9860cc8259838d, 0x90ea48c1c69f9adc, 0x6526483765581e30, 0x0007d6097bd3a5bc}}, + {{0xb7ae1796b0dbf0f3, 0x54dfafb9e17ce196, 0x25923071e9aaa3b4, 0x5d8e589ca1002e9d}}, + {{0xc0bf1d950842a94b, 0xb2d3c363588f2e3e, 0x0a961438bb51e2ef, 0x1583d7783c1cbf86}}}, +{{{0xeceea2ef5da27ae1, 0x597c3a1455670174, 0xc9a62a126609167a, 0x252a5f2e81ed8f70}}, + {{0x90034704cc9d28c7, 0x1d1b679ef72cc58f, 0x16e12b5fbe5b8726, 0x4958064e83c5580a}}, + {{0x0d2894265066e80d, 0xfcc3f785307c8c6b, 0x1b53da780c1112fd, 0x079c170bd843b388}}}, +{{{0x0506ece464fa6fff, 0xbee3431e6205e523, 0x3579422451b8ea42, 0x6dec05e34ac9fb00}}, + {{0xcdd6cd50c0d5d056, 0x9af7686dbb03573b, 
0x3ca6723ff3c3ef48, 0x6768c0d7317b8acc}}, + {{0x94b625e5f155c1b3, 0x417bf3a7997b7b91, 0xc22cbddc6d6b2600, 0x51445e14ddcd52f4}}}, +{{{0x57502b4b3b144951, 0x8e67ff6b444bbcb3, 0xb8bd6927166385db, 0x13186f31e39295c8}}, + {{0x893147ab2bbea455, 0x8c53a24f92079129, 0x4b49f948be30f7a7, 0x12e990086e4fd43d}}, + {{0xf10c96b37fdfbb2e, 0x9f9a935e121ceaf9, 0xdf1136c43a5b983f, 0x77b2e3f05d3e99af}}}, +{{{0x296fa9c59c2ec4de, 0xbc8b61bf4f84f3cb, 0x1c7706d917a8f908, 0x63b795fc7ad3255d}}, + {{0xd598639c12ddb0a4, 0xa5d19f30c024866b, 0xd17c2f0358fce460, 0x07a195152e095e8a}}, + {{0xa8368f02389e5fc8, 0x90433b02cf8de43b, 0xafa1fd5dc5412643, 0x3e8fe83d032f0137}}}, +{{{0x2f8b15b90570a294, 0x94f2427067084549, 0xde1c5ae161bbfd84, 0x75ba3b797fac4007}}, + {{0x08704c8de8efd13c, 0xdfc51a8e33e03731, 0xa59d5da51260cde3, 0x22d60899a6258c86}}, + {{0x6239dbc070cdd196, 0x60fe8a8b6c7d8a9a, 0xb38847bceb401260, 0x0904d07b87779e5e}}}, +{{{0xb4ce1fd4ddba919c, 0xcf31db3ec74c8daa, 0x2c63cc63ad86cc51, 0x43e2143fbc1dde07}}, + {{0xf4322d6648f940b9, 0x06952f0cbd2d0c39, 0x167697ada081f931, 0x6240aacebaf72a6c}}, + {{0xf834749c5ba295a0, 0xd6947c5bca37d25a, 0x66f13ba7e7c9316a, 0x56bdaf238db40cac}}}, +{{{0x362ab9e3f53533eb, 0x338568d56eb93d40, 0x9e0e14521d5a5572, 0x1d24a86d83741318}}, + {{0x1310d36cc19d3bb2, 0x062a6bb7622386b9, 0x7c9b8591d7a14f5c, 0x03aa31507e1e5754}}, + {{0xf4ec7648ffd4ce1f, 0xe045eaf054ac8c1c, 0x88d225821d09357c, 0x43b261dc9aeb4859}}}, +{{{0xe55b1e1988bb79bb, 0xa09ed07dc17a359d, 0xb02c2ee2603dea33, 0x326055cf5b276bc2}}, + {{0x19513d8b6c951364, 0x94fe7126000bf47b, 0x028d10ddd54f9567, 0x02b4d5e242940964}}, + {{0xb4a155cb28d18df2, 0xeacc4646186ce508, 0xc49cf4936c824389, 0x27a6c809ae5d3410}}}, +{{{0x8ba6ebcd1f0db188, 0x37d3d73a675a5be8, 0xf22edfa315f5585a, 0x2cb67174ff60a17e}}, + {{0xcd2c270ac43d6954, 0xdd4a3e576a66cab2, 0x79fa592469d7036c, 0x221503603d8c2599}}, + {{0x59eecdf9390be1d0, 0xa9422044728ce3f1, 0x82891c667a94f0f4, 0x7b1df4b73890f436}}}, +{{{0xe492f2e0b3b2a224, 0x7c6c9e062b551160, 0x15eb8fe20d7f7b0e, 0x61fcef2658fc5992}}, + {{0x5f2e221807f8f58c, 0xe3555c9fd49409d4, 0xb2aaa88d1fb6a630, 0x68698245d352e03d}}, + {{0xdbb15d852a18187a, 0xf3e4aad386ddacd7, 0x44bae2810ff6c482, 0x46cf4c473daf01cf}}}, +{{{0x426525ed9ec4e5f9, 0x0e5eda0116903303, 0x72b1a7f2cbe5cadc, 0x29387bcd14eb5f40}}, + {{0x213c6ea7f1498140, 0x7c1e7ef8392b4854, 0x2488c38c5629ceba, 0x1065aae50d8cc5bb}}, + {{0x1c2c4525df200d57, 0x5c3b2dd6bfca674a, 0x0a07e7b1e1834030, 0x69a198e64f1ce716}}}, +{{{0x9062b2e0d91a78bc, 0x47c9889cc8509667, 0x9df54a66405070b8, 0x7369e6a92493a1bf}}, + {{0xe1014434dcc5caed, 0x47ed5d963c84fb33, 0x70019576ed86a0e7, 0x25b2697bd267f9e4}}, + {{0x9d673ffb13986864, 0x3ca5fbd9415dc7b8, 0xe04ecc3bdf273b5e, 0x1420683db54e4cd2}}}, +{{{0xb478bd1e249dd197, 0x620c35005e58c102, 0xfb02d32fccbaac5c, 0x60b63bebf508a72d}}, + {{0x34eebb6fc1cc5ad0, 0x6a1b0ce99646ac8b, 0xd3b0da49a66bde53, 0x31e83b4161d081c1}}, + {{0x97e8c7129e062b4f, 0x49e48f4f29320ad8, 0x5bece14b6f18683f, 0x55cf1eb62d550317}}}, +{{{0x5879101065c23d58, 0x8b9d086d5094819c, 0xe2402fa912c55fa7, 0x669a6564570891d4}}, + {{0x3076b5e37df58c52, 0xd73ab9dde799cc36, 0xbd831ce34913ee20, 0x1a56fbaa62ba0133}}, + {{0x943e6b505c9dc9ec, 0x302557bba77c371a, 0x9873ae5641347651, 0x13c4836799c58a5c}}}, +{{{0x423a5d465ab3e1b9, 0xfc13c187c7f13f61, 0x19f83664ecb5b9b6, 0x66f80c93a637b607}}, + {{0xc4dcfb6a5d8bd080, 0xdeebc4ec571a4842, 0xd4b2e883b8e55365, 0x50bdc87dc8e5b827}}, + {{0x606d37836edfe111, 0x32353e15f011abd9, 0x64b03ac325b73b96, 0x1dd56444725fd5ae}}}, +{{{0x8fa47ff83362127d, 0xbc9f6ac471cd7c15, 
0x6e71454349220c8b, 0x0e645912219f732e}}, + {{0xc297e60008bac89a, 0x7d4cea11eae1c3e0, 0xf3e38be19fe7977c, 0x3a3a450f63a305cd}}, + {{0x078f2f31d8394627, 0x389d3183de94a510, 0xd1e36c6d17996f80, 0x318c8d9393a9a87b}}}, +{{{0xf2745d032afffe19, 0x0c9f3c497f24db66, 0xbc98d3e3ba8598ef, 0x224c7c679a1d5314}}, + {{0x5d669e29ab1dd398, 0xfc921658342d9e3b, 0x55851dfdf35973cd, 0x509a41c325950af6}}, + {{0xbdc06edca6f925e9, 0x793ef3f4641b1f33, 0x82ec12809d833e89, 0x05bff02328a11389}}}, +{{{0x3632137023cae00b, 0x544acf0ad1accf59, 0x96741049d21a1c88, 0x780b8cc3fa2a44a7}}, + {{0x6881a0dd0dc512e4, 0x4fe70dc844a5fafe, 0x1f748e6b8f4a5240, 0x576277cdee01a3ea}}, + {{0x1ef38abc234f305f, 0x9a577fbd1405de08, 0x5e82a51434e62a0d, 0x5ff418726271b7a1}}}, +{{{0x398e080c1789db9d, 0xa7602025f3e778f5, 0xfa98894c06bd035d, 0x106a03dc25a966be}}, + {{0xe5db47e813b69540, 0xf35d2a3b432610e1, 0xac1f26e938781276, 0x29d4db8ca0a0cb69}}, + {{0xd9ad0aaf333353d0, 0x38669da5acd309e5, 0x3c57658ac888f7f0, 0x4ab38a51052cbefa}}}, +{{{0xda7c2b256768d593, 0x98c1c0574422ca13, 0xf1a80bd5ca0ace1d, 0x29cdd1adc088a690}}, + {{0xd6cfd1ef5fddc09c, 0xe82b3efdf7575dce, 0x25d56b5d201634c2, 0x3041c6bb04ed2b9b}}, + {{0x0ff2f2f9d956e148, 0xade797759f356b2e, 0x1a4698bb5f6c025c, 0x104bbd6814049a7b}}}, +{{{0x51f0fd3168f1ed67, 0x2c811dcdd86f3bc2, 0x44dc5c4304d2f2de, 0x5be8cc57092a7149}}, + {{0xa95d9a5fd67ff163, 0xe92be69d4cc75681, 0xb7f8024cde20f257, 0x204f2a20fb072df5}}, + {{0xc8143b3d30ebb079, 0x7589155abd652e30, 0x653c3c318f6d5c31, 0x2570fb17c279161f}}}, +{{{0x3efa367f2cb61575, 0xf5f96f761cd6026c, 0xe8c7142a65b52562, 0x3dcb65ea53030acd}}, + {{0x192ea9550bb8245a, 0xc8e6fba88f9050d1, 0x7986ea2d88a4c935, 0x241c5f91de018668}}, + {{0x28d8172940de6caa, 0x8fbf2cf022d9733a, 0x16d7fcdd235b01d1, 0x08420edd5fcdf0e5}}}, +{{{0xcdff20ab8362fa4a, 0x57e118d4e21a3e6e, 0xe3179617fc39e62b, 0x0d9a53efbc1769fd}}, + {{0x0358c34e04f410ce, 0xb6135b5a276e0685, 0x5d9670c7ebb91521, 0x04d654f321db889c}}, + {{0x5e7dc116ddbdb5d5, 0x2954deb68da5dd2d, 0x1cb608173334a292, 0x4a7a4f2618991ad7}}}, +{{{0xf4a718025fb15f95, 0x3df65f346b5c1b8f, 0xcdfcf08500e01112, 0x11b50c4cddd31848}}, + {{0x24c3b291af372a4b, 0x93da8270718147f2, 0xdd84856486899ef2, 0x4a96314223e0ee33}}, + {{0xa6e8274408a4ffd6, 0x738e177e9c1576d9, 0x773348b63d02b3f2, 0x4f4bce4dce6bcc51}}}, +{{{0xa71fce5ae2242584, 0x26ea725692f58a9e, 0xd21a09d71cea3cf4, 0x73fcdd14b71c01e6}}, + {{0x30e2616ec49d0b6f, 0xe456718fcaec2317, 0x48eb409bf26b4fa6, 0x3042cee561595f37}}, + {{0x427e7079449bac41, 0x855ae36dbce2310a, 0x4cae76215f841a7c, 0x389e740c9a9ce1d6}}}, +{{{0x64fcb3ae34dcb9ce, 0x97500323e348d0ad, 0x45b3f07d62c6381b, 0x61545379465a6788}}, + {{0xc9bd78f6570eac28, 0xe55b0b3227919ce1, 0x65fc3eaba19b91ed, 0x25c425e5d6263690}}, + {{0x3f3e06a6f1d7de6e, 0x3ef976278e062308, 0x8c14f6264e8a6c77, 0x6539a08915484759}}}, +{{{0xe9d21f74c3d2f773, 0xc150544125c46845, 0x624e5ce8f9b99e33, 0x11c5e4aac5cd186c}}, + {{0xddc4dbd414bb4a19, 0x19b2bc3c98424f8e, 0x48a89fd736ca7169, 0x0f65320ef019bd90}}, + {{0xd486d1b1cafde0c6, 0x4f3fe6e3163b5181, 0x59a8af0dfaf2939a, 0x4cabc7bdec33072a}}}, +{{{0x239e9624089c0a2e, 0xc748c4c03afe4738, 0x17dbed2a764fa12a, 0x639b93f0321c8582}}, + {{0xc08f788f3f78d289, 0xfe30a72ca1404d9f, 0xf2778bfccf65cc9d, 0x7ee498165acb2021}}, + {{0x7bd508e39111a1c3, 0x2b2b90d480907489, 0xe7d2aec2ae72fd19, 0x0edf493c85b602a6}}}, +{{{0xaecc8158599b5a68, 0xea574f0febade20e, 0x4fe41d7422b67f07, 0x403b92e3019d4fb4}}, + {{0x6767c4d284764113, 0xa090403ff7f5f835, 0x1c8fcffacae6bede, 0x04c00c54d1dfa369}}, + {{0x4dc22f818b465cf8, 0x71a0f35a1480eff8, 
0xaee8bfad04c7d657, 0x355bb12ab26176f4}}}, +{{{0xa71e64cc7493bbf4, 0xe5bd84d9eca3b0c3, 0x0a6bc50cfa05e785, 0x0f9b8132182ec312}}, + {{0xa301dac75a8c7318, 0xed90039db3ceaa11, 0x6f077cbf3bae3f2d, 0x7518eaf8e052ad8e}}, + {{0xa48859c41b7f6c32, 0x0f2d60bcf4383298, 0x1815a929c9b1d1d9, 0x47c3871bbb1755c4}}}, +{{{0x5144539771ec4f48, 0xf805b17dc98c5d6e, 0xf762c11a47c3c66b, 0x00b89b85764699dc}}, + {{0xfbe65d50c85066b0, 0x62ecc4b0b3a299b0, 0xe53754ea441ae8e0, 0x08fea02ce8d48d5f}}, + {{0x824ddd7668deead0, 0xc86445204b685d23, 0xb514cfcd5d89d665, 0x473829a74f75d537}}}, +{{{0x82d2da754679c418, 0xe63bd7d8b2618df0, 0x355eef24ac47eb0a, 0x2078684c4833c6b4}}, + {{0x23d9533aad3902c9, 0x64c2ddceef03588f, 0x15257390cfe12fb4, 0x6c668b4d44e4d390}}, + {{0x3b48cf217a78820c, 0xf76a0ab281273e97, 0xa96c65a78c8eed7b, 0x7411a6054f8a433f}}}, +{{{0x4d659d32b99dc86d, 0x044cdc75603af115, 0xb34c712cdcc2e488, 0x7c136574fb8134ff}}, + {{0x579ae53d18b175b4, 0x68713159f392a102, 0x8455ecba1eef35f5, 0x1ec9a872458c398f}}, + {{0xb8e6a4d400a2509b, 0x9b81d7020bc882b4, 0x57e7cc9bf1957561, 0x3add88a5c7cd6460}}}, +{{{0xab895770b635dcf2, 0x02dfef6cf66c1fbc, 0x85530268beb6d187, 0x249929fccc879e74}}, + {{0x85c298d459393046, 0x8f7e35985ff659ec, 0x1d2ca22af2f66e3a, 0x61ba1131a406a720}}, + {{0xa3d0a0f116959029, 0x023b6b6cba7ebd89, 0x7bf15a3e26783307, 0x5620310cbbd8ece7}}}, +{{{0x528993434934d643, 0xb9dbf806a51222f5, 0x8f6d878fc3f41c22, 0x37676a2a4d9d9730}}, + {{0x6646b5f477e285d6, 0x40e8ff676c8f6193, 0xa6ec7311abb594dd, 0x7ec846f3658cec4d}}, + {{0x9b5e8f3f1da22ec7, 0x130f1d776c01cd13, 0x214c8fcfa2989fb8, 0x6daaf723399b9dd5}}}, +{{{0x5f3a7562eb3dbe47, 0xf7ea38548ebda0b8, 0x00c3e53145747299, 0x1304e9e71627d551}}, + {{0x583b04bfacad8ea2, 0x29b743e8148be884, 0x2b1e583b0810c5db, 0x2b5449e58eb3bbaa}}, + {{0x789814d26adc9cfe, 0x3c1bab3f8b48dd0b, 0xda0fe1fff979c60a, 0x4468de2d7c2dd693}}}, +{{{0x51bb355e9419469e, 0x33e6dc4c23ddc754, 0x93a5b6d6447f9962, 0x6cce7c6ffb44bd63}}, + {{0x4b9ad8c6f86307ce, 0x21113531435d0c28, 0xd4a866c5657a772c, 0x5da6427e63247352}}, + {{0x1a94c688deac22ca, 0xb9066ef7bbae1ff8, 0x88ad8c388d59580f, 0x58f29abfe79f2ca8}}}, +{{{0xe90ecfab8de73e68, 0x54036f9f377e76a5, 0xf0495b0bbe015982, 0x577629c4a7f41e36}}, + {{0x4b5a64bf710ecdf6, 0xb14ce538462c293c, 0x3643d056d50b3ab9, 0x6af93724185b4870}}, + {{0x3220024509c6a888, 0xd2e036134b558973, 0x83e236233c33289f, 0x701f25bb0caec18f}}}, +{{{0xc3a8b0f8e4616ced, 0xf700660e9e25a87d, 0x61e3061ff4bca59c, 0x2e0c92bfbdc40be9}}, + {{0x9d18f6d97cbec113, 0x844a06e674bfdbe4, 0x20f5b522ac4e60d6, 0x720a5bc050955e51}}, + {{0x0c3f09439b805a35, 0xe84e8b376242abfc, 0x691417f35c229346, 0x0e9b9cbb144ef0ec}}}, +{{{0xfbbad48ffb5720ad, 0xee81916bdbf90d0e, 0xd4813152635543bf, 0x221104eb3f337bd8}}, + {{0x8dee9bd55db1beee, 0xc9c3ab370a723fb9, 0x44a8f1bf1c68d791, 0x366d44191cfd3cde}}, + {{0x9e3c1743f2bc8c14, 0x2eda26fcb5856c3b, 0xccb82f0e68a7fb97, 0x4167a4e6bc593244}}}, +{{{0x643b9d2876f62700, 0x5d1d9d400e7668eb, 0x1b4b430321fc0684, 0x7938bb7e2255246a}}, + {{0xc2be2665f8ce8fee, 0xe967ff14e880d62c, 0xf12e6e7e2f364eee, 0x34b33370cb7ed2f6}}, + {{0xcdc591ee8681d6cc, 0xce02109ced85a753, 0xed7485c158808883, 0x1176fc6e2dfe65e4}}}, +{{{0xb4af6cd05b9c619b, 0x2ddfc9f4b2a58480, 0x3d4fa502ebe94dc4, 0x08fc3a4c677d5f34}}, + {{0xdb90e28949770eb8, 0x98fbcc2aacf440a3, 0x21354ffeded7879b, 0x1f6a3e54f26906b6}}, + {{0x60a4c199d30734ea, 0x40c085b631165cd6, 0xe2333e23f7598295, 0x4f2fad0116b900d1}}}, +{{{0x44beb24194ae4e54, 0x5f541c511857ef6c, 0xa61e6b2d368d0498, 0x445484a4972ef7ab}}, + {{0x962cd91db73bb638, 0xe60577aafc129c08, 
0x6f619b39f3b61689, 0x3451995f2944ee81}}, + {{0x9152fcd09fea7d7c, 0x4a816c94b0935cf6, 0x258e9aaa47285c40, 0x10b89ca6042893b7}}}, +{{{0x3d5947499718289c, 0x12ebf8c524533f26, 0x0262bfcb14c3ef15, 0x20b878d577b7518e}}, + {{0x753941be5a45f06e, 0xd07caeed6d9c5f65, 0x11776b9c72ff51b6, 0x17d2d1d9ef0d4da9}}, + {{0x27f2af18073f3e6a, 0xfd3fe519d7521069, 0x22e3b72c3ca60022, 0x72214f63cc65c6a7}}}, +{{{0xb4e37f405307a693, 0xaba714d72f336795, 0xd6fbd0a773761099, 0x5fdf48c58171cbc9}}, + {{0x1d9db7b9f43b29c9, 0xd605824a4f518f75, 0xf2c072bd312f9dc4, 0x1f24ac855a1545b0}}, + {{0x24d608328e9505aa, 0x4748c1d10c1420ee, 0xc7ffe45c06fb25a2, 0x00ba739e2ae395e6}}}, +{{{0x592e98de5c8790d6, 0xe5bfb7d345c2a2df, 0x115a3b60f9b49922, 0x03283a3e67ad78f3}}, + {{0xae4426f5ea88bb26, 0x360679d984973bfb, 0x5c9f030c26694e50, 0x72297de7d518d226}}, + {{0x48241dc7be0cb939, 0x32f19b4d8b633080, 0xd3dfc90d02289308, 0x05e1296846271945}}}, +{{{0xba82eeb32d9c495a, 0xceefc8fcf12bb97c, 0xb02dabae93b5d1e0, 0x39c00c9c13698d9b}}, + {{0xadbfbbc8242c4550, 0xbcc80cecd03081d9, 0x843566a6f5c8df92, 0x78cf25d38258ce4c}}, + {{0x15ae6b8e31489d68, 0xaa851cab9c2bf087, 0xc9a75a97f04efa05, 0x006b52076b3ff832}}}, +{{{0x29e0cfe19d95781c, 0xb681df18966310e2, 0x57df39d370516b39, 0x4d57e3443bc76122}}, + {{0xf5cb7e16b9ce082d, 0x3407f14c417abc29, 0xd4b36bce2bf4a7ab, 0x7de2e9561a9f75ce}}, + {{0xde70d4f4b6a55ecb, 0x4801527f5d85db99, 0xdbc9c440d3ee9a81, 0x6b2a90af1a6029ed}}}, +{{{0x6923f4fc9ae61e97, 0x5735281de03f5fd1, 0xa764ae43e6edd12d, 0x5fd8f4e9d12d3e4a}}, + {{0x77ebf3245bb2d80a, 0xd8301b472fb9079b, 0xc647e6f24cee7333, 0x465812c8276c2109}}, + {{0x4d43beb22a1062d9, 0x7065fb753831dc16, 0x180d4a7bde2968d7, 0x05b32c2b1cb16790}}}, +{{{0xc8c05eccd24da8fd, 0xa1cf1aac05dfef83, 0xdbbeeff27df9cd61, 0x3b5556a37b471e99}}, + {{0xf7fca42c7ad58195, 0x3214286e4333f3cc, 0xb6c29d0d340b979d, 0x31771a48567307e1}}, + {{0x32b0c524e14dd482, 0xedb351541a2ba4b6, 0xa3d16048282b5af3, 0x4fc079d27a7336eb}}}, +{{{0x51c938b089bf2f7f, 0x2497bd6502dfe9a7, 0xffffc09c7880e453, 0x124567cecaf98e92}}, + {{0xdc348b440c86c50d, 0x1337cbc9cc94e651, 0x6422f74d643e3cb9, 0x241170c2bae3cd08}}, + {{0x3ff9ab860ac473b4, 0xf0911dee0113e435, 0x4ae75060ebc6c4af, 0x3f8612966c87000d}}}, +{{{0x559a0cc9782a0dde, 0x551dcdb2ea718385, 0x7f62865b31ef238c, 0x504aa7767973613d}}, + {{0x9c18fcfa36048d13, 0x29159db373899ddd, 0xdc9f350b9f92d0aa, 0x26f57eee878a19d4}}, + {{0x0cab2cd55687efb1, 0x5180d162247af17b, 0x85c15a344f5a2467, 0x4041943d9dba3069}}}, +{{{0xc3c0eeba43ebcc96, 0x8d749c9c26ea9caf, 0xd9fa95ee1c77ccc6, 0x1420a1d97684340f}}, + {{0x4b217743a26caadd, 0x47a6b424648ab7ce, 0xcb1d4f7a03fbc9e3, 0x12d931429800d019}}, + {{0x00c67799d337594f, 0x5e3c5140b23aa47b, 0x44182854e35ff395, 0x1b4f92314359a012}}}, +{{{0x3e5c109d89150951, 0x39cefa912de9696a, 0x20eae43f975f3020, 0x239b572a7f132dae}}, + {{0x33cf3030a49866b1, 0x251f73d2215f4859, 0xab82aa4051def4f6, 0x5ff191d56f9a23f6}}, + {{0x819ed433ac2d9068, 0x2883ab795fc98523, 0xef4572805593eb3d, 0x020c526a758f36cb}}}, +{{{0x779834f89ed8dbbc, 0xc8f2aaf9dc7ca46c, 0xa9524cdca3e1b074, 0x02aacc4615313877}}, + {{0xe931ef59f042cc89, 0x2c589c9d8e124bb6, 0xadc8e18aaec75997, 0x452cfe0a5602c50c}}, + {{0x86a0f7a0647877df, 0xbbc464270e607c9f, 0xab17ea25f1fb11c9, 0x4cfb7d7b304b877b}}}, +{{{0x72b43d6cb89b75fe, 0x54c694d99c6adc80, 0xb8c3aa373ee34c9f, 0x14b4622b39075364}}, + {{0xe28699c29789ef12, 0x2b6ecd71df57190d, 0xc343c857ecc970d0, 0x5b1d4cbc434d3ac5}}, + {{0xb6fb2615cc0a9f26, 0x3a4f0e2bb88dcce5, 0x1301498b3369a705, 0x2f98f71258592dd1}}}, +{{{0x0c94a74cb50f9e56, 0x5b1ff4a98e8e1320, 
0x9a2acc2182300f67, 0x3a6ae249d806aaf9}}, + {{0x2e12ae444f54a701, 0xfcfe3ef0a9cbd7de, 0xcebf890d75835de0, 0x1d8062e9e7614554}}, + {{0x657ada85a9907c5a, 0x1a0ea8b591b90f62, 0x8d0e1dfbdf34b4e9, 0x298b8ce8aef25ff3}}}, +{{{0x2a927953eff70cb2, 0x4b89c92a79157076, 0x9418457a30a7cf6a, 0x34b8a8404d5ce485}}, + {{0x837a72ea0a2165de, 0x3fab07b40bcf79f6, 0x521636c77738ae70, 0x6ba6271803a7d7dc}}, + {{0xc26eecb583693335, 0xd5a813df63b5fefd, 0xa293aa9aa4b22573, 0x71d62bdd465e1c6a}}}, +{{{0x6533cc28d378df80, 0xf6db43790a0fa4b4, 0xe3645ff9f701da5a, 0x74d5f317f3172ba4}}, + {{0xcd2db5dab1f75ef5, 0xd77f95cf16b065f5, 0x14571fea3f49f085, 0x1c333621262b2b3d}}, + {{0xa86fe55467d9ca81, 0x398b7c752b298c37, 0xda6d0892e3ac623b, 0x4aebcc4547e9d98c}}}, +{{{0x12f0071b276d01c9, 0xe7b8bac586c48c70, 0x5308129b71d6fba9, 0x5d88fbf95a3db792}}, + {{0x0b408d9e7354b610, 0x806b32535ba85b6e, 0xdbe63a034a58a207, 0x173bd9ddc9a1df2c}}, + {{0x2b500f1efe5872df, 0x58d6582ed43918c1, 0xe6ed278ec9673ae0, 0x06e1cd13b19ea319}}}, +{{{0x40d0ad516f166f23, 0x118e32931fab6abe, 0x3fe35e14a04d088e, 0x3080603526e16266}}, + {{0x472baf629e5b0353, 0x3baa0b90278d0447, 0x0c785f469643bf27, 0x7f3a6a1a8d837b13}}, + {{0xf7e644395d3d800b, 0x95a8d555c901edf6, 0x68cd7830592c6339, 0x30d0fded2e51307e}}}, +{{{0xe0594d1af21233b3, 0x1bdbe78ef0cc4d9c, 0x6965187f8f499a77, 0x0a9214202c099868}}, + {{0x9cb4971e68b84750, 0xa09572296664bbcf, 0x5c8de72672fa412b, 0x4615084351c589d9}}, + {{0xbc9019c0aeb9a02e, 0x55c7110d16034cae, 0x0e6df501659932ec, 0x3bca0d2895ca5dfe}}}, +{{{0x40f031bc3c5d62a4, 0x19fc8b3ecff07a60, 0x98183da2130fb545, 0x5631deddae8f13cd}}, + {{0x9c688eb69ecc01bf, 0xf0bc83ada644896f, 0xca2d955f5f7a9fe2, 0x4ea8b4038df28241}}, + {{0x2aed460af1cad202, 0x46305305a48cee83, 0x9121774549f11a5f, 0x24ce0930542ca463}}}, +{{{0x1fe890f5fd06c106, 0xb5c468355d8810f2, 0x827808fe6e8caf3e, 0x41d4e3c28a06d74b}}, + {{0x3fcfa155fdf30b85, 0xd2f7168e36372ea4, 0xb2e064de6492f844, 0x549928a7324f4280}}, + {{0xf26e32a763ee1a2e, 0xae91e4b7d25ffdea, 0xbc3bd33bd17f4d69, 0x491b66dec0dcff6a}}}, +{{{0x98f5b13dc7ea32a7, 0xe3d5f8cc7e16db98, 0xac0abf52cbf8d947, 0x08f338d0c85ee4ac}}, + {{0x75f04a8ed0da64a1, 0xed222caf67e2284b, 0x8234a3791f7b7ba4, 0x4cf6b8b0b7018b67}}, + {{0xc383a821991a73bd, 0xab27bc01df320c7a, 0xc13d331b84777063, 0x530d4a82eb078a99}}}, +{{{0x004c3630e1f94825, 0x7e2d78268cab535a, 0xc7482323cc84ff8b, 0x65ea753f101770b9}}, + {{0x6d6973456c9abf9e, 0x257fb2fc4900a880, 0x2bacf412c8cfb850, 0x0db3e7e00cbfbd5b}}, + {{0x3d66fc3ee2096363, 0x81d62c7f61b5cb6b, 0x0fbe044213443b1a, 0x02a4ec1921e1a1db}}}, +{{{0x5ce6259a3b24b8a2, 0xb8577acc45afa0b8, 0xcccbe6e88ba07037, 0x3d143c51127809bf}}, + {{0xf5c86162f1cf795f, 0x118c861926ee57f2, 0x172124851c063578, 0x36d12b5dec067fcf}}, + {{0x126d279179154557, 0xd5e48f5cfc783a0a, 0x36bdb6e8df179bac, 0x2ef517885ba82859}}}, +{{{0x4637974e8c58aedc, 0xb9ef22fbabf041a4, 0xe185d956e980718a, 0x2f1b78fab143a8a6}}, + {{0x96eebffb305b2f51, 0xd3f938ad889596b8, 0xf0f52dc746d5dd25, 0x57968290bb3a0095}}, + {{0xf71ab8430a20e101, 0xf393658d24f0ec47, 0xcf7509a86ee2eed1, 0x7dc43e35dc2aa3e1}}}, +{{{0x85966665887dd9c3, 0xc90f9b314bb05355, 0xc6e08df8ef2079b1, 0x7ef72016758cc12f}}, + {{0x5a782a5c273e9718, 0x3576c6995e4efd94, 0x0f2ed8051f237d3e, 0x044fb81d82d50a99}}, + {{0xc1df18c5a907e3d9, 0x57b3371dce4c6359, 0xca704534b201bb49, 0x7f79823f9c30dd2e}}}, +{{{0x8334d239a3b513e8, 0xc13670d4b91fa8d8, 0x12b54136f590bd33, 0x0a4e0373d784d9b4}}, + {{0x6a9c1ff068f587ba, 0x0827894e0050c8de, 0x3cbf99557ded5be7, 0x64a9b0431c06d6f0}}, + {{0x2eb3d6a15b7d2919, 0xb0b4f6a0d53a8235, 
0x7156ce4389a45d47, 0x071a7d0ace18346c}}}, +{{{0xd3072daac887ba0b, 0x01262905bfa562ee, 0xcf543002c0ef768b, 0x2c3bcc7146ea7e9c}}, + {{0xcc0c355220e14431, 0x0d65950709b15141, 0x9af5621b209d5f36, 0x7c69bcf7617755d3}}, + {{0x07f0d7eb04e8295f, 0x10db18252f50f37d, 0xe951a9a3171798d7, 0x6f5a9a7322aca51d}}}, +{{{0x8ba1000c2f41c6c5, 0xc49f79c10cfefb9b, 0x4efa47703cc51c9f, 0x494e21a2e147afca}}, + {{0xe729d4eba3d944be, 0x8d9e09408078af9e, 0x4525567a47869c03, 0x02ab9680ee8d3b24}}, + {{0xefa48a85dde50d9a, 0x219a224e0fb9a249, 0xfa091f1dd91ef6d9, 0x6b5d76cbea46bb34}}}, +{{{0x8857556cec0cd994, 0x6472dc6f5cd01dba, 0xaf0169148f42b477, 0x0ae333f685277354}}, + {{0xe0f941171e782522, 0xf1e6ae74036936d3, 0x408b3ea2d0fcc746, 0x16fb869c03dd313e}}, + {{0x288e199733b60962, 0x24fc72b4d8abe133, 0x4811f7ed0991d03e, 0x3f81e38b8f70d075}}}, +{{{0x7f910fcc7ed9affe, 0x545cb8a12465874b, 0xa8397ed24b0c4704, 0x50510fc104f50993}}, + {{0x0adb7f355f17c824, 0x74b923c3d74299a4, 0xd57c3e8bcbf8eaf7, 0x0ad3e2d34cdedc3d}}, + {{0x6f0c0fc5336e249d, 0x745ede19c331cfd9, 0xf2d6fd0009eefe1c, 0x127c158bf0fa1ebe}}}, +{{{0xf6197c422e9879a2, 0xa44addd452ca3647, 0x9b413fc14b4eaccb, 0x354ef87d07ef4f68}}, + {{0xdea28fc4ae51b974, 0x1d9973d3744dfe96, 0x6240680b873848a8, 0x4ed82479d167df95}}, + {{0xfee3b52260c5d975, 0x50352efceb41b0b8, 0x8808ac30a9f6653c, 0x302d92d20539236d}}}, +{{{0x7813c1a2bca4283d, 0xed62f091a1863dd9, 0xaec7bcb8c268fa86, 0x10e5d3b76f1cae4c}}, + {{0x2dbc6fb6e4e0f177, 0x04e1bf29a4bd6a93, 0x5e1966d4787af6e8, 0x0edc5f5eb426d060}}, + {{0x5453bfd653da8e67, 0xe9dc1eec24a9f641, 0xbf87263b03578a23, 0x45b46c51361cba72}}}, +{{{0xa9402abf314f7fa1, 0xe257f1dc8e8cf450, 0x1dbbd54b23a8be84, 0x2177bfa36dcb713b}}, + {{0xce9d4ddd8a7fe3e4, 0xab13645676620e30, 0x4b594f7bb30e9958, 0x5c1c0aef321229df}}, + {{0x37081bbcfa79db8f, 0x6048811ec25f59b3, 0x087a76659c832487, 0x4ae619387d8ab5bb}}}, +{{{0x8ddbf6aa5344a32e, 0x7d88eab4b41b4078, 0x5eb0eb974a130d60, 0x1a00d91b17bf3e03}}, + {{0x61117e44985bfb83, 0xfce0462a71963136, 0x83ac3448d425904b, 0x75685abe5ba43d64}}, + {{0x6e960933eb61f2b2, 0x543d0fa8c9ff4952, 0xdf7275107af66569, 0x135529b623b0e6aa}}}, +{{{0x18f0dbd7add1d518, 0x979f7888cfc11f11, 0x8732e1f07114759b, 0x79b5b81a65ca3a01}}, + {{0xf5c716bce22e83fe, 0xb42beb19e80985c1, 0xec9da63714254aae, 0x5972ea051590a613}}, + {{0x0fd4ac20dc8f7811, 0x9a9ad294ac4d4fa8, 0xc01b2d64b3360434, 0x4f7e9c95905f3bdb}}}, +{{{0x62674bbc5781302e, 0xd8520f3989addc0f, 0x8c2999ae53fbd9c6, 0x31993ad92e638e4c}}, + {{0x71c8443d355299fe, 0x8bcd3b1cdbebead7, 0x8092499ef1a49466, 0x1942eec4a144adc8}}, + {{0x7dac5319ae234992, 0x2c1b3d910cea3e92, 0x553ce494253c1122, 0x2a0a65314ef9ca75}}}, +{{{0x2db7937ff7f927c2, 0xdb741f0617d0a635, 0x5982f3a21155af76, 0x4cf6e218647c2ded}}, + {{0xcf361acd3c1c793a, 0x2f9ebcac5a35bc3b, 0x60e860e9a8cda6ab, 0x055dc39b6dea1a13}}, + {{0xb119227cc28d5bb6, 0x07e24ebc774dffab, 0xa83c78cee4a32c89, 0x121a307710aa24b6}}}, +{{{0xe4db5d5e9f034a97, 0xe153fc093034bc2d, 0x460546919551d3b1, 0x333fc76c7a40e52d}}, + {{0xd659713ec77483c9, 0x88bfe077b82b96af, 0x289e28231097bcd3, 0x527bb94a6ced3a9b}}, + {{0x563d992a995b482e, 0x3405d07c6e383801, 0x485035de2f64d8e5, 0x6b89069b20a7a9f7}}}, +{{{0x812aa0416270220d, 0x995a89faf9245b4e, 0xffadc4ce5072ef05, 0x23bc2103aa73eb73}}, + {{0x4082fa8cb5c7db77, 0x068686f8c734c155, 0x29e6c8d9f6e7a57e, 0x0473d308a7639bcf}}, + {{0xcaee792603589e05, 0x2b4b421246dcc492, 0x02a1ef74e601a94f, 0x102f73bfde04341a}}}, +{{{0xeb18b9ab7f5745c6, 0x023a8aee5787c690, 0xb72712da2df7afa9, 0x36597d25ea5c013d}}, + {{0xa2b4dae0b5511c9a, 0x7ac860292bffff06, 
0x981f375df5504234, 0x3f6bd725da4ea12d}}, + {{0x734d8d7b106058ac, 0xd940579e6fc6905f, 0x6466f8f99202932d, 0x7b7ecc19da60d6d0}}}, +{{{0x78c2373c695c690d, 0xdd252e660642906e, 0x951d44444ae12bd2, 0x4235ad7601743956}}, + {{0x6dae4a51a77cfa9b, 0x82263654e7a38650, 0x09bbffcd8f2d82db, 0x03bedc661bf5caba}}, + {{0x6258cb0d078975f5, 0x492942549189f298, 0xa0cab423e2e36ee4, 0x0e7ce2b0cdf066a1}}}, +{{{0xc494643ac48c85a3, 0xfd361df43c6139ad, 0x09db17dd3ae94d48, 0x666e0a5d8fb4674a}}, + {{0xfea6fedfd94b70f9, 0xf130c051c1fcba2d, 0x4882d47e7f2fab89, 0x615256138aeceeb5}}, + {{0x2abbf64e4870cb0d, 0xcd65bcf0aa458b6b, 0x9abe4eba75e8985d, 0x7f0bc810d514dee4}}}, +{{{0xb9006ba426f4136f, 0x8d67369e57e03035, 0xcbc8dfd94f463c28, 0x0d1f8dbcf8eedbf5}}, + {{0x83ac9dad737213a0, 0x9ff6f8ba2ef72e98, 0x311e2edd43ec6957, 0x1d3a907ddec5ab75}}, + {{0xba1693313ed081dc, 0x29329fad851b3480, 0x0128013c030321cb, 0x00011b44a31bfde3}}}, +{{{0x3fdfa06c3fc66c0c, 0x5d40e38e4dd60dd2, 0x7ae38b38268e4d71, 0x3ac48d916e8357e1}}, + {{0x16561f696a0aa75c, 0xc1bf725c5852bd6a, 0x11a8dd7f9a7966ad, 0x63d988a2d2851026}}, + {{0x00120753afbd232e, 0xe92bceb8fdd8f683, 0xf81669b384e72b91, 0x33fad52b2368a066}}}, +{{{0x540649c6c5e41e16, 0x0af86430333f7735, 0xb2acfcd2f305e746, 0x16c0f429a256dca7}}, + {{0x8d2cc8d0c422cfe8, 0x072b4f7b05a13acb, 0xa3feb6e6ecf6a56f, 0x3cc355ccb90a71e2}}, + {{0xe9b69443903e9131, 0xb8a494cb7a5637ce, 0xc87cd1a4baba9244, 0x631eaf426bae7568}}}, +{{{0xb3e90410da66fe9f, 0x85dd4b526c16e5a6, 0xbc3d97611ef9bf83, 0x5599648b1ea919b5}}, + {{0x47d975b9a3700de8, 0x7280c5fbe2f80552, 0x53658f2732e45de1, 0x431f2c7f665f80b5}}, + {{0xd6026344858f7b19, 0x14ab352fa1ea514a, 0x8900441a2090a9d7, 0x7b04715f91253b26}}}, +{{{0x83edbd28acf6ae43, 0x86357c8b7d5c7ab4, 0xc0404769b7eb2c44, 0x59b37bf5c2f6583f}}, + {{0xb376c280c4e6bac6, 0x970ed3dd6d1d9b0b, 0xb09a9558450bf944, 0x48d0acfa57cde223}}, + {{0xb60f26e47dabe671, 0xf1d1a197622f3a37, 0x4208ce7ee9960394, 0x16234191336d3bdb}}}, +{{{0xb9e499def6267ff6, 0x7772ca7b742c0843, 0x23a0153fe9a4f2b1, 0x2cdfdfecd5d05006}}, + {{0xdd499cd61ff38640, 0x29cd9bc3063625a0, 0x51e2d8023dd73dc3, 0x4a25707a203b9231}}, + {{0x2ab7668a53f6ed6a, 0x304242581dd170a1, 0x4000144c3ae20161, 0x5721896d248e49fc}}}, +{{{0x0b6e5517fd181bae, 0x9022629f2bb963b4, 0x5509bce932064625, 0x578edd74f63c13da}}, + {{0x285d5091a1d0da4e, 0x4baa6fa7b5fe3e08, 0x63e5177ce19393b3, 0x03c935afc4b030fd}}, + {{0x997276c6492b0c3d, 0x47ccc2c4dfe205fc, 0xdcd29b84dd623a3c, 0x3ec2ab590288c7a2}}}, +{{{0xa1a0d27be4d87bb9, 0xa98b4deb61391aed, 0x99a0ddd073cb9b83, 0x2dd5c25a200fcace}}, + {{0xa7213a09ae32d1cb, 0x0f2b87df40f5c2d5, 0x0baea4c6e81eab29, 0x0e1bf66c6adbac5e}}, + {{0xe2abd5e9792c887e, 0x1a020018cb926d5d, 0xbfba69cdbaae5f1e, 0x730548b35ae88f5f}}}, +{{{0xc43551a3cba8b8ee, 0x65a26f1db2115f16, 0x760f4f52ab8c3850, 0x3043443b411db8ca}}, + {{0x805b094ba1d6e334, 0xbf3ef17709353f19, 0x423f06cb0622702b, 0x585a2277d87845dd}}, + {{0xa18a5f8233d48962, 0x6698c4b5ec78257f, 0xa78e6fa5373e41ff, 0x7656278950ef981f}}}, +{{{0x38c3cf59d51fc8c0, 0x9bedd2fd0506b6f2, 0x26bf109fab570e8f, 0x3f4160a8c1b846a6}}, + {{0xe17073a3ea86cf9d, 0x3a8cfbb707155fdc, 0x4853e7fc31838a8e, 0x28bbf484b613f616}}, + {{0xf2612f5c6f136c7c, 0xafead107f6dd11be, 0x527e9ad213de6f33, 0x1e79cb358188f75d}}}, +{{{0x013436c3eef7e3f1, 0x828b6a7ffe9e10f8, 0x7ff908e5bcf9defc, 0x65d7951b3a3b3831}}, + {{0x77e953d8f5e08181, 0x84a50c44299dded9, 0xdc6c2d0c864525e5, 0x478ab52d39d1f2f4}}, + {{0x66a6a4d39252d159, 0xe5dde1bc871ac807, 0xb82c6b40a6c1c96f, 0x16d87a411a212214}}}, +{{{0xb3bd7e5a42066215, 0x879be3cd0c5a24c1, 
0x57c05db1d6f994b7, 0x28f87c8165f38ca6}}, + {{0xfba4d5e2d54e0583, 0xe21fafd72ebd99fa, 0x497ac2736ee9778f, 0x1f990b577a5a6dde}}, + {{0xa3344ead1be8f7d6, 0x7d1e50ebacea798f, 0x77c6569e520de052, 0x45882fe1534d6d3e}}}, +{{{0x6669345d757983d6, 0x62b6ed1117aa11a6, 0x7ddd1857985e128f, 0x688fe5b8f626f6dd}}, + {{0xd8ac9929943c6fe4, 0xb5f9f161a38392a2, 0x2699db13bec89af3, 0x7dcf843ce405f074}}, + {{0x6c90d6484a4732c0, 0xd52143fdca563299, 0xb3be28c3915dc6e1, 0x6739687e7327191b}}}, +{{{0xef782014385675a6, 0xa2649f30aafda9e8, 0x4cd1eb505cdfa8cb, 0x46115aba1d4dc0b3}}, + {{0xa66dcc9dc80c1ac0, 0x97a05cf41b38a436, 0xa7ebf3be95dbd7c6, 0x7da0b8f68d7e7dab}}, + {{0xd40f1953c3b5da76, 0x1dac6f7321119e9b, 0x03cc6021feb25960, 0x5a5f887e83674b4b}}}, +{{{0x8f6301cf70a13d11, 0xcfceb815350dd0c4, 0xf70297d4a4bca47e, 0x3669b656e44d1434}}, + {{0x9e9628d3a0a643b9, 0xb5c3cb00e6c32064, 0x9b5302897c2dec32, 0x43e37ae2d5d1c70c}}, + {{0x387e3f06eda6e133, 0x67301d5199a13ac0, 0xbd5ad8f836263811, 0x6a21e6cd4fd5e9be}}}, +{{{0xf1c6170a3046e65f, 0x58712a2a00d23524, 0x69dbbd3c8c82b755, 0x586bf9f1a195ff57}}, + {{0xef4129126699b2e3, 0x71d30847708d1301, 0x325432d01182b0bd, 0x45371b07001e8b36}}, + {{0xa6db088d5ef8790b, 0x5278f0dc610937e5, 0xac0349d261a16eb8, 0x0eafb03790e52179}}}, +{{{0x960555c13748042f, 0x219a41e6820baa11, 0x1c81f73873486d0c, 0x309acc675a02c661}}, + {{0x5140805e0f75ae1d, 0xec02fbe32662cc30, 0x2cebdf1eea92396d, 0x44ae3344c5435bb3}}, + {{0x9cf289b9bba543ee, 0xf3760e9d5ac97142, 0x1d82e5c64f9360aa, 0x62d5221b7f94678f}}}, +{{{0x524c299c18d0936d, 0xc86bb56c8a0c1a0c, 0xa375052edb4a8631, 0x5c0efde4bc754562}}, + {{0x7585d4263af77a3c, 0xdfae7b11fee9144d, 0xa506708059f7193d, 0x14f29a5383922037}}, + {{0xdf717edc25b2d7f5, 0x21f970db99b53040, 0xda9234b7c3ed4c62, 0x5e72365c7bee093e}}}, +{{{0x575bfc074571217f, 0x3779675d0694d95b, 0x9a0a37bbf4191e33, 0x77f1104c47b4eabc}}, + {{0x7d9339062f08b33e, 0x5b9659e5df9f32be, 0xacff3dad1f9ebdfd, 0x70b20555cb7349b7}}, + {{0xbe5113c555112c4c, 0x6688423a9a881fcd, 0x446677855e503b47, 0x0e34398f4a06404a}}}, +{{{0xb67d22d93ecebde8, 0x09b3e84127822f07, 0x743fa61fb05b6d8d, 0x5e5405368a362372}}, + {{0x18930b093e4b1928, 0x7de3e10e73f3f640, 0xf43217da73395d6f, 0x6f8aded6ca379c3e}}, + {{0xe340123dfdb7b29a, 0x487b97e1a21ab291, 0xf9967d02fde6949e, 0x780de72ec8d3de97}}}, +{{{0x0ae28545089ae7bc, 0x388ddecf1c7f4d06, 0x38ac15510a4811b8, 0x0eb28bf671928ce4}}, + {{0x671feaf300f42772, 0x8f72eb2a2a8c41aa, 0x29a17fd797373292, 0x1defc6ad32b587a6}}, + {{0xaf5bbe1aef5195a7, 0x148c1277917b15ed, 0x2991f7fb7ae5da2e, 0x467d201bf8dd2867}}}, +{{{0x95fe919a74ef4fad, 0x3a827becf6a308a2, 0x964e01d309a47b01, 0x71c43c4f5ba3c797}}, + {{0xbc1ef4bd567ae7a9, 0x3f624cb2d64498bd, 0xe41064d22c1f4ec8, 0x2ef9c5a5ba384001}}, + {{0xb6fd6df6fa9e74cd, 0xf18278bce4af267a, 0x8255b3d0f1ef990e, 0x5a758ca390c5f293}}}, +{{{0xa2b72710d9462495, 0x3aa8c6d2d57d5003, 0xe3d400bfa0b487ca, 0x2dbae244b3eb72ec}}, + {{0x8ce0918b1d61dc94, 0x8ded36469a813066, 0xd4e6a829afe8aad3, 0x0a738027f639d43f}}, + {{0x980f4a2f57ffe1cc, 0x00670d0de1839843, 0x105c3f4a49fb15fd, 0x2698ca635126a69c}}}, +{{{0xe765318832b0ba78, 0x381831f7925cff8b, 0x08a81b91a0291fcc, 0x1fb43dcc49caeb07}}, + {{0x2e3d702f5e3dd90e, 0x9e3f0918e4d25386, 0x5e773ef6024da96a, 0x3c004b0c4afa3332}}, + {{0x9aa946ac06f4b82b, 0x1ca284a5a806c4f3, 0x3ed3265fc6cd4787, 0x6b43fd01cd1fd217}}}, +{{{0xc7a75d4b4697c544, 0x15fdf848df0fffbf, 0x2868b9ebaa46785a, 0x5a68d7105b52f714}}, + {{0xb5c742583e760ef3, 0x75dc52b9ee0ab990, 0xbf1427c2072b923f, 0x73420b2d6ff0d9f0}}, + {{0xaf2cf6cb9e851e06, 0x8f593913c62238c4, 
0xda8ab89699fbf373, 0x3db5632fea34bc9e}}}, +{{{0xf46eee2bf75dd9d8, 0x0d17b1f6396759a5, 0x1bf2d131499e7273, 0x04321adf49d75f13}}, + {{0x2e4990b1829825d5, 0xedeaeb873e9a8991, 0xeef03d394c704af8, 0x59197ea495df2b0e}}, + {{0x04e16019e4e55aae, 0xe77b437a7e2f92e9, 0xc7ce2dc16f159aa4, 0x45eafdc1f4d70cc0}}}, +{{{0x698401858045d72b, 0x4c22faa2cf2f0651, 0x941a36656b222dc6, 0x5a5eebc80362dade}}, + {{0xb60e4624cfccb1ed, 0x59dbc292bd5c0395, 0x31a09d1ddc0481c9, 0x3f73ceea5d56d940}}, + {{0xb7a7bfd10a4e8dc6, 0xbe57007e44c9b339, 0x60c1207f1557aefa, 0x26058891266218db}}}, +{{{0x59f704a68360ff04, 0xc3d93fde7661e6f4, 0x831b2a7312873551, 0x54ad0c2e4e615d57}}, + {{0x4c818e3cc676e542, 0x5e422c9303ceccad, 0xec07cccab4129f08, 0x0dedfa10b24443b8}}, + {{0xee3b67d5b82b522a, 0x36f163469fa5c1eb, 0xa5b4d2f26ec19fd3, 0x62ecb2baa77a9408}}}, +{{{0xe5ed795261152b3d, 0x4962357d0eddd7d1, 0x7482c8d0b96b4c71, 0x2e59f919a966d8be}}, + {{0x92072836afb62874, 0x5fcd5e8579e104a5, 0x5aad01adc630a14a, 0x61913d5075663f98}}, + {{0x0dc62d361a3231da, 0xfa47583294200270, 0x02d801513f9594ce, 0x3ddbc2a131c05d5c}}}, +{{{0x9adc0ff9ce5ec54b, 0x039c2a6b8c2f130d, 0x028007c7f0f89515, 0x78968314ac04b36b}}, + {{0xf3aa57a22796bb14, 0x883abab79b07da21, 0xe54be21831a0391c, 0x5ee7fb38d83205f9}}, + {{0x538dfdcb41446a8e, 0xa5acfda9434937f9, 0x46af908d263c8c78, 0x61d0633c9bca0d09}}}, +{{{0x63744935ffdb2566, 0xc5bd6b89780b68bb, 0x6f1b3280553eec03, 0x6e965fd847aed7f5}}, + {{0xada328bcf8fc73df, 0xee84695da6f037fc, 0x637fb4db38c2a909, 0x5b23ac2df8067bdc}}, + {{0x9ad2b953ee80527b, 0xe88f19aafade6d8d, 0x0e711704150e82cf, 0x79b9bbb9dd95dedc}}}, +{{{0xebb355406a3126c2, 0xd26383a868c8c393, 0x6c0c6429e5b97a82, 0x5065f158c9fd2147}}, + {{0xd1997dae8e9f7374, 0xa032a2f8cfbb0816, 0xcd6cba126d445f0a, 0x1ba811460accb834}}, + {{0x708169fb0c429954, 0xe14600acd76ecf67, 0x2eaab98a70e645ba, 0x3981f39e58a4faf2}}}, +{{{0x18fb8a7559230a93, 0x1d168f6960e6f45d, 0x3a85a94514a93cb5, 0x38dc083705acd0fd}}, + {{0xc845dfa56de66fde, 0xe152a5002c40483a, 0xe9d2e163c7b4f632, 0x30f4452edcbc1b65}}, + {{0x856d2782c5759740, 0xfa134569f99cbecc, 0x8844fc73c0ea4e71, 0x632d9a1a593f2469}}}, +{{{0xf6bb6b15b807cba6, 0x1823c7dfbc54f0d7, 0xbb1d97036e29670b, 0x0b24f48847ed4a57}}, + {{0xbf09fd11ed0c84a7, 0x63f071810d9f693a, 0x21908c2d57cf8779, 0x3a5a7df28af64ba2}}, + {{0xdcdad4be511beac7, 0xa4538075ed26ccf2, 0xe19cff9f005f9a65, 0x34fcf74475481f63}}}, +{{{0xc197e04c789767ca, 0xb8714dcb38d9467d, 0x55de888283f95fa8, 0x3d3bdc164dfa63f7}}, + {{0xa5bb1dab78cfaa98, 0x5ceda267190b72f2, 0x9309c9110a92608e, 0x0119a3042fb374b0}}, + {{0x67a2d89ce8c2177d, 0x669da5f66895d0c1, 0xf56598e5b282a2b0, 0x56c088f1ede20a73}}}, +{{{0x336d3d1110a86e17, 0xd7f388320b75b2fa, 0xf915337625072988, 0x09674c6b99108b87}}, + {{0x581b5fac24f38f02, 0xa90be9febae30cbd, 0x9a2169028acf92f0, 0x038b7ea48359038f}}, + {{0x9f4ef82199316ff8, 0x2f49d282eaa78d4f, 0x0971a5ab5aef3174, 0x6e5e31025969eb65}}}, +{{{0xb16c62f587e593fb, 0x4999eddeca5d3e71, 0xb491c1e014cc3e6d, 0x08f5114789a8dba8}}, + {{0x3304fb0e63066222, 0xfb35068987acba3f, 0xbd1924778c1061a3, 0x3058ad43d1838620}}, + {{0x323c0ffde57663d0, 0x05c3df38a22ea610, 0xbdc78abdac994f9a, 0x26549fa4efe3dc99}}}, +{{{0x741d5a461e6bf9d6, 0x2305b3fc7777a581, 0xd45574a26474d3d9, 0x1926e1dc6401e0ff}}, + {{0xdb468549af3f666e, 0xd77fcf04f14a0ea5, 0x3df23ff7a4ba0c47, 0x3a10dfe132ce3c85}}, + {{0xe07f4e8aea17cea0, 0x2fd515463a1fc1fd, 0x175322fd31f2c0f1, 0x1fa1d01d861e5d15}}}, +{{{0xcc8055947d599832, 0x1e4656da37f15520, 0x99f6f7744e059320, 0x773563bc6a75cf33}}, + {{0x38dcac00d1df94ab, 0x2e712bddd1080de9, 
0x7f13e93efdd5e262, 0x73fced18ee9a01e5}}, + {{0x06b1e90863139cb3, 0xa493da67c5a03ecd, 0x8d77cec8ad638932, 0x1f426b701b864f44}}}, +{{{0xefc9264c41911c01, 0xf1a3b7b817a22c25, 0x5875da6bf30f1447, 0x4e1af5271d31b090}}, + {{0xf17e35c891a12552, 0xb76b8153575e9c76, 0xfa83406f0d9b723e, 0x0b76bb1b3fa7e438}}, + {{0x08b8c1f97f92939b, 0xbe6771cbd444ab6e, 0x22e5646399bb8017, 0x7b6dd61eb772a955}}}, +{{{0xb7adc1e850f33d92, 0x7998fa4f608cd5cf, 0xad962dbd8dfc5bdb, 0x703e9bceaf1d2f4f}}, + {{0x5730abf9ab01d2c7, 0x16fb76dc40143b18, 0x866cbe65a0cbb281, 0x53fa9b659bff6afe}}, + {{0x6c14c8e994885455, 0x843a5d6665aed4e5, 0x181bb73ebcd65af1, 0x398d93e5c4c61f50}}}, +{{{0x1c4bd16733e248f3, 0xbd9e128715bf0a5f, 0xd43f8cf0a10b0376, 0x53b09b5ddf191b13}}, + {{0xc3877c60d2e7e3f2, 0x3b34aaa030828bb1, 0x283e26e7739ef138, 0x699c9c9002c30577}}, + {{0xf306a7235946f1cc, 0x921718b5cce5d97d, 0x28cdd24781b4e975, 0x51caf30c6fcdd907}}}, +{{{0xa60ba7427674e00a, 0x630e8570a17a7bf3, 0x3758563dcf3324cc, 0x5504aa292383fdaa}}, + {{0x737af99a18ac54c7, 0x903378dcc51cb30f, 0x2b89bc334ce10cc7, 0x12ae29c189f8e99a}}, + {{0xa99ec0cb1f0d01cf, 0x0dd1efcc3a34f7ae, 0x55ca7521d09c4e22, 0x5fd14fe958eba5ea}}}, +{{{0xb5dc2ddf2845ab2c, 0x069491b10a7fe993, 0x4daaf3d64002e346, 0x093ff26e586474d1}}, + {{0x3c42fe5ebf93cb8e, 0xbedfa85136d4565f, 0xe0f0859e884220e8, 0x7dd73f960725d128}}, + {{0xb10d24fe68059829, 0x75730672dbaf23e5, 0x1367253ab457ac29, 0x2f59bcbc86b470a4}}}, +{{{0x83847d429917135f, 0xad1b911f567d03d7, 0x7e7748d9be77aad1, 0x5458b42e2e51af4a}}, + {{0x7041d560b691c301, 0x85201b3fadd7e71e, 0x16c2e16311335585, 0x2aa55e3d010828b1}}, + {{0xed5192e60c07444f, 0x42c54e2d74421d10, 0x352b4c82fdb5c864, 0x13e9004a8a768664}}}, +{{{0x739d8845832fcedb, 0xfa38d6c9ae6bf863, 0x32bc0dcab74ffef7, 0x73937e8814bce45e}}, + {{0xbb2e00c9193b877f, 0xece3a890e0dc506b, 0xecf3b7c036de649f, 0x5f46040898de9e1a}}, + {{0xb9037116297bf48d, 0xa9d13b22d4f06834, 0xe19715574696bdc6, 0x2cf8a4e891d5e835}}}, +{{{0x6d93fd8707110f67, 0xdd4c09d37c38b549, 0x7cb16a4cc2736a86, 0x2049bd6e58252a09}}, + {{0x2cb5487e17d06ba2, 0x24d2381c3950196b, 0xd7659c8185978a30, 0x7a6f7f2891d6a4f6}}, + {{0x7d09fd8d6a9aef49, 0xf0ee60be5b3db90b, 0x4c21b52c519ebfd4, 0x6011aadfc545941d}}}, +{{{0x5f67926dcf95f83c, 0x7c7e856171289071, 0xd6a1e7f3998f7a5b, 0x6fc5cc1b0b62f9e0}}, + {{0x63ded0c802cbf890, 0xfbd098ca0dff6aaa, 0x624d0afdb9b6ed99, 0x69ce18b779340b1e}}, + {{0xd1ef5528b29879cb, 0xdd1aae3cd47e9092, 0x127e0442189f2352, 0x15596b3ae57101f1}}}, +{{{0x462739d23f9179a2, 0xff83123197d6ddcf, 0x1307deb553f2148a, 0x0d2237687b5f4dda}}, + {{0x09ff31167e5124ca, 0x0be4158bd9c745df, 0x292b7d227ef556e5, 0x3aa4e241afb6d138}}, + {{0x2cc138bf2a3305f5, 0x48583f8fa2e926c3, 0x083ab1a25549d2eb, 0x32fcaa6e4687a36c}}}, +{{{0x7bc56e8dc57d9af5, 0x3e0bd2ed9df0bdf2, 0xaac014de22efe4a3, 0x4627e9cefebd6a5c}}, + {{0x3207a4732787ccdf, 0x17e31908f213e3f8, 0xd5b2ecd7f60d964e, 0x746f6336c2600be9}}, + {{0x3f4af345ab6c971c, 0xe288eb729943731f, 0x33596a8a0344186d, 0x7b4917007ed66293}}}, +{{{0x2d85fb5cab84b064, 0x497810d289f3bc14, 0x476adc447b15ce0c, 0x122ba376f844fd7b}}, + {{0x54341b28dd53a2dd, 0xaa17905bdf42fc3f, 0x0ff592d94dd2f8f4, 0x1d03620fe08cd37d}}, + {{0xc20232cda2b4e554, 0x9ed0fd42115d187f, 0x2eabb4be7dd479d9, 0x02c70bf52b68ec4c}}}, +{{{0xa287ec4b5d0b2fbb, 0x415c5790074882ca, 0xe044a61ec1d0815c, 0x26334f0a409ef5e0}}, + {{0xace532bf458d72e1, 0x5be768e07cb73cb5, 0x56cf7d94ee8bbde7, 0x6b0697e3feb43a03}}, + {{0xb6c8f04adf62a3c0, 0x3ef000ef076da45d, 0x9c9cb95849f0d2a9, 0x1cc37f43441b2fae}}}, +{{{0x508f565a5cc7324f, 0xd061c4c0e506a922, 
0xfb18abdb5c45ac19, 0x6c6809c10380314a}}, + {{0xd76656f1c9ceaeb9, 0x1c5b15f818e5656a, 0x26e72832844c2334, 0x3a346f772f196838}}, + {{0xd2d55112e2da6ac8, 0xe9bd0331b1e851ed, 0x960746dd8ec67262, 0x05911b9f6ef7c5d0}}}, +{{{0xc1339983f5df0ebb, 0xc0f3758f512c4cac, 0x2cf1130a0bb398e1, 0x6b3cecf9aa270c62}}, + {{0x5349acf3512eeaef, 0x20c141d31cc1cb49, 0x24180c07a99a688d, 0x555ef9d1c64b2d17}}, + {{0x36a770ba3b73bd08, 0x624aef08a3afbf0c, 0x5737ff98b40946f2, 0x675f4de13381749d}}}, +{{{0x0e2c52036b1782fc, 0x64816c816cad83b4, 0xd0dcbdd96964073e, 0x13d99df70164c520}}, + {{0xa12ff6d93bdab31d, 0x0725d80f9d652dfe, 0x019c4ff39abe9487, 0x60f450b882cd3c43}}, + {{0x014b5ec321e5c0ca, 0x4fcb69c9d719bfa2, 0x4e5f1c18750023a0, 0x1c06de9e55edac80}}}, +{{{0x990f7ad6a33ec4e2, 0x6608f938be2ee08e, 0x9ca143c563284515, 0x4cf38a1fec2db60d}}, + {{0xffd52b40ff6d69aa, 0x34530b18dc4049bb, 0x5e4a5c2fa34d9897, 0x78096f8e7d32ba2d}}, + {{0xa0aaaa650dfa5ce7, 0xf9c49e2a48b5478c, 0x4f09cc7d7003725b, 0x373cad3a26091abe}}}, +{{{0xb294634d82c9f57c, 0x1fcbfde124934536, 0x9e9c4db3418cdb5a, 0x0040f3d9454419fc}}, + {{0xf1bea8fb89ddbbad, 0x3bcb2cbc61aeaecb, 0x8f58a7bb1f9b8d9d, 0x21547eda5112a686}}, + {{0xdefde939fd5986d3, 0xf4272c89510a380c, 0xb72ba407bb3119b9, 0x63550a334a254df4}}}, +{{{0x6507d6edb569cf37, 0x178429b00ca52ee1, 0xea7c0090eb6bd65d, 0x3eea62c7daf78f51}}, + {{0x9bba584572547b49, 0xf305c6fae2c408e0, 0x60e8fa69c734f18d, 0x39a92bafaa7d767a}}, + {{0x9d24c713e693274e, 0x5f63857768dbd375, 0x70525560eb8ab39a, 0x68436a0665c9c4cd}}}, +{{{0xbc0235e8202f3f27, 0xc75c00e264f975b0, 0x91a4e9d5a38c2416, 0x17b6e7f68ab789f9}}, + {{0x1e56d317e820107c, 0xc5266844840ae965, 0xc1e0a1c6320ffc7a, 0x5373669c91611472}}, + {{0x5d2814ab9a0e5257, 0x908f2084c9cab3fc, 0xafcaf5885b2d1eca, 0x1cb4b5a678f87d11}}}, +{{{0xb664c06b394afc6c, 0x0c88de2498da5fb1, 0x4f8d03164bcad834, 0x330bca78de7434a2}}, + {{0x6b74aa62a2a007e7, 0xf311e0b0f071c7b1, 0x5707e438000be223, 0x2dc0fd2d82ef6eac}}, + {{0x982eff841119744e, 0xf9695e962b074724, 0xc58ac14fbfc953fb, 0x3c31be1b369f1cf5}}}, +{{{0xb0f4864d08948aee, 0x07dc19ee91ba1c6f, 0x7975cdaea6aca158, 0x330b61134262d4bb}}, + {{0xc168bc93f9cb4272, 0xaeb8711fc7cedb98, 0x7f0e52aa34ac8d7a, 0x41cec1097e7d55bb}}, + {{0xf79619d7a26d808a, 0xbb1fd49e1d9e156d, 0x73d7c36cdba1df27, 0x26b44cd91f28777d}}}, +{{{0x51f048478f387475, 0xb25dbcf49cbecb3c, 0x9aab1244d99f2055, 0x2c709e6c1c10a5d6}}, + {{0xe1b7f29362730383, 0x4b5279ffebca8a2c, 0xdafc778abfd41314, 0x7deb10149c72610f}}, + {{0xcb62af6a8766ee7a, 0x66cbec045553cd0e, 0x588001380f0be4b5, 0x08e68e9ff62ce2ea}}}, +{{{0x34ad500a4bc130ad, 0x8d38db493d0bd49c, 0xa25c3d98500a89be, 0x2f1f3f87eeba3b09}}, + {{0x2f2d09d50ab8f2f9, 0xacb9218dc55923df, 0x4a8f342673766cb9, 0x4cb13bd738f719f5}}, + {{0xf7848c75e515b64a, 0xa59501badb4a9038, 0xc20d313f3f751b50, 0x19a1e353c0ae2ee8}}}, +{{{0x7d1c7560bafa05c3, 0xb3e1a0a0c6e55e61, 0xe3529718c0d66473, 0x41546b11c20c3486}}, + {{0xb42172cdd596bdbd, 0x93e0454398eefc40, 0x9fb15347b44109b5, 0x736bd3990266ae34}}, + {{0x85532d509334b3b4, 0x46fd114b60816573, 0xcc5f5f30425c8375, 0x412295a2b87fab5c}}}, +{{{0x19c99b88f57ed6e9, 0x5393cb266df8c825, 0x5cee3213b30ad273, 0x14e153ebb52d2e34}}, + {{0x2e655261e293eac6, 0x845a92032133acdb, 0x460975cb7900996b, 0x0760bb8d195add80}}, + {{0x413e1a17cde6818a, 0x57156da9ed69a084, 0x2cbf268f46caccb1, 0x6b34be9bc33ac5f2}}}, +{{{0xf3df2f643a78c0b2, 0x4c3e971ef22e027c, 0xec7d1c5e49c1b5a3, 0x2012c18f0922dd2d}}, + {{0x11fc69656571f2d3, 0xc6c9e845530e737a, 0xe33ae7a2d4fe5035, 0x01b9c7b62e6dd30b}}, + {{0x880b55e55ac89d29, 0x1483241f45a0a763, 
0x3d36efdfc2e76c1f, 0x08af5b784e4bade8}}}, +{{{0x283499dc881f2533, 0x9d0525da779323b6, 0x897addfb673441f4, 0x32b79d71163a168d}}, + {{0xe27314d289cc2c4b, 0x4be4bd11a287178d, 0x18d528d6fa3364ce, 0x6423c1d5afd9826e}}, + {{0xcc85f8d9edfcb36a, 0x22bcc28f3746e5f9, 0xe49de338f9e5d3cd, 0x480a5efbc13e2dcc}}}, +{{{0x0b51e70b01622071, 0x06b505cf8b1dafc5, 0x2c6bb061ef5aabcd, 0x47aa27600cb7bf31}}, + {{0xb6614ce442ce221f, 0x6e199dcc4c053928, 0x663fb4a4dc1cbe03, 0x24b31d47691c8e06}}, + {{0x2a541eedc015f8c3, 0x11a4fe7e7c693f7c, 0xf0af66134ea278d6, 0x545b585d14dda094}}}, +{{{0x67bf275ea0d43a0f, 0xade68e34089beebe, 0x4289134cd479e72e, 0x0f62f9c332ba5454}}, + {{0x6204e4d0e3b321e1, 0x3baa637a28ff1e95, 0x0b0ccffd5b99bd9e, 0x4d22dc3e64c8d071}}, + {{0xfcb46589d63b5f39, 0x5cae6a3f57cbcf61, 0xfebac2d2953afa05, 0x1c0fa01a36371436}}}, +{{{0xd2c604b622943dff, 0xbc8cbece44cfb3a0, 0x5d254ff397808678, 0x0fa3614f3b1ca6bf}}, + {{0x69082b0e8c936a50, 0xf9c9a035c1dac5b6, 0x6fb73e54c4dfb634, 0x4005419b1d2bc140}}, + {{0xa003febdb9be82f0, 0x2089c1af3a44ac90, 0xf8499f911954fa8e, 0x1fba218aef40ab42}}}, +{{{0xab549448fac8f53e, 0x81f6e89a7ba63741, 0x74fd6c7d6c2b5e01, 0x392e3acaa8c86e42}}, + {{0x4f3e57043e7b0194, 0xa81d3eee08daaf7f, 0xc839c6ab99dcdef1, 0x6c535d13ff7761d5}}, + {{0x4cbd34e93e8a35af, 0x2e0781445887e816, 0x19319c76f29ab0ab, 0x25e17fe4d50ac13b}}}, +{{{0x0a289bd71e04f676, 0x208e1c52d6420f95, 0x5186d8b034691fab, 0x255751442a9fb351}}, + {{0x915f7ff576f121a7, 0xc34a32272fcd87e3, 0xccba2fde4d1be526, 0x6bba828f8969899b}}, + {{0xe2d1bc6690fe3901, 0x4cb54a18a0997ad5, 0x971d6914af8460d4, 0x559d504f7f6b7be4}}}, +{{{0xa7738378b3eb54d5, 0x1d69d366a5553c7c, 0x0a26cf62f92800ba, 0x01ab12d5807e3217}}, + {{0x9c4891e7f6d266fd, 0x0744a19b0307781b, 0x88388f1d6061e23b, 0x123ea6a3354bd50e}}, + {{0x118d189041e32d96, 0xb9ede3c2d8315848, 0x1eab4271d83245d9, 0x4a3961e2c918a154}}}, +{{{0x71dc3be0f8e6bba0, 0xd6cef8347effe30a, 0xa992425fe13a476a, 0x2cd6bce3fb1db763}}, + {{0x0327d644f3233f1e, 0x499a260e34fcf016, 0x83b5a716f2dab979, 0x68aceead9bd4111f}}, + {{0x38b4c90ef3d7c210, 0x308e6e24b7ad040c, 0x3860d9f1b7e73e23, 0x595760d5b508f597}}}, +{{{0x6129bfe104aa6397, 0x8f960008a4a7fccb, 0x3f8bc0897d909458, 0x709fa43edcb291a9}}, + {{0x882acbebfd022790, 0x89af3305c4115760, 0x65f492e37d3473f4, 0x2cb2c5df54515a2b}}, + {{0xeb0a5d8c63fd2aca, 0xd22bc1662e694eff, 0x2723f36ef8cbb03a, 0x70f029ecf0c8131f}}}, +{{{0x461307b32eed3e33, 0xae042f33a45581e7, 0xc94449d3195f0366, 0x0b7d5d8a6c314858}}, + {{0x2a6aafaa5e10b0b9, 0x78f0a370ef041aa9, 0x773efb77aa3ad61f, 0x44eca5a2a74bd9e1}}, + {{0x25d448327b95d543, 0x70d38300a3340f1d, 0xde1c531c60e1c52b, 0x272224512c7de9e4}}}, +{{{0x1abc92af49c5342e, 0xffeed811b2e6fad0, 0xefa28c8dfcc84e29, 0x11b5df18a44cc543}}, + {{0xbf7bbb8a42a975fc, 0x8c5c397796ada358, 0xe27fc76fcdedaa48, 0x19735fd7f6bc20a6}}, + {{0xe3ab90d042c84266, 0xeb848e0f7f19547e, 0x2503a1d065a497b9, 0x0fef911191df895f}}}
\ No newline at end of file diff --git a/ext/ed25519-amd64-asm/ge25519_base_slide_multiples.data b/ext/ed25519-amd64-asm/ge25519_base_slide_multiples.data new file mode 100644 index 00000000..32a5d474 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_base_slide_multiples.data @@ -0,0 +1,96 @@ +{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}}, + {{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}}, + {{0xabc91205877aaa68, 0x26d9e823ccaac49e, 0x5a1b7dcbdd43598c, 0x6f117b689f0c65a8}}}, +{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}}, + {{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}}, + {{0x14ae933f0dd0d889, 0x589423221c35da62, 0xd170e5458cf2db4c, 0x5a2826af12b9b4c6}}}, +{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}}, + {{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}}, + {{0xbcbbdbf1812a8285, 0x270e0807d0bdd1fc, 0xb41b670b1bbda72d, 0x43aabe696b3bb69a}}}, +{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}}, + {{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}}, + {{0xf1836dc801b8b3a2, 0xb3035f47053ea49a, 0x529c41ba5877adf3, 0x7a9fbb1c6a0f90a7}}}, +{{{0xf36e217e039d8064, 0x98a081b6f520419b, 0x96cbc608e75eb044, 0x49c05a51fadc9c8f}}, + {{0x9b2e678aa6a8632f, 0xa6509e6f51bc46c5, 0xceb233c9c686f5b5, 0x34b9ed338add7f59}}, + {{0x06b4e8bf9045af1b, 0xe2ff83e8a719d22f, 0xaaf6fc2993d4cf16, 0x73c172021b008b06}}}, +{{{0x315f5b0249864348, 0x3ed6b36977088381, 0xa3a075556a8deb95, 0x18ab598029d5c77f}}, + {{0x2fbf00848a802ade, 0xe5d9fecf02302e27, 0x113e847117703406, 0x4275aae2546d8faf}}, + {{0xd82b2cc5fd6089e9, 0x031eb4a13282e4a4, 0x44311199b51a8622, 0x3dc65522b53df948}}}, +{{{0x506f013b327fbf93, 0xaefcebc99b776f6b, 0x9d12b232aaad5968, 0x0267882d176024a7}}, + {{0xbf70c222a2007f6d, 0xbf84b39ab5bcdedb, 0x537a0e12fb07ba07, 0x234fd7eec346f241}}, + {{0x5360a119732ea378, 0x2437e6b1df8dd471, 0xa2ef37f891a7e533, 0x497ba6fdaa097863}}}, +{{{0x040bcd86468ccf0b, 0xd3829ba42a9910d6, 0x7508300807b25192, 0x43b5cd4218d05ebf}}, + {{0x24cecc0313cfeaa0, 0x8648c28d189c246d, 0x2dbdbdfac1f2d4d0, 0x61e22917f12de72b}}, + {{0x5d9a762f9bd0b516, 0xeb38af4e373fdeee, 0x032e5a7d93d64270, 0x511d61210ae4d842}}}, +{{{0x081386484420de87, 0x8a1cf016b592edb4, 0x39fa4e2729942d25, 0x71a7fe6fe2482810}}, + {{0x92c676ef950e9d81, 0xa54620cdc0d7044f, 0xaa9b36646f8f1248, 0x6d325924ddb855e3}}, + {{0x6c7182b8a5c8c854, 0x33fd1479fe5f2a03, 0x72cf591883778d0c, 0x4746c4b6559eeaa9}}}, +{{{0x348546c864741147, 0x7d35aedd0efcc849, 0xff939a760672a332, 0x219663497db5e6d6}}, + {{0xd3777b3c6dc69a2b, 0xdefab2276f89f617, 0x45651cf7b53a16b5, 0x5c9a51de34fe9fb7}}, + {{0xf510f1cf79f10e67, 0xffdddaa1e658515b, 0x09c3a71710142277, 0x4804503c608223bb}}}, +{{{0x3b6821d23a36d175, 0xbbb40aa7e99b9e32, 0x5d9e5ce420838a47, 0x771e098858de4c5e}}, + {{0xc4249ed02ca37fc7, 0xa059a0e3a615acab, 0x88a96ed7c96e0e23, 0x553398a51650696d}}, + {{0x9a12f5d278451edf, 0x3ada5d7985899ccb, 0x477f4a2d9fa59508, 0x5a5ed1d68ff5a611}}}, +{{{0xbae5e0c558527359, 0x392e5c19cadb9d7e, 0x28653c1eda1cabe9, 0x019b60135fefdc44}}, + {{0x1195122afe150e83, 0xcf209a257e4b35d8, 0x7387f8291e711e20, 0x44acb897d8bf92f0}}, + {{0x1e6068145e134b83, 0xc4f5e64f24304c16, 0x506e88a8fc1a3ed7, 0x150c49fde6ad2f92}}}, +{{{0xb849863c9cdca868, 0xc83f44dbb8714ad0, 0xfe3ee3560c36168d, 0x78a6d7791e05fbc1}}, + {{0x8e7bf29509471138, 0x5d6fef394f75a651, 0x10af79c425a708ad, 
0x6b2b5a075bb99922}}, + {{0x58bf704b47a0b976, 0xa601b355741748d5, 0xaa2b1fb1d542f590, 0x725c7ffc4ad55d00}}}, +{{{0x91802bf71cd098c0, 0xfe416ca4ed5e6366, 0xdf585d714902994c, 0x4cd54625f855fae7}}, + {{0xe4426715d1cf99b2, 0x7352d51102a20d34, 0x23d1157b8b12109f, 0x794cc9277cb1f3a3}}, + {{0x4af6c426c2ac5053, 0xbc9aedad32f67258, 0x2ad032f10a311021, 0x7008357b6fcc8e85}}}, +{{{0xd01b9fbb82584a34, 0x47ab6463d2b4792b, 0xb631639c48536202, 0x13a92a3669d6d428}}, + {{0x0b88672738773f01, 0xb8ccc8fa95fbccfb, 0x8d2dd5a3b9ad29b6, 0x06ef7e9851ad0f6a}}, + {{0xca93771cc0577de5, 0x7540e41e5035dc5c, 0x24680f01d802e071, 0x3c296ddf8a2af86a}}}, +{{{0xfceb4d2ebb1f2541, 0xb89510c740adb91f, 0xfc71a37dd0a1ad05, 0x0a892c700747717b}}, + {{0xaead15f9d914a713, 0xa92f7bf98c8ff912, 0xaff823179f53d730, 0x7a99d393490c77ba}}, + {{0x8f52ed2436bda3e8, 0x77a8c84157e80794, 0xa5a96563262f9ce0, 0x286762d28302f7d2}}}, +{{{0x7c558e2bce2ef5bd, 0xe4986cb46747bc63, 0x154a179f3bbb89b8, 0x7686f2a3d6f1767a}}, + {{0x4e7836093ce35b25, 0x82e1181db26baa97, 0x0cc192d3cbc7b83f, 0x32f1da046a9d9d3a}}, + {{0xaa8d12a66d597c6a, 0x8f11930304d3852b, 0x3f91dc73c209b022, 0x561305f8a9ad28a6}}}, +{{{0x6722cc28e7b0c0d5, 0x709de9bbdb075c53, 0xcaf68da7d7010a61, 0x030a1aef2c57cc6c}}, + {{0x100c978dec92aed1, 0xca43d5434d6d73e5, 0x83131b22d847ba48, 0x00aaec53e35d4d2c}}, + {{0x7bb1f773003ad2aa, 0x0b3f29802b216608, 0x7821dc86520ed23e, 0x20be9c1c24065480}}}, +{{{0x20e0e44ae2025e60, 0xb03b3b2fcbdcb938, 0x105d639cf95a0d1c, 0x69764c545067e311}}, + {{0xe15387d8249673a6, 0x5943bc2df546e493, 0x1c7f9a81c36f63b5, 0x750ab3361f0ac1de}}, + {{0x1e8a3283a2f81037, 0x6f2eda23bd7fcbf1, 0xb72fd15bac2e2563, 0x54f96b3fb7075040}}}, +{{{0x177dafc616b11ecd, 0x89764b9cfa576479, 0xb7a8a110e6ece785, 0x78e6839fbe85dbf0}}, + {{0x0fadf20429669279, 0x3adda2047d7d724a, 0x6f3d94828c5760f1, 0x3d7fe9c52bb7539e}}, + {{0x70332df737b8856b, 0x75d05d43041a178a, 0x320ff74aa0e59e22, 0x70f268f350088242}}}, +{{{0x2324112070dcf355, 0x380cc97ee7fce117, 0xb31ddeed3552b698, 0x404e56c039b8c4b9}}, + {{0x66864583b1805f47, 0xf535c5d160dd7c19, 0xe9874eb71e4cb006, 0x7c0d345cfad889d9}}, + {{0x591f1f4b8c78338a, 0xa0366ab167e0b5e1, 0x5cbc4152b45f3d44, 0x20d754762aaec777}}}, +{{{0x9d74feb135b9f543, 0x84b37df1de8c956c, 0xe9322b0757138ba9, 0x38b8ada8790b4ce1}}, + {{0x5e8fc36fc73bb758, 0xace543a5363cbb9a, 0xa9934a7d903bc922, 0x2b8f1e46f3ceec62}}, + {{0xb5c04a9cdf51f95d, 0x2b3952aecb1fdeac, 0x1d106d8b328b66da, 0x049aeb32ceba1953}}}, +{{{0xd7767d3c63dcfe7e, 0x209c594897856e40, 0xb6676861e14f7c13, 0x51c665e0c8d625fc}}, + {{0xaa507d0b75fc7931, 0x0fef924b7a6725d3, 0x1d82542b396b3930, 0x795ee17530f674fc}}, + {{0x254a5b0a52ecbd81, 0x5d411f6ee034afe7, 0xe6a24d0dcaee4a31, 0x6cd19bf49dc54477}}}, +{{{0x7e87619052179ca3, 0x571d0a060b2c9f85, 0x80a2baa88499711e, 0x7520f3db40b2e638}}, + {{0x1ffe612165afc386, 0x082a2a88b8d51b10, 0x76f6627e20990baa, 0x5e01b3a7429e43e7}}, + {{0x3db50be3d39357a1, 0x967b6cdd599e94a5, 0x1a309a64df311e6e, 0x71092c9ccef3c986}}}, +{{{0x53d8523f0364918c, 0xa2b404f43fab6b1c, 0x080b4a9e6681e5a4, 0x0ea15b03d0257ba7}}, + {{0x856bd8ac74051dcf, 0x03f6a40855b7aa1e, 0x3a4ae7cbc9743ceb, 0x4173a5bb7137abde}}, + {{0x17c56e31f0f9218a, 0x5a696e2b1afc4708, 0xf7931668f4b2f176, 0x5fc565614a4e3a67}}}, +{{{0x136e570dc46d7ae5, 0x0fd0aacc54f8dc8f, 0x59549f03310dad86, 0x62711c414c454aa1}}, + {{0x4892e1e67790988e, 0x01d5950f1c5cd722, 0xe3b0819ae5923eed, 0x3214c7409d46651b}}, + {{0x1329827406651770, 0x3ba4a0668a279436, 0xd9b6b8ec185d223c, 0x5bea94073ecb833c}}}, +{{{0x641dbf0912c89be4, 0xacf38b317d6e579c, 0xabfe9e02f697b065, 
0x3aacd5c148f61eec}}, + {{0xb470ce63f343d2f8, 0x0067ba8f0543e8f1, 0x35da51a1a2117b6f, 0x4ad0785944f1bd2f}}, + {{0x858e3b34c3318301, 0xdc99c04707316826, 0x34085b2ed39da88c, 0x3aff0cb1d902853d}}}, +{{{0x87c5c7eb3a20405e, 0x8ee311efedad56c9, 0x29252e48ad29d5f9, 0x110e7e86f4cd251d}}, + {{0x9226430bf4c53505, 0x68e49c13261f2283, 0x09ef33788fd327c6, 0x2ccf9f732bd99e7f}}, + {{0x57c0d89ed603f5e4, 0x12888628f0b0200c, 0x53172709a02e3bb7, 0x05c557e0b9693a37}}}, +{{{0xd8f9ce311fc97e6f, 0x7a3f263011f9fdae, 0xe15b7ea08bed25dd, 0x6e154c178fe9875a}}, + {{0xf776bbb089c20eb0, 0x61f85bf6fa0fd85c, 0xb6b93f4e634421fb, 0x289fef0841861205}}, + {{0xcf616336fed69abf, 0x9b16e4e78335c94f, 0x13789765753a7fe7, 0x6afbf642a95ca319}}}, +{{{0x7da8de0c62f5d2c1, 0x98fc3da4b00e7b9a, 0x7deb6ada0dad70e0, 0x0db4b851b95038c4}}, + {{0x5de55070f913a8cc, 0x7d1d167b2b0cf561, 0xda2956b690ead489, 0x12c093cedb801ed9}}, + {{0xfc147f9308b8190f, 0x06969da0a11ae310, 0xcee75572dac7d7fd, 0x33aa8799c6635ce6}}}, +{{{0xaf0ff51ebd085cf2, 0x78f51a8967d33f1f, 0x6ec2bfe15060033c, 0x233c6f29e8e21a86}}, + {{0x8348f588fc156cb1, 0x6da2ba9b1a0a6d27, 0xe2262d5c87ca5ab6, 0x212cd0c1c8d589a6}}, + {{0xd2f4d5107f18c781, 0x122ecdf2527e9d28, 0xa70a862a3d3d3341, 0x1db7778911914ce3}}}, +{{{0xddf352397c6bc26f, 0x7a97e2cc53d50113, 0x7c74f43abf79a330, 0x31ad97ad26e2adfc}}, + {{0xb3394769dd701ab6, 0xe2b8ded419cf8da5, 0x15df4161fd2ac852, 0x7ae2ca8a017d24be}}, + {{0xb7e817ed0920b962, 0x1e8518cc3f19da9d, 0xe491c14f25560a64, 0x1ed1fc53a6622c83}}}
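For readers inspecting these tables: each 4-word group above is one field element modulo 2^255 - 19 held in four 64-bit limbs, and each {{{...}}, {{...}}, {{...}}} triple is one precomputed multiple of the Ed25519 base point (the file above is ge25519_base_slide_multiples.data). The short C sketch below shows one way to view such an entry; the type names, the limb order, and the (y - x, y + x, 2dxy) cached-point interpretation are assumptions made for illustration and are not identifiers or statements taken from this patch.

```c
#include <stdint.h>

/* One field element mod 2^255 - 19, stored as four 64-bit limbs
 * (presumably least-significant limb first), matching the 4-word
 * groups in the table above.  Names are illustrative assumptions. */
typedef struct {
    uint64_t limb[4];
} fe64;

/* One table entry: three field elements.  Ed25519 code of this style
 * typically caches a point as (y - x, y + x, 2*d*x*y); that
 * interpretation is an assumption here, not taken from the patch. */
typedef struct {
    fe64 e[3];
} precomp_entry;

/* First entry of ge25519_base_slide_multiples.data, copied verbatim
 * from the data above. */
static const precomp_entry first_entry = {{
    {{ 0x9d103905d740913eULL, 0xfd399f05d140beb3ULL,
       0xa5c18434688f8a09ULL, 0x44fd2f9298f81267ULL }},
    {{ 0x2fbc93c6f58c3b85ULL, 0xcf932dc6fb8c0e19ULL,
       0x270b4898643d42c2ULL, 0x07cf9d3a33d4ba65ULL }},
    {{ 0xabc91205877aaa68ULL, 0x26d9e823ccaac49eULL,
       0x5a1b7dcbdd43598cULL, 0x6f117b689f0c65a8ULL }},
}};
```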
\ No newline at end of file diff --git a/ext/ed25519-amd64-asm/ge25519_dbl_p1p1.s b/ext/ed25519-amd64-asm/ge25519_dbl_p1p1.s new file mode 100644 index 00000000..7909a988 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_dbl_p1p1.s @@ -0,0 +1,2975 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: stack64 rx0_stack + +# qhasm: stack64 rx1_stack + +# qhasm: stack64 rx2_stack + +# qhasm: stack64 rx3_stack + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 ry4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 squarer4 + +# qhasm: int64 squarer5 + +# qhasm: int64 squarer6 + +# qhasm: int64 squarer7 + +# qhasm: int64 squarer8 + +# qhasm: int64 squarerax + +# qhasm: int64 squarerdx + +# qhasm: int64 squaret1 + +# qhasm: int64 squaret2 + +# qhasm: int64 squaret3 + +# qhasm: int64 squarec + +# qhasm: int64 squarezero + +# qhasm: int64 squarei38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1 +.globl crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1 +_crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1: +crypto_sign_ed25519_amd64_64_ge25519_dbl_p1p1: +mov %rsp,%r11 +and $31,%r11 +add 
$192,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 8) +# asm 1: movq 8(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 8(<pp=%rsi),>squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(<pp=int64#2) +# asm 2: mulq 0(<pp=%rsi) +mulq 0(%rsi) + +# qhasm: a1 = squarerax +# asm 1: mov <squarerax=int64#7,>a1=int64#5 +# asm 2: mov <squarerax=%rax,>a1=%r8 +mov %rax,%r8 + +# qhasm: a2 = squarerdx +# asm 1: mov <squarerdx=int64#3,>a2=int64#6 +# asm 2: mov <squarerdx=%rdx,>a2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 16(<pp=%rsi),>squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 8) +# asm 1: mulq 8(<pp=int64#2) +# asm 2: mulq 8(<pp=%rsi) +mulq 8(%rsi) + +# qhasm: a3 = squarerax +# asm 1: mov <squarerax=int64#7,>a3=int64#8 +# asm 2: mov <squarerax=%rax,>a3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer4=int64#9 +# asm 2: mov <squarerdx=%rdx,>squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<pp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 16) +# asm 1: mulq 16(<pp=int64#2) +# asm 2: mulq 16(<pp=%rsi) +mulq 16(%rsi) + +# qhasm: squarer5 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer5=int64#10 +# asm 2: mov <squarerax=%rax,>squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer6=int64#11 +# asm 2: mov <squarerdx=%rdx,>squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 16(<pp=%rsi),>squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(<pp=int64#2) +# asm 2: mulq 0(<pp=%rsi) +mulq 0(%rsi) + +# qhasm: carry? a2 += squarerax +# asm 1: add <squarerax=int64#7,<a2=int64#6 +# asm 2: add <squarerax=%rax,<a2=%r9 +add %rax,%r9 + +# qhasm: carry? 
a3 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<a3=int64#8 +# asm 2: adc <squarerdx=%rdx,<a3=%r10 +adc %rdx,%r10 + +# qhasm: squarer4 += 0 + carry +# asm 1: adc $0,<squarer4=int64#9 +# asm 2: adc $0,<squarer4=%r11 +adc $0,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<pp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 8) +# asm 1: mulq 8(<pp=int64#2) +# asm 2: mulq 8(<pp=%rsi) +mulq 8(%rsi) + +# qhasm: carry? squarer4 += squarerax +# asm 1: add <squarerax=int64#7,<squarer4=int64#9 +# asm 2: add <squarerax=%rax,<squarer4=%r11 +add %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<pp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(<pp=int64#2) +# asm 2: mulq 0(<pp=%rsi) +mulq 0(%rsi) + +# qhasm: carry? a3 += squarerax +# asm 1: add <squarerax=int64#7,<a3=int64#8 +# asm 2: add <squarerax=%rax,<a3=%r10 +add %rax,%r10 + +# qhasm: carry? squarer4 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer4=int64#9 +# asm 2: adc <squarerdx=%rdx,<squarer4=%r11 +adc %rdx,%r11 + +# qhasm: carry? squarer5 += 0 + carry +# asm 1: adc $0,<squarer5=int64#10 +# asm 2: adc $0,<squarer5=%r12 +adc $0,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: carry? a1 += a1 +# asm 1: add <a1=int64#5,<a1=int64#5 +# asm 2: add <a1=%r8,<a1=%r8 +add %r8,%r8 + +# qhasm: carry? a2 += a2 + carry +# asm 1: adc <a2=int64#6,<a2=int64#6 +# asm 2: adc <a2=%r9,<a2=%r9 +adc %r9,%r9 + +# qhasm: carry? a3 += a3 + carry +# asm 1: adc <a3=int64#8,<a3=int64#8 +# asm 2: adc <a3=%r10,<a3=%r10 +adc %r10,%r10 + +# qhasm: carry? squarer4 += squarer4 + carry +# asm 1: adc <squarer4=int64#9,<squarer4=int64#9 +# asm 2: adc <squarer4=%r11,<squarer4=%r11 +adc %r11,%r11 + +# qhasm: carry? squarer5 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<squarer5=int64#10 +# asm 2: adc <squarer5=%r12,<squarer5=%r12 +adc %r12,%r12 + +# qhasm: carry? 
squarer6 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<squarer6=int64#11 +# asm 2: adc <squarer6=%r13,<squarer6=%r13 +adc %r13,%r13 + +# qhasm: squarer7 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<squarer7=int64#4 +# asm 2: adc <squarer7=%rcx,<squarer7=%rcx +adc %rcx,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 0) +# asm 1: movq 0(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 0(<pp=%rsi),>squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(<pp=int64#2) +# asm 2: mulq 0(<pp=%rsi) +mulq 0(%rsi) + +# qhasm: a0 = squarerax +# asm 1: mov <squarerax=int64#7,>a0=int64#12 +# asm 2: mov <squarerax=%rax,>a0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret1=int64#13 +# asm 2: mov <squarerdx=%rdx,>squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(pp + 8) +# asm 1: movq 8(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 8(<pp=%rsi),>squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 8) +# asm 1: mulq 8(<pp=int64#2) +# asm 2: mulq 8(<pp=%rsi) +mulq 8(%rsi) + +# qhasm: squaret2 = squarerax +# asm 1: mov <squarerax=int64#7,>squaret2=int64#14 +# asm 2: mov <squarerax=%rax,>squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret3=int64#15 +# asm 2: mov <squarerdx=%rdx,>squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 16(<pp=%rsi),>squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 16) +# asm 1: mulq 16(<pp=int64#2) +# asm 2: mulq 16(<pp=%rsi) +mulq 16(%rsi) + +# qhasm: carry? a1 += squaret1 +# asm 1: add <squaret1=int64#13,<a1=int64#5 +# asm 2: add <squaret1=%r15,<a1=%r8 +add %r15,%r8 + +# qhasm: carry? a2 += squaret2 + carry +# asm 1: adc <squaret2=int64#14,<a2=int64#6 +# asm 2: adc <squaret2=%rbx,<a2=%r9 +adc %rbx,%r9 + +# qhasm: carry? a3 += squaret3 + carry +# asm 1: adc <squaret3=int64#15,<a3=int64#8 +# asm 2: adc <squaret3=%rbp,<a3=%r10 +adc %rbp,%r10 + +# qhasm: carry? squarer4 += squarerax + carry +# asm 1: adc <squarerax=int64#7,<squarer4=int64#9 +# asm 2: adc <squarerax=%rax,<squarer4=%r11 +adc %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 24(<pp=%rsi),>squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 24) +# asm 1: mulq 24(<pp=int64#2) +# asm 2: mulq 24(<pp=%rsi) +mulq 24(%rsi) + +# qhasm: carry? 
squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: squarerax = squarer4 +# asm 1: mov <squarer4=int64#9,>squarerax=int64#7 +# asm 2: mov <squarer4=%r11,>squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: squarer4 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer4=int64#9 +# asm 2: mov <squarerax=%rax,>squarer4=%r11 +mov %rax,%r11 + +# qhasm: squarerax = squarer5 +# asm 1: mov <squarer5=int64#10,>squarerax=int64#7 +# asm 2: mov <squarer5=%r12,>squarerax=%rax +mov %r12,%rax + +# qhasm: squarer5 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer5=int64#10 +# asm 2: mov <squarerdx=%rdx,>squarer5=%r12 +mov %rdx,%r12 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer5 += squarerax +# asm 1: add <squarerax=int64#7,<squarer5=int64#10 +# asm 2: add <squarerax=%rax,<squarer5=%r12 +add %rax,%r12 + +# qhasm: squarerax = squarer6 +# asm 1: mov <squarer6=int64#11,>squarerax=int64#7 +# asm 2: mov <squarer6=%r13,>squarerax=%rax +mov %r13,%rax + +# qhasm: squarer6 = 0 +# asm 1: mov $0,>squarer6=int64#11 +# asm 2: mov $0,>squarer6=%r13 +mov $0,%r13 + +# qhasm: squarer6 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer6=int64#11 +# asm 2: adc <squarerdx=%rdx,<squarer6=%r13 +adc %rdx,%r13 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarerax = squarer7 +# asm 1: mov <squarer7=int64#4,>squarerax=int64#7 +# asm 2: mov <squarer7=%rcx,>squarerax=%rax +mov %rcx,%rax + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer7 += squarerax +# asm 1: add <squarerax=int64#7,<squarer7=int64#4 +# asm 2: add <squarerax=%rax,<squarer7=%rcx +add %rax,%rcx + +# qhasm: squarer8 = 0 +# asm 1: mov $0,>squarer8=int64#7 +# asm 2: mov $0,>squarer8=%rax +mov $0,%rax + +# qhasm: squarer8 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer8=int64#7 +# asm 2: adc <squarerdx=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: carry? a0 += squarer4 +# asm 1: add <squarer4=int64#9,<a0=int64#12 +# asm 2: add <squarer4=%r11,<a0=%r14 +add %r11,%r14 + +# qhasm: carry? a1 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<a1=int64#5 +# asm 2: adc <squarer5=%r12,<a1=%r8 +adc %r12,%r8 + +# qhasm: carry? a2 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<a2=int64#6 +# asm 2: adc <squarer6=%r13,<a2=%r9 +adc %r13,%r9 + +# qhasm: carry? 
a3 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<a3=int64#8 +# asm 2: adc <squarer7=%rcx,<a3=%r10 +adc %rcx,%r10 + +# qhasm: squarezero = 0 +# asm 1: mov $0,>squarezero=int64#3 +# asm 2: mov $0,>squarezero=%rdx +mov $0,%rdx + +# qhasm: squarer8 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<squarer8=int64#7 +# asm 2: adc <squarezero=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: squarer8 *= 38 +# asm 1: imulq $38,<squarer8=int64#7,>squarer8=int64#4 +# asm 2: imulq $38,<squarer8=%rax,>squarer8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? a0 += squarer8 +# asm 1: add <squarer8=int64#4,<a0=int64#12 +# asm 2: add <squarer8=%rcx,<a0=%r14 +add %rcx,%r14 + +# qhasm: carry? a1 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<a1=int64#5 +# asm 2: adc <squarezero=%rdx,<a1=%r8 +adc %rdx,%r8 + +# qhasm: carry? a2 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<a2=int64#6 +# asm 2: adc <squarezero=%rdx,<a2=%r9 +adc %rdx,%r9 + +# qhasm: carry? a3 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<a3=int64#8 +# asm 2: adc <squarezero=%rdx,<a3=%r10 +adc %rdx,%r10 + +# qhasm: squarezero += squarezero + carry +# asm 1: adc <squarezero=int64#3,<squarezero=int64#3 +# asm 2: adc <squarezero=%rdx,<squarezero=%rdx +adc %rdx,%rdx + +# qhasm: squarezero *= 38 +# asm 1: imulq $38,<squarezero=int64#3,>squarezero=int64#3 +# asm 2: imulq $38,<squarezero=%rdx,>squarezero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: a0 += squarezero +# asm 1: add <squarezero=int64#3,<a0=int64#12 +# asm 2: add <squarezero=%rdx,<a0=%r14 +add %rdx,%r14 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#12,>a0_stack=stack64#8 +# asm 2: movq <a0=%r14,>a0_stack=56(%rsp) +movq %r14,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#5,>a1_stack=stack64#9 +# asm 2: movq <a1=%r8,>a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#6,>a2_stack=stack64#10 +# asm 2: movq <a2=%r9,>a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#8,>a3_stack=stack64#11 +# asm 2: movq <a3=%r10,>a3_stack=80(%rsp) +movq %r10,80(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 40(<pp=%rsi),>squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(<pp=int64#2) +# asm 2: mulq 32(<pp=%rsi) +mulq 32(%rsi) + +# qhasm: b1 = squarerax +# asm 1: mov <squarerax=int64#7,>b1=int64#5 +# asm 2: mov <squarerax=%rax,>b1=%r8 +mov %rax,%r8 + +# qhasm: b2 = squarerdx +# asm 1: mov <squarerdx=int64#3,>b2=int64#6 +# asm 2: mov <squarerdx=%rdx,>b2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 48(<pp=%rsi),>squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(<pp=int64#2) +# asm 2: mulq 40(<pp=%rsi) +mulq 40(%rsi) + +# qhasm: b3 = squarerax +# asm 1: mov <squarerax=int64#7,>b3=int64#8 +# asm 2: mov <squarerax=%rax,>b3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer4=int64#9 +# asm 2: mov <squarerdx=%rdx,>squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 56(<pp=%rsi),>squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = 
squarerax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(<pp=int64#2) +# asm 2: mulq 48(<pp=%rsi) +mulq 48(%rsi) + +# qhasm: squarer5 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer5=int64#10 +# asm 2: mov <squarerax=%rax,>squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer6=int64#11 +# asm 2: mov <squarerdx=%rdx,>squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 48(<pp=%rsi),>squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(<pp=int64#2) +# asm 2: mulq 32(<pp=%rsi) +mulq 32(%rsi) + +# qhasm: carry? b2 += squarerax +# asm 1: add <squarerax=int64#7,<b2=int64#6 +# asm 2: add <squarerax=%rax,<b2=%r9 +add %rax,%r9 + +# qhasm: carry? b3 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<b3=int64#8 +# asm 2: adc <squarerdx=%rdx,<b3=%r10 +adc %rdx,%r10 + +# qhasm: squarer4 += 0 + carry +# asm 1: adc $0,<squarer4=int64#9 +# asm 2: adc $0,<squarer4=%r11 +adc $0,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 56(<pp=%rsi),>squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(<pp=int64#2) +# asm 2: mulq 40(<pp=%rsi) +mulq 40(%rsi) + +# qhasm: carry? squarer4 += squarerax +# asm 1: add <squarerax=int64#7,<squarer4=int64#9 +# asm 2: add <squarerax=%rax,<squarer4=%r11 +add %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 56(<pp=%rsi),>squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(<pp=int64#2) +# asm 2: mulq 32(<pp=%rsi) +mulq 32(%rsi) + +# qhasm: carry? b3 += squarerax +# asm 1: add <squarerax=int64#7,<b3=int64#8 +# asm 2: add <squarerax=%rax,<b3=%r10 +add %rax,%r10 + +# qhasm: carry? squarer4 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer4=int64#9 +# asm 2: adc <squarerdx=%rdx,<squarer4=%r11 +adc %rdx,%r11 + +# qhasm: carry? squarer5 += 0 + carry +# asm 1: adc $0,<squarer5=int64#10 +# asm 2: adc $0,<squarer5=%r12 +adc $0,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: carry? b1 += b1 +# asm 1: add <b1=int64#5,<b1=int64#5 +# asm 2: add <b1=%r8,<b1=%r8 +add %r8,%r8 + +# qhasm: carry? b2 += b2 + carry +# asm 1: adc <b2=int64#6,<b2=int64#6 +# asm 2: adc <b2=%r9,<b2=%r9 +adc %r9,%r9 + +# qhasm: carry? b3 += b3 + carry +# asm 1: adc <b3=int64#8,<b3=int64#8 +# asm 2: adc <b3=%r10,<b3=%r10 +adc %r10,%r10 + +# qhasm: carry? squarer4 += squarer4 + carry +# asm 1: adc <squarer4=int64#9,<squarer4=int64#9 +# asm 2: adc <squarer4=%r11,<squarer4=%r11 +adc %r11,%r11 + +# qhasm: carry? squarer5 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<squarer5=int64#10 +# asm 2: adc <squarer5=%r12,<squarer5=%r12 +adc %r12,%r12 + +# qhasm: carry? 
squarer6 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<squarer6=int64#11 +# asm 2: adc <squarer6=%r13,<squarer6=%r13 +adc %r13,%r13 + +# qhasm: squarer7 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<squarer7=int64#4 +# asm 2: adc <squarer7=%rcx,<squarer7=%rcx +adc %rcx,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 32(<pp=%rsi),>squarerax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(<pp=int64#2) +# asm 2: mulq 32(<pp=%rsi) +mulq 32(%rsi) + +# qhasm: b0 = squarerax +# asm 1: mov <squarerax=int64#7,>b0=int64#12 +# asm 2: mov <squarerax=%rax,>b0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret1=int64#13 +# asm 2: mov <squarerdx=%rdx,>squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 40(<pp=%rsi),>squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(<pp=int64#2) +# asm 2: mulq 40(<pp=%rsi) +mulq 40(%rsi) + +# qhasm: squaret2 = squarerax +# asm 1: mov <squarerax=int64#7,>squaret2=int64#14 +# asm 2: mov <squarerax=%rax,>squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret3=int64#15 +# asm 2: mov <squarerdx=%rdx,>squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 48(<pp=%rsi),>squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(<pp=int64#2) +# asm 2: mulq 48(<pp=%rsi) +mulq 48(%rsi) + +# qhasm: carry? b1 += squaret1 +# asm 1: add <squaret1=int64#13,<b1=int64#5 +# asm 2: add <squaret1=%r15,<b1=%r8 +add %r15,%r8 + +# qhasm: carry? b2 += squaret2 + carry +# asm 1: adc <squaret2=int64#14,<b2=int64#6 +# asm 2: adc <squaret2=%rbx,<b2=%r9 +adc %rbx,%r9 + +# qhasm: carry? b3 += squaret3 + carry +# asm 1: adc <squaret3=int64#15,<b3=int64#8 +# asm 2: adc <squaret3=%rbp,<b3=%r10 +adc %rbp,%r10 + +# qhasm: carry? squarer4 += squarerax + carry +# asm 1: adc <squarerax=int64#7,<squarer4=int64#9 +# asm 2: adc <squarerax=%rax,<squarer4=%r11 +adc %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 56(<pp=%rsi),>squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(<pp=int64#2) +# asm 2: mulq 56(<pp=%rsi) +mulq 56(%rsi) + +# qhasm: carry? 
squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: squarerax = squarer4 +# asm 1: mov <squarer4=int64#9,>squarerax=int64#7 +# asm 2: mov <squarer4=%r11,>squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: squarer4 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer4=int64#9 +# asm 2: mov <squarerax=%rax,>squarer4=%r11 +mov %rax,%r11 + +# qhasm: squarerax = squarer5 +# asm 1: mov <squarer5=int64#10,>squarerax=int64#7 +# asm 2: mov <squarer5=%r12,>squarerax=%rax +mov %r12,%rax + +# qhasm: squarer5 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer5=int64#10 +# asm 2: mov <squarerdx=%rdx,>squarer5=%r12 +mov %rdx,%r12 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer5 += squarerax +# asm 1: add <squarerax=int64#7,<squarer5=int64#10 +# asm 2: add <squarerax=%rax,<squarer5=%r12 +add %rax,%r12 + +# qhasm: squarerax = squarer6 +# asm 1: mov <squarer6=int64#11,>squarerax=int64#7 +# asm 2: mov <squarer6=%r13,>squarerax=%rax +mov %r13,%rax + +# qhasm: squarer6 = 0 +# asm 1: mov $0,>squarer6=int64#11 +# asm 2: mov $0,>squarer6=%r13 +mov $0,%r13 + +# qhasm: squarer6 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer6=int64#11 +# asm 2: adc <squarerdx=%rdx,<squarer6=%r13 +adc %rdx,%r13 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarerax = squarer7 +# asm 1: mov <squarer7=int64#4,>squarerax=int64#7 +# asm 2: mov <squarer7=%rcx,>squarerax=%rax +mov %rcx,%rax + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer7 += squarerax +# asm 1: add <squarerax=int64#7,<squarer7=int64#4 +# asm 2: add <squarerax=%rax,<squarer7=%rcx +add %rax,%rcx + +# qhasm: squarer8 = 0 +# asm 1: mov $0,>squarer8=int64#7 +# asm 2: mov $0,>squarer8=%rax +mov $0,%rax + +# qhasm: squarer8 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer8=int64#7 +# asm 2: adc <squarerdx=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: carry? b0 += squarer4 +# asm 1: add <squarer4=int64#9,<b0=int64#12 +# asm 2: add <squarer4=%r11,<b0=%r14 +add %r11,%r14 + +# qhasm: carry? b1 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<b1=int64#5 +# asm 2: adc <squarer5=%r12,<b1=%r8 +adc %r12,%r8 + +# qhasm: carry? b2 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<b2=int64#6 +# asm 2: adc <squarer6=%r13,<b2=%r9 +adc %r13,%r9 + +# qhasm: carry? 
b3 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<b3=int64#8 +# asm 2: adc <squarer7=%rcx,<b3=%r10 +adc %rcx,%r10 + +# qhasm: squarezero = 0 +# asm 1: mov $0,>squarezero=int64#3 +# asm 2: mov $0,>squarezero=%rdx +mov $0,%rdx + +# qhasm: squarer8 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<squarer8=int64#7 +# asm 2: adc <squarezero=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: squarer8 *= 38 +# asm 1: imulq $38,<squarer8=int64#7,>squarer8=int64#4 +# asm 2: imulq $38,<squarer8=%rax,>squarer8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? b0 += squarer8 +# asm 1: add <squarer8=int64#4,<b0=int64#12 +# asm 2: add <squarer8=%rcx,<b0=%r14 +add %rcx,%r14 + +# qhasm: carry? b1 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<b1=int64#5 +# asm 2: adc <squarezero=%rdx,<b1=%r8 +adc %rdx,%r8 + +# qhasm: carry? b2 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<b2=int64#6 +# asm 2: adc <squarezero=%rdx,<b2=%r9 +adc %rdx,%r9 + +# qhasm: carry? b3 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<b3=int64#8 +# asm 2: adc <squarezero=%rdx,<b3=%r10 +adc %rdx,%r10 + +# qhasm: squarezero += squarezero + carry +# asm 1: adc <squarezero=int64#3,<squarezero=int64#3 +# asm 2: adc <squarezero=%rdx,<squarezero=%rdx +adc %rdx,%rdx + +# qhasm: squarezero *= 38 +# asm 1: imulq $38,<squarezero=int64#3,>squarezero=int64#3 +# asm 2: imulq $38,<squarezero=%rdx,>squarezero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: b0 += squarezero +# asm 1: add <squarezero=int64#3,<b0=int64#12 +# asm 2: add <squarezero=%rdx,<b0=%r14 +add %rdx,%r14 + +# qhasm: b0_stack = b0 +# asm 1: movq <b0=int64#12,>b0_stack=stack64#12 +# asm 2: movq <b0=%r14,>b0_stack=88(%rsp) +movq %r14,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq <b1=int64#5,>b1_stack=stack64#13 +# asm 2: movq <b1=%r8,>b1_stack=96(%rsp) +movq %r8,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq <b2=int64#6,>b2_stack=stack64#14 +# asm 2: movq <b2=%r9,>b2_stack=104(%rsp) +movq %r9,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq <b3=int64#8,>b3_stack=stack64#15 +# asm 2: movq <b3=%r10,>b3_stack=112(%rsp) +movq %r10,112(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 72(<pp=%rsi),>squarerax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(<pp=int64#2) +# asm 2: mulq 64(<pp=%rsi) +mulq 64(%rsi) + +# qhasm: c1 = squarerax +# asm 1: mov <squarerax=int64#7,>c1=int64#5 +# asm 2: mov <squarerax=%rax,>c1=%r8 +mov %rax,%r8 + +# qhasm: c2 = squarerdx +# asm 1: mov <squarerdx=int64#3,>c2=int64#6 +# asm 2: mov <squarerdx=%rdx,>c2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 80(<pp=%rsi),>squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(<pp=int64#2) +# asm 2: mulq 72(<pp=%rsi) +mulq 72(%rsi) + +# qhasm: c3 = squarerax +# asm 1: mov <squarerax=int64#7,>c3=int64#8 +# asm 2: mov <squarerax=%rax,>c3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer4=int64#9 +# asm 2: mov <squarerdx=%rdx,>squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 88(<pp=%rsi),>squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax 
= squarerax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(<pp=int64#2) +# asm 2: mulq 80(<pp=%rsi) +mulq 80(%rsi) + +# qhasm: squarer5 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer5=int64#10 +# asm 2: mov <squarerax=%rax,>squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer6=int64#11 +# asm 2: mov <squarerdx=%rdx,>squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 80(<pp=%rsi),>squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(<pp=int64#2) +# asm 2: mulq 64(<pp=%rsi) +mulq 64(%rsi) + +# qhasm: carry? c2 += squarerax +# asm 1: add <squarerax=int64#7,<c2=int64#6 +# asm 2: add <squarerax=%rax,<c2=%r9 +add %rax,%r9 + +# qhasm: carry? c3 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<c3=int64#8 +# asm 2: adc <squarerdx=%rdx,<c3=%r10 +adc %rdx,%r10 + +# qhasm: squarer4 += 0 + carry +# asm 1: adc $0,<squarer4=int64#9 +# asm 2: adc $0,<squarer4=%r11 +adc $0,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 88(<pp=%rsi),>squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(<pp=int64#2) +# asm 2: mulq 72(<pp=%rsi) +mulq 72(%rsi) + +# qhasm: carry? squarer4 += squarerax +# asm 1: add <squarerax=int64#7,<squarer4=int64#9 +# asm 2: add <squarerax=%rax,<squarer4=%r11 +add %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 88(<pp=%rsi),>squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(<pp=int64#2) +# asm 2: mulq 64(<pp=%rsi) +mulq 64(%rsi) + +# qhasm: carry? c3 += squarerax +# asm 1: add <squarerax=int64#7,<c3=int64#8 +# asm 2: add <squarerax=%rax,<c3=%r10 +add %rax,%r10 + +# qhasm: carry? squarer4 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer4=int64#9 +# asm 2: adc <squarerdx=%rdx,<squarer4=%r11 +adc %rdx,%r11 + +# qhasm: carry? squarer5 += 0 + carry +# asm 1: adc $0,<squarer5=int64#10 +# asm 2: adc $0,<squarer5=%r12 +adc $0,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: carry? c1 += c1 +# asm 1: add <c1=int64#5,<c1=int64#5 +# asm 2: add <c1=%r8,<c1=%r8 +add %r8,%r8 + +# qhasm: carry? c2 += c2 + carry +# asm 1: adc <c2=int64#6,<c2=int64#6 +# asm 2: adc <c2=%r9,<c2=%r9 +adc %r9,%r9 + +# qhasm: carry? c3 += c3 + carry +# asm 1: adc <c3=int64#8,<c3=int64#8 +# asm 2: adc <c3=%r10,<c3=%r10 +adc %r10,%r10 + +# qhasm: carry? squarer4 += squarer4 + carry +# asm 1: adc <squarer4=int64#9,<squarer4=int64#9 +# asm 2: adc <squarer4=%r11,<squarer4=%r11 +adc %r11,%r11 + +# qhasm: carry? squarer5 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<squarer5=int64#10 +# asm 2: adc <squarer5=%r12,<squarer5=%r12 +adc %r12,%r12 + +# qhasm: carry? 
squarer6 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<squarer6=int64#11 +# asm 2: adc <squarer6=%r13,<squarer6=%r13 +adc %r13,%r13 + +# qhasm: squarer7 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<squarer7=int64#4 +# asm 2: adc <squarer7=%rcx,<squarer7=%rcx +adc %rcx,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 64(<pp=%rsi),>squarerax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(<pp=int64#2) +# asm 2: mulq 64(<pp=%rsi) +mulq 64(%rsi) + +# qhasm: c0 = squarerax +# asm 1: mov <squarerax=int64#7,>c0=int64#12 +# asm 2: mov <squarerax=%rax,>c0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret1=int64#13 +# asm 2: mov <squarerdx=%rdx,>squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 72(<pp=%rsi),>squarerax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(<pp=int64#2) +# asm 2: mulq 72(<pp=%rsi) +mulq 72(%rsi) + +# qhasm: squaret2 = squarerax +# asm 1: mov <squarerax=int64#7,>squaret2=int64#14 +# asm 2: mov <squarerax=%rax,>squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret3=int64#15 +# asm 2: mov <squarerdx=%rdx,>squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 80(<pp=%rsi),>squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(<pp=int64#2) +# asm 2: mulq 80(<pp=%rsi) +mulq 80(%rsi) + +# qhasm: carry? c1 += squaret1 +# asm 1: add <squaret1=int64#13,<c1=int64#5 +# asm 2: add <squaret1=%r15,<c1=%r8 +add %r15,%r8 + +# qhasm: carry? c2 += squaret2 + carry +# asm 1: adc <squaret2=int64#14,<c2=int64#6 +# asm 2: adc <squaret2=%rbx,<c2=%r9 +adc %rbx,%r9 + +# qhasm: carry? c3 += squaret3 + carry +# asm 1: adc <squaret3=int64#15,<c3=int64#8 +# asm 2: adc <squaret3=%rbp,<c3=%r10 +adc %rbp,%r10 + +# qhasm: carry? squarer4 += squarerax + carry +# asm 1: adc <squarerax=int64#7,<squarer4=int64#9 +# asm 2: adc <squarerax=%rax,<squarer4=%r11 +adc %rax,%r11 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r12 +adc %rdx,%r12 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#11 +# asm 2: adc $0,<squarer6=%r13 +adc $0,%r13 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#4 +# asm 2: adc $0,<squarer7=%rcx +adc $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>squarerax=int64#7 +# asm 2: movq 88(<pp=%rsi),>squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(<pp=int64#2) +# asm 2: mulq 88(<pp=%rsi) +mulq 88(%rsi) + +# qhasm: carry? 
squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: squarerax = squarer4 +# asm 1: mov <squarer4=int64#9,>squarerax=int64#7 +# asm 2: mov <squarer4=%r11,>squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: squarer4 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer4=int64#9 +# asm 2: mov <squarerax=%rax,>squarer4=%r11 +mov %rax,%r11 + +# qhasm: squarerax = squarer5 +# asm 1: mov <squarer5=int64#10,>squarerax=int64#7 +# asm 2: mov <squarer5=%r12,>squarerax=%rax +mov %r12,%rax + +# qhasm: squarer5 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer5=int64#10 +# asm 2: mov <squarerdx=%rdx,>squarer5=%r12 +mov %rdx,%r12 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer5 += squarerax +# asm 1: add <squarerax=int64#7,<squarer5=int64#10 +# asm 2: add <squarerax=%rax,<squarer5=%r12 +add %rax,%r12 + +# qhasm: squarerax = squarer6 +# asm 1: mov <squarer6=int64#11,>squarerax=int64#7 +# asm 2: mov <squarer6=%r13,>squarerax=%rax +mov %r13,%rax + +# qhasm: squarer6 = 0 +# asm 1: mov $0,>squarer6=int64#11 +# asm 2: mov $0,>squarer6=%r13 +mov $0,%r13 + +# qhasm: squarer6 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer6=int64#11 +# asm 2: adc <squarerdx=%rdx,<squarer6=%r13 +adc %rdx,%r13 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#11 +# asm 2: add <squarerax=%rax,<squarer6=%r13 +add %rax,%r13 + +# qhasm: squarerax = squarer7 +# asm 1: mov <squarer7=int64#4,>squarerax=int64#7 +# asm 2: mov <squarer7=%rcx,>squarerax=%rax +mov %rcx,%rax + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#4 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rcx +adc %rdx,%rcx + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer7 += squarerax +# asm 1: add <squarerax=int64#7,<squarer7=int64#4 +# asm 2: add <squarerax=%rax,<squarer7=%rcx +add %rax,%rcx + +# qhasm: squarer8 = 0 +# asm 1: mov $0,>squarer8=int64#7 +# asm 2: mov $0,>squarer8=%rax +mov $0,%rax + +# qhasm: squarer8 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer8=int64#7 +# asm 2: adc <squarerdx=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: carry? c0 += squarer4 +# asm 1: add <squarer4=int64#9,<c0=int64#12 +# asm 2: add <squarer4=%r11,<c0=%r14 +add %r11,%r14 + +# qhasm: carry? c1 += squarer5 + carry +# asm 1: adc <squarer5=int64#10,<c1=int64#5 +# asm 2: adc <squarer5=%r12,<c1=%r8 +adc %r12,%r8 + +# qhasm: carry? c2 += squarer6 + carry +# asm 1: adc <squarer6=int64#11,<c2=int64#6 +# asm 2: adc <squarer6=%r13,<c2=%r9 +adc %r13,%r9 + +# qhasm: carry? 
c3 += squarer7 + carry +# asm 1: adc <squarer7=int64#4,<c3=int64#8 +# asm 2: adc <squarer7=%rcx,<c3=%r10 +adc %rcx,%r10 + +# qhasm: squarezero = 0 +# asm 1: mov $0,>squarezero=int64#3 +# asm 2: mov $0,>squarezero=%rdx +mov $0,%rdx + +# qhasm: squarer8 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<squarer8=int64#7 +# asm 2: adc <squarezero=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: squarer8 *= 38 +# asm 1: imulq $38,<squarer8=int64#7,>squarer8=int64#4 +# asm 2: imulq $38,<squarer8=%rax,>squarer8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? c0 += squarer8 +# asm 1: add <squarer8=int64#4,<c0=int64#12 +# asm 2: add <squarer8=%rcx,<c0=%r14 +add %rcx,%r14 + +# qhasm: carry? c1 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<c1=int64#5 +# asm 2: adc <squarezero=%rdx,<c1=%r8 +adc %rdx,%r8 + +# qhasm: carry? c2 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<c2=int64#6 +# asm 2: adc <squarezero=%rdx,<c2=%r9 +adc %rdx,%r9 + +# qhasm: carry? c3 += squarezero + carry +# asm 1: adc <squarezero=int64#3,<c3=int64#8 +# asm 2: adc <squarezero=%rdx,<c3=%r10 +adc %rdx,%r10 + +# qhasm: squarezero += squarezero + carry +# asm 1: adc <squarezero=int64#3,<squarezero=int64#3 +# asm 2: adc <squarezero=%rdx,<squarezero=%rdx +adc %rdx,%rdx + +# qhasm: squarezero *= 38 +# asm 1: imulq $38,<squarezero=int64#3,>squarezero=int64#3 +# asm 2: imulq $38,<squarezero=%rdx,>squarezero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: c0 += squarezero +# asm 1: add <squarezero=int64#3,<c0=int64#12 +# asm 2: add <squarezero=%rdx,<c0=%r14 +add %rdx,%r14 + +# qhasm: carry? c0 += c0 +# asm 1: add <c0=int64#12,<c0=int64#12 +# asm 2: add <c0=%r14,<c0=%r14 +add %r14,%r14 + +# qhasm: carry? c1 += c1 + carry +# asm 1: adc <c1=int64#5,<c1=int64#5 +# asm 2: adc <c1=%r8,<c1=%r8 +adc %r8,%r8 + +# qhasm: carry? c2 += c2 + carry +# asm 1: adc <c2=int64#6,<c2=int64#6 +# asm 2: adc <c2=%r9,<c2=%r9 +adc %r9,%r9 + +# qhasm: carry? c3 += c3 + carry +# asm 1: adc <c3=int64#8,<c3=int64#8 +# asm 2: adc <c3=%r10,<c3=%r10 +adc %r10,%r10 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#3 +# asm 2: mov $0,>addt0=%rdx +mov $0,%rdx + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#4 +# asm 2: mov $38,>addt1=%rcx +mov $38,%rcx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#3,<addt1=int64#4 +# asm 2: cmovae <addt0=%rdx,<addt1=%rcx +cmovae %rdx,%rcx + +# qhasm: carry? c0 += addt1 +# asm 1: add <addt1=int64#4,<c0=int64#12 +# asm 2: add <addt1=%rcx,<c0=%r14 +add %rcx,%r14 + +# qhasm: carry? c1 += addt0 + carry +# asm 1: adc <addt0=int64#3,<c1=int64#5 +# asm 2: adc <addt0=%rdx,<c1=%r8 +adc %rdx,%r8 + +# qhasm: carry? c2 += addt0 + carry +# asm 1: adc <addt0=int64#3,<c2=int64#6 +# asm 2: adc <addt0=%rdx,<c2=%r9 +adc %rdx,%r9 + +# qhasm: carry? 
c3 += addt0 + carry +# asm 1: adc <addt0=int64#3,<c3=int64#8 +# asm 2: adc <addt0=%rdx,<c3=%r10 +adc %rdx,%r10 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#4,<addt0=int64#3 +# asm 2: cmovc <addt1=%rcx,<addt0=%rdx +cmovc %rcx,%rdx + +# qhasm: c0 += addt0 +# asm 1: add <addt0=int64#3,<c0=int64#12 +# asm 2: add <addt0=%rdx,<c0=%r14 +add %rdx,%r14 + +# qhasm: c0_stack = c0 +# asm 1: movq <c0=int64#12,>c0_stack=stack64#16 +# asm 2: movq <c0=%r14,>c0_stack=120(%rsp) +movq %r14,120(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq <c1=int64#5,>c1_stack=stack64#17 +# asm 2: movq <c1=%r8,>c1_stack=128(%rsp) +movq %r8,128(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq <c2=int64#6,>c2_stack=stack64#18 +# asm 2: movq <c2=%r9,>c2_stack=136(%rsp) +movq %r9,136(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq <c3=int64#8,>c3_stack=stack64#19 +# asm 2: movq <c3=%r10,>c3_stack=144(%rsp) +movq %r10,144(%rsp) + +# qhasm: d0 = 0 +# asm 1: mov $0,>d0=int64#3 +# asm 2: mov $0,>d0=%rdx +mov $0,%rdx + +# qhasm: d1 = 0 +# asm 1: mov $0,>d1=int64#4 +# asm 2: mov $0,>d1=%rcx +mov $0,%rcx + +# qhasm: d2 = 0 +# asm 1: mov $0,>d2=int64#5 +# asm 2: mov $0,>d2=%r8 +mov $0,%r8 + +# qhasm: d3 = 0 +# asm 1: mov $0,>d3=int64#6 +# asm 2: mov $0,>d3=%r9 +mov $0,%r9 + +# qhasm: carry? d0 -= a0_stack +# asm 1: subq <a0_stack=stack64#8,<d0=int64#3 +# asm 2: subq <a0_stack=56(%rsp),<d0=%rdx +subq 56(%rsp),%rdx + +# qhasm: carry? d1 -= a1_stack - carry +# asm 1: sbbq <a1_stack=stack64#9,<d1=int64#4 +# asm 2: sbbq <a1_stack=64(%rsp),<d1=%rcx +sbbq 64(%rsp),%rcx + +# qhasm: carry? d2 -= a2_stack - carry +# asm 1: sbbq <a2_stack=stack64#10,<d2=int64#5 +# asm 2: sbbq <a2_stack=72(%rsp),<d2=%r8 +sbbq 72(%rsp),%r8 + +# qhasm: carry? d3 -= a3_stack - carry +# asm 1: sbbq <a3_stack=stack64#11,<d3=int64#6 +# asm 2: sbbq <a3_stack=80(%rsp),<d3=%r9 +sbbq 80(%rsp),%r9 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#7 +# asm 2: mov $0,>subt0=%rax +mov $0,%rax + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#8 +# asm 2: mov $38,>subt1=%r10 +mov $38,%r10 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#7,<subt1=int64#8 +# asm 2: cmovae <subt0=%rax,<subt1=%r10 +cmovae %rax,%r10 + +# qhasm: carry? d0 -= subt1 +# asm 1: sub <subt1=int64#8,<d0=int64#3 +# asm 2: sub <subt1=%r10,<d0=%rdx +sub %r10,%rdx + +# qhasm: carry? d1 -= subt0 - carry +# asm 1: sbb <subt0=int64#7,<d1=int64#4 +# asm 2: sbb <subt0=%rax,<d1=%rcx +sbb %rax,%rcx + +# qhasm: carry? d2 -= subt0 - carry +# asm 1: sbb <subt0=int64#7,<d2=int64#5 +# asm 2: sbb <subt0=%rax,<d2=%r8 +sbb %rax,%r8 + +# qhasm: carry? 
d3 -= subt0 - carry +# asm 1: sbb <subt0=int64#7,<d3=int64#6 +# asm 2: sbb <subt0=%rax,<d3=%r9 +sbb %rax,%r9 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#8,<subt0=int64#7 +# asm 2: cmovc <subt1=%r10,<subt0=%rax +cmovc %r10,%rax + +# qhasm: d0 -= subt0 +# asm 1: sub <subt0=int64#7,<d0=int64#3 +# asm 2: sub <subt0=%rax,<d0=%rdx +sub %rax,%rdx + +# qhasm: d0_stack = d0 +# asm 1: movq <d0=int64#3,>d0_stack=stack64#8 +# asm 2: movq <d0=%rdx,>d0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: d1_stack = d1 +# asm 1: movq <d1=int64#4,>d1_stack=stack64#9 +# asm 2: movq <d1=%rcx,>d1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: d2_stack = d2 +# asm 1: movq <d2=int64#5,>d2_stack=stack64#10 +# asm 2: movq <d2=%r8,>d2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: d3_stack = d3 +# asm 1: movq <d3=int64#6,>d3_stack=stack64#11 +# asm 2: movq <d3=%r9,>d3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: e0 = 0 +# asm 1: mov $0,>e0=int64#7 +# asm 2: mov $0,>e0=%rax +mov $0,%rax + +# qhasm: e1 = 0 +# asm 1: mov $0,>e1=int64#8 +# asm 2: mov $0,>e1=%r10 +mov $0,%r10 + +# qhasm: e2 = 0 +# asm 1: mov $0,>e2=int64#9 +# asm 2: mov $0,>e2=%r11 +mov $0,%r11 + +# qhasm: e3 = 0 +# asm 1: mov $0,>e3=int64#10 +# asm 2: mov $0,>e3=%r12 +mov $0,%r12 + +# qhasm: carry? e0 -= b0_stack +# asm 1: subq <b0_stack=stack64#12,<e0=int64#7 +# asm 2: subq <b0_stack=88(%rsp),<e0=%rax +subq 88(%rsp),%rax + +# qhasm: carry? e1 -= b1_stack - carry +# asm 1: sbbq <b1_stack=stack64#13,<e1=int64#8 +# asm 2: sbbq <b1_stack=96(%rsp),<e1=%r10 +sbbq 96(%rsp),%r10 + +# qhasm: carry? e2 -= b2_stack - carry +# asm 1: sbbq <b2_stack=stack64#14,<e2=int64#9 +# asm 2: sbbq <b2_stack=104(%rsp),<e2=%r11 +sbbq 104(%rsp),%r11 + +# qhasm: carry? e3 -= b3_stack - carry +# asm 1: sbbq <b3_stack=stack64#15,<e3=int64#10 +# asm 2: sbbq <b3_stack=112(%rsp),<e3=%r12 +sbbq 112(%rsp),%r12 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#11 +# asm 2: mov $0,>subt0=%r13 +mov $0,%r13 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#12 +# asm 2: mov $38,>subt1=%r14 +mov $38,%r14 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#11,<subt1=int64#12 +# asm 2: cmovae <subt0=%r13,<subt1=%r14 +cmovae %r13,%r14 + +# qhasm: carry? e0 -= subt1 +# asm 1: sub <subt1=int64#12,<e0=int64#7 +# asm 2: sub <subt1=%r14,<e0=%rax +sub %r14,%rax + +# qhasm: carry? e1 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<e1=int64#8 +# asm 2: sbb <subt0=%r13,<e1=%r10 +sbb %r13,%r10 + +# qhasm: carry? e2 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<e2=int64#9 +# asm 2: sbb <subt0=%r13,<e2=%r11 +sbb %r13,%r11 + +# qhasm: carry? 
e3 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<e3=int64#10 +# asm 2: sbb <subt0=%r13,<e3=%r12 +sbb %r13,%r12 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#12,<subt0=int64#11 +# asm 2: cmovc <subt1=%r14,<subt0=%r13 +cmovc %r14,%r13 + +# qhasm: e0 -= subt0 +# asm 1: sub <subt0=int64#11,<e0=int64#7 +# asm 2: sub <subt0=%r13,<e0=%rax +sub %r13,%rax + +# qhasm: e0_stack = e0 +# asm 1: movq <e0=int64#7,>e0_stack=stack64#20 +# asm 2: movq <e0=%rax,>e0_stack=152(%rsp) +movq %rax,152(%rsp) + +# qhasm: e1_stack = e1 +# asm 1: movq <e1=int64#8,>e1_stack=stack64#21 +# asm 2: movq <e1=%r10,>e1_stack=160(%rsp) +movq %r10,160(%rsp) + +# qhasm: e2_stack = e2 +# asm 1: movq <e2=int64#9,>e2_stack=stack64#22 +# asm 2: movq <e2=%r11,>e2_stack=168(%rsp) +movq %r11,168(%rsp) + +# qhasm: e3_stack = e3 +# asm 1: movq <e3=int64#10,>e3_stack=stack64#23 +# asm 2: movq <e3=%r12,>e3_stack=176(%rsp) +movq %r12,176(%rsp) + +# qhasm: rz0 = d0 +# asm 1: mov <d0=int64#3,>rz0=int64#7 +# asm 2: mov <d0=%rdx,>rz0=%rax +mov %rdx,%rax + +# qhasm: rz1 = d1 +# asm 1: mov <d1=int64#4,>rz1=int64#8 +# asm 2: mov <d1=%rcx,>rz1=%r10 +mov %rcx,%r10 + +# qhasm: rz2 = d2 +# asm 1: mov <d2=int64#5,>rz2=int64#9 +# asm 2: mov <d2=%r8,>rz2=%r11 +mov %r8,%r11 + +# qhasm: rz3 = d3 +# asm 1: mov <d3=int64#6,>rz3=int64#10 +# asm 2: mov <d3=%r9,>rz3=%r12 +mov %r9,%r12 + +# qhasm: carry? rz0 += b0_stack +# asm 1: addq <b0_stack=stack64#12,<rz0=int64#7 +# asm 2: addq <b0_stack=88(%rsp),<rz0=%rax +addq 88(%rsp),%rax + +# qhasm: carry? rz1 += b1_stack + carry +# asm 1: adcq <b1_stack=stack64#13,<rz1=int64#8 +# asm 2: adcq <b1_stack=96(%rsp),<rz1=%r10 +adcq 96(%rsp),%r10 + +# qhasm: carry? rz2 += b2_stack + carry +# asm 1: adcq <b2_stack=stack64#14,<rz2=int64#9 +# asm 2: adcq <b2_stack=104(%rsp),<rz2=%r11 +adcq 104(%rsp),%r11 + +# qhasm: carry? rz3 += b3_stack + carry +# asm 1: adcq <b3_stack=stack64#15,<rz3=int64#10 +# asm 2: adcq <b3_stack=112(%rsp),<rz3=%r12 +adcq 112(%rsp),%r12 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#11 +# asm 2: mov $0,>addt0=%r13 +mov $0,%r13 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#12 +# asm 2: mov $38,>addt1=%r14 +mov $38,%r14 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#11,<addt1=int64#12 +# asm 2: cmovae <addt0=%r13,<addt1=%r14 +cmovae %r13,%r14 + +# qhasm: carry? rz0 += addt1 +# asm 1: add <addt1=int64#12,<rz0=int64#7 +# asm 2: add <addt1=%r14,<rz0=%rax +add %r14,%rax + +# qhasm: carry? rz1 += addt0 + carry +# asm 1: adc <addt0=int64#11,<rz1=int64#8 +# asm 2: adc <addt0=%r13,<rz1=%r10 +adc %r13,%r10 + +# qhasm: carry? rz2 += addt0 + carry +# asm 1: adc <addt0=int64#11,<rz2=int64#9 +# asm 2: adc <addt0=%r13,<rz2=%r11 +adc %r13,%r11 + +# qhasm: carry? 
rz3 += addt0 + carry +# asm 1: adc <addt0=int64#11,<rz3=int64#10 +# asm 2: adc <addt0=%r13,<rz3=%r12 +adc %r13,%r12 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#12,<addt0=int64#11 +# asm 2: cmovc <addt1=%r14,<addt0=%r13 +cmovc %r14,%r13 + +# qhasm: rz0 += addt0 +# asm 1: add <addt0=int64#11,<rz0=int64#7 +# asm 2: add <addt0=%r13,<rz0=%rax +add %r13,%rax + +# qhasm: *(uint64 *) (rp + 32) = rz0 +# asm 1: movq <rz0=int64#7,32(<rp=int64#1) +# asm 2: movq <rz0=%rax,32(<rp=%rdi) +movq %rax,32(%rdi) + +# qhasm: *(uint64 *) (rp + 40) = rz1 +# asm 1: movq <rz1=int64#8,40(<rp=int64#1) +# asm 2: movq <rz1=%r10,40(<rp=%rdi) +movq %r10,40(%rdi) + +# qhasm: *(uint64 *) (rp + 48) = rz2 +# asm 1: movq <rz2=int64#9,48(<rp=int64#1) +# asm 2: movq <rz2=%r11,48(<rp=%rdi) +movq %r11,48(%rdi) + +# qhasm: *(uint64 *) (rp + 56) = rz3 +# asm 1: movq <rz3=int64#10,56(<rp=int64#1) +# asm 2: movq <rz3=%r12,56(<rp=%rdi) +movq %r12,56(%rdi) + +# qhasm: carry? d0 -= b0_stack +# asm 1: subq <b0_stack=stack64#12,<d0=int64#3 +# asm 2: subq <b0_stack=88(%rsp),<d0=%rdx +subq 88(%rsp),%rdx + +# qhasm: carry? d1 -= b1_stack - carry +# asm 1: sbbq <b1_stack=stack64#13,<d1=int64#4 +# asm 2: sbbq <b1_stack=96(%rsp),<d1=%rcx +sbbq 96(%rsp),%rcx + +# qhasm: carry? d2 -= b2_stack - carry +# asm 1: sbbq <b2_stack=stack64#14,<d2=int64#5 +# asm 2: sbbq <b2_stack=104(%rsp),<d2=%r8 +sbbq 104(%rsp),%r8 + +# qhasm: carry? d3 -= b3_stack - carry +# asm 1: sbbq <b3_stack=stack64#15,<d3=int64#6 +# asm 2: sbbq <b3_stack=112(%rsp),<d3=%r9 +sbbq 112(%rsp),%r9 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#11 +# asm 2: mov $0,>subt0=%r13 +mov $0,%r13 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#12 +# asm 2: mov $38,>subt1=%r14 +mov $38,%r14 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#11,<subt1=int64#12 +# asm 2: cmovae <subt0=%r13,<subt1=%r14 +cmovae %r13,%r14 + +# qhasm: carry? d0 -= subt1 +# asm 1: sub <subt1=int64#12,<d0=int64#3 +# asm 2: sub <subt1=%r14,<d0=%rdx +sub %r14,%rdx + +# qhasm: carry? d1 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<d1=int64#4 +# asm 2: sbb <subt0=%r13,<d1=%rcx +sbb %r13,%rcx + +# qhasm: carry? d2 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<d2=int64#5 +# asm 2: sbb <subt0=%r13,<d2=%r8 +sbb %r13,%r8 + +# qhasm: carry? d3 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<d3=int64#6 +# asm 2: sbb <subt0=%r13,<d3=%r9 +sbb %r13,%r9 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#12,<subt0=int64#11 +# asm 2: cmovc <subt1=%r14,<subt0=%r13 +cmovc %r14,%r13 + +# qhasm: d0 -= subt0 +# asm 1: sub <subt0=int64#11,<d0=int64#3 +# asm 2: sub <subt0=%r13,<d0=%rdx +sub %r13,%rdx + +# qhasm: *(uint64 *)(rp + 64) = d0 +# asm 1: movq <d0=int64#3,64(<rp=int64#1) +# asm 2: movq <d0=%rdx,64(<rp=%rdi) +movq %rdx,64(%rdi) + +# qhasm: *(uint64 *)(rp + 72) = d1 +# asm 1: movq <d1=int64#4,72(<rp=int64#1) +# asm 2: movq <d1=%rcx,72(<rp=%rdi) +movq %rcx,72(%rdi) + +# qhasm: *(uint64 *)(rp + 80) = d2 +# asm 1: movq <d2=int64#5,80(<rp=int64#1) +# asm 2: movq <d2=%r8,80(<rp=%rdi) +movq %r8,80(%rdi) + +# qhasm: *(uint64 *)(rp + 88) = d3 +# asm 1: movq <d3=int64#6,88(<rp=int64#1) +# asm 2: movq <d3=%r9,88(<rp=%rdi) +movq %r9,88(%rdi) + +# qhasm: carry? rz0 -= c0_stack +# asm 1: subq <c0_stack=stack64#16,<rz0=int64#7 +# asm 2: subq <c0_stack=120(%rsp),<rz0=%rax +subq 120(%rsp),%rax + +# qhasm: carry? rz1 -= c1_stack - carry +# asm 1: sbbq <c1_stack=stack64#17,<rz1=int64#8 +# asm 2: sbbq <c1_stack=128(%rsp),<rz1=%r10 +sbbq 128(%rsp),%r10 + +# qhasm: carry? 
rz2 -= c2_stack - carry +# asm 1: sbbq <c2_stack=stack64#18,<rz2=int64#9 +# asm 2: sbbq <c2_stack=136(%rsp),<rz2=%r11 +sbbq 136(%rsp),%r11 + +# qhasm: carry? rz3 -= c3_stack - carry +# asm 1: sbbq <c3_stack=stack64#19,<rz3=int64#10 +# asm 2: sbbq <c3_stack=144(%rsp),<rz3=%r12 +sbbq 144(%rsp),%r12 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#3 +# asm 2: mov $0,>subt0=%rdx +mov $0,%rdx + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#4 +# asm 2: mov $38,>subt1=%rcx +mov $38,%rcx + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#3,<subt1=int64#4 +# asm 2: cmovae <subt0=%rdx,<subt1=%rcx +cmovae %rdx,%rcx + +# qhasm: carry? rz0 -= subt1 +# asm 1: sub <subt1=int64#4,<rz0=int64#7 +# asm 2: sub <subt1=%rcx,<rz0=%rax +sub %rcx,%rax + +# qhasm: carry? rz1 -= subt0 - carry +# asm 1: sbb <subt0=int64#3,<rz1=int64#8 +# asm 2: sbb <subt0=%rdx,<rz1=%r10 +sbb %rdx,%r10 + +# qhasm: carry? rz2 -= subt0 - carry +# asm 1: sbb <subt0=int64#3,<rz2=int64#9 +# asm 2: sbb <subt0=%rdx,<rz2=%r11 +sbb %rdx,%r11 + +# qhasm: carry? rz3 -= subt0 - carry +# asm 1: sbb <subt0=int64#3,<rz3=int64#10 +# asm 2: sbb <subt0=%rdx,<rz3=%r12 +sbb %rdx,%r12 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#4,<subt0=int64#3 +# asm 2: cmovc <subt1=%rcx,<subt0=%rdx +cmovc %rcx,%rdx + +# qhasm: rz0 -= subt0 +# asm 1: sub <subt0=int64#3,<rz0=int64#7 +# asm 2: sub <subt0=%rdx,<rz0=%rax +sub %rdx,%rax + +# qhasm: *(uint64 *) (rp + 96) = rz0 +# asm 1: movq <rz0=int64#7,96(<rp=int64#1) +# asm 2: movq <rz0=%rax,96(<rp=%rdi) +movq %rax,96(%rdi) + +# qhasm: *(uint64 *) (rp + 104) = rz1 +# asm 1: movq <rz1=int64#8,104(<rp=int64#1) +# asm 2: movq <rz1=%r10,104(<rp=%rdi) +movq %r10,104(%rdi) + +# qhasm: *(uint64 *) (rp + 112) = rz2 +# asm 1: movq <rz2=int64#9,112(<rp=int64#1) +# asm 2: movq <rz2=%r11,112(<rp=%rdi) +movq %r11,112(%rdi) + +# qhasm: *(uint64 *) (rp + 120) = rz3 +# asm 1: movq <rz3=int64#10,120(<rp=int64#1) +# asm 2: movq <rz3=%r12,120(<rp=%rdi) +movq %r12,120(%rdi) + +# qhasm: rx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(<pp=int64#2),>rx0=int64#3 +# asm 2: movq 0(<pp=%rsi),>rx0=%rdx +movq 0(%rsi),%rdx + +# qhasm: rx1 = *(uint64 *)(pp + 8) +# asm 1: movq 8(<pp=int64#2),>rx1=int64#4 +# asm 2: movq 8(<pp=%rsi),>rx1=%rcx +movq 8(%rsi),%rcx + +# qhasm: rx2 = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>rx2=int64#5 +# asm 2: movq 16(<pp=%rsi),>rx2=%r8 +movq 16(%rsi),%r8 + +# qhasm: rx3 = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>rx3=int64#6 +# asm 2: movq 24(<pp=%rsi),>rx3=%r9 +movq 24(%rsi),%r9 + +# qhasm: carry? rx0 += *(uint64 *)(pp + 32) +# asm 1: addq 32(<pp=int64#2),<rx0=int64#3 +# asm 2: addq 32(<pp=%rsi),<rx0=%rdx +addq 32(%rsi),%rdx + +# qhasm: carry? rx1 += *(uint64 *)(pp + 40) + carry +# asm 1: adcq 40(<pp=int64#2),<rx1=int64#4 +# asm 2: adcq 40(<pp=%rsi),<rx1=%rcx +adcq 40(%rsi),%rcx + +# qhasm: carry? rx2 += *(uint64 *)(pp + 48) + carry +# asm 1: adcq 48(<pp=int64#2),<rx2=int64#5 +# asm 2: adcq 48(<pp=%rsi),<rx2=%r8 +adcq 48(%rsi),%r8 + +# qhasm: carry? rx3 += *(uint64 *)(pp + 56) + carry +# asm 1: adcq 56(<pp=int64#2),<rx3=int64#6 +# asm 2: adcq 56(<pp=%rsi),<rx3=%r9 +adcq 56(%rsi),%r9 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#2,<addt1=int64#7 +# asm 2: cmovae <addt0=%rsi,<addt1=%rax +cmovae %rsi,%rax + +# qhasm: carry? 
rx0 += addt1 +# asm 1: add <addt1=int64#7,<rx0=int64#3 +# asm 2: add <addt1=%rax,<rx0=%rdx +add %rax,%rdx + +# qhasm: carry? rx1 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx1=int64#4 +# asm 2: adc <addt0=%rsi,<rx1=%rcx +adc %rsi,%rcx + +# qhasm: carry? rx2 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx2=int64#5 +# asm 2: adc <addt0=%rsi,<rx2=%r8 +adc %rsi,%r8 + +# qhasm: carry? rx3 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx3=int64#6 +# asm 2: adc <addt0=%rsi,<rx3=%r9 +adc %rsi,%r9 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#7,<addt0=int64#2 +# asm 2: cmovc <addt1=%rax,<addt0=%rsi +cmovc %rax,%rsi + +# qhasm: rx0 += addt0 +# asm 1: add <addt0=int64#2,<rx0=int64#3 +# asm 2: add <addt0=%rsi,<rx0=%rdx +add %rsi,%rdx + +# qhasm: rx0_stack = rx0 +# asm 1: movq <rx0=int64#3,>rx0_stack=stack64#12 +# asm 2: movq <rx0=%rdx,>rx0_stack=88(%rsp) +movq %rdx,88(%rsp) + +# qhasm: rx1_stack = rx1 +# asm 1: movq <rx1=int64#4,>rx1_stack=stack64#13 +# asm 2: movq <rx1=%rcx,>rx1_stack=96(%rsp) +movq %rcx,96(%rsp) + +# qhasm: rx2_stack = rx2 +# asm 1: movq <rx2=int64#5,>rx2_stack=stack64#14 +# asm 2: movq <rx2=%r8,>rx2_stack=104(%rsp) +movq %r8,104(%rsp) + +# qhasm: rx3_stack = rx3 +# asm 1: movq <rx3=int64#6,>rx3_stack=stack64#15 +# asm 2: movq <rx3=%r9,>rx3_stack=112(%rsp) +movq %r9,112(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#2 +# asm 2: mov $0,>squarer7=%rsi +mov $0,%rsi + +# qhasm: squarerax = rx1_stack +# asm 1: movq <rx1_stack=stack64#13,>squarerax=int64#7 +# asm 2: movq <rx1_stack=96(%rsp),>squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq <rx0_stack=stack64#12 +# asm 2: mulq <rx0_stack=88(%rsp) +mulq 88(%rsp) + +# qhasm: rx1 = squarerax +# asm 1: mov <squarerax=int64#7,>rx1=int64#4 +# asm 2: mov <squarerax=%rax,>rx1=%rcx +mov %rax,%rcx + +# qhasm: rx2 = squarerdx +# asm 1: mov <squarerdx=int64#3,>rx2=int64#5 +# asm 2: mov <squarerdx=%rdx,>rx2=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = rx2_stack +# asm 1: movq <rx2_stack=stack64#14,>squarerax=int64#7 +# asm 2: movq <rx2_stack=104(%rsp),>squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq <rx1_stack=stack64#13 +# asm 2: mulq <rx1_stack=96(%rsp) +mulq 96(%rsp) + +# qhasm: rx3 = squarerax +# asm 1: mov <squarerax=int64#7,>rx3=int64#6 +# asm 2: mov <squarerax=%rax,>rx3=%r9 +mov %rax,%r9 + +# qhasm: squarer4 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer4=int64#8 +# asm 2: mov <squarerdx=%rdx,>squarer4=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = rx3_stack +# asm 1: movq <rx3_stack=stack64#15,>squarerax=int64#7 +# asm 2: movq <rx3_stack=112(%rsp),>squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx2_stack +# asm 1: mulq <rx2_stack=stack64#14 +# asm 2: mulq <rx2_stack=104(%rsp) +mulq 104(%rsp) + +# qhasm: squarer5 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer5=int64#9 +# asm 2: mov <squarerax=%rax,>squarer5=%r11 +mov %rax,%r11 + +# qhasm: squarer6 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer6=int64#10 +# asm 2: mov <squarerdx=%rdx,>squarer6=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = rx2_stack +# asm 1: movq <rx2_stack=stack64#14,>squarerax=int64#7 +# asm 2: movq <rx2_stack=104(%rsp),>squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq <rx0_stack=stack64#12 +# asm 2: mulq <rx0_stack=88(%rsp) +mulq 88(%rsp) + +# qhasm: carry? 
rx2 += squarerax +# asm 1: add <squarerax=int64#7,<rx2=int64#5 +# asm 2: add <squarerax=%rax,<rx2=%r8 +add %rax,%r8 + +# qhasm: carry? rx3 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<rx3=int64#6 +# asm 2: adc <squarerdx=%rdx,<rx3=%r9 +adc %rdx,%r9 + +# qhasm: squarer4 += 0 + carry +# asm 1: adc $0,<squarer4=int64#8 +# asm 2: adc $0,<squarer4=%r10 +adc $0,%r10 + +# qhasm: squarerax = rx3_stack +# asm 1: movq <rx3_stack=stack64#15,>squarerax=int64#7 +# asm 2: movq <rx3_stack=112(%rsp),>squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq <rx1_stack=stack64#13 +# asm 2: mulq <rx1_stack=96(%rsp) +mulq 96(%rsp) + +# qhasm: carry? squarer4 += squarerax +# asm 1: add <squarerax=int64#7,<squarer4=int64#8 +# asm 2: add <squarerax=%rax,<squarer4=%r10 +add %rax,%r10 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#9 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r11 +adc %rdx,%r11 + +# qhasm: squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#10 +# asm 2: adc $0,<squarer6=%r12 +adc $0,%r12 + +# qhasm: squarerax = rx3_stack +# asm 1: movq <rx3_stack=stack64#15,>squarerax=int64#7 +# asm 2: movq <rx3_stack=112(%rsp),>squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq <rx0_stack=stack64#12 +# asm 2: mulq <rx0_stack=88(%rsp) +mulq 88(%rsp) + +# qhasm: carry? rx3 += squarerax +# asm 1: add <squarerax=int64#7,<rx3=int64#6 +# asm 2: add <squarerax=%rax,<rx3=%r9 +add %rax,%r9 + +# qhasm: carry? squarer4 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer4=int64#8 +# asm 2: adc <squarerdx=%rdx,<squarer4=%r10 +adc %rdx,%r10 + +# qhasm: carry? squarer5 += 0 + carry +# asm 1: adc $0,<squarer5=int64#9 +# asm 2: adc $0,<squarer5=%r11 +adc $0,%r11 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#10 +# asm 2: adc $0,<squarer6=%r12 +adc $0,%r12 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#2 +# asm 2: adc $0,<squarer7=%rsi +adc $0,%rsi + +# qhasm: carry? rx1 += rx1 +# asm 1: add <rx1=int64#4,<rx1=int64#4 +# asm 2: add <rx1=%rcx,<rx1=%rcx +add %rcx,%rcx + +# qhasm: carry? rx2 += rx2 + carry +# asm 1: adc <rx2=int64#5,<rx2=int64#5 +# asm 2: adc <rx2=%r8,<rx2=%r8 +adc %r8,%r8 + +# qhasm: carry? rx3 += rx3 + carry +# asm 1: adc <rx3=int64#6,<rx3=int64#6 +# asm 2: adc <rx3=%r9,<rx3=%r9 +adc %r9,%r9 + +# qhasm: carry? squarer4 += squarer4 + carry +# asm 1: adc <squarer4=int64#8,<squarer4=int64#8 +# asm 2: adc <squarer4=%r10,<squarer4=%r10 +adc %r10,%r10 + +# qhasm: carry? squarer5 += squarer5 + carry +# asm 1: adc <squarer5=int64#9,<squarer5=int64#9 +# asm 2: adc <squarer5=%r11,<squarer5=%r11 +adc %r11,%r11 + +# qhasm: carry? 
squarer6 += squarer6 + carry +# asm 1: adc <squarer6=int64#10,<squarer6=int64#10 +# asm 2: adc <squarer6=%r12,<squarer6=%r12 +adc %r12,%r12 + +# qhasm: squarer7 += squarer7 + carry +# asm 1: adc <squarer7=int64#2,<squarer7=int64#2 +# asm 2: adc <squarer7=%rsi,<squarer7=%rsi +adc %rsi,%rsi + +# qhasm: squarerax = rx0_stack +# asm 1: movq <rx0_stack=stack64#12,>squarerax=int64#7 +# asm 2: movq <rx0_stack=88(%rsp),>squarerax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq <rx0_stack=stack64#12 +# asm 2: mulq <rx0_stack=88(%rsp) +mulq 88(%rsp) + +# qhasm: rx0 = squarerax +# asm 1: mov <squarerax=int64#7,>rx0=int64#11 +# asm 2: mov <squarerax=%rax,>rx0=%r13 +mov %rax,%r13 + +# qhasm: squaret1 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret1=int64#12 +# asm 2: mov <squarerdx=%rdx,>squaret1=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = rx1_stack +# asm 1: movq <rx1_stack=stack64#13,>squarerax=int64#7 +# asm 2: movq <rx1_stack=96(%rsp),>squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq <rx1_stack=stack64#13 +# asm 2: mulq <rx1_stack=96(%rsp) +mulq 96(%rsp) + +# qhasm: squaret2 = squarerax +# asm 1: mov <squarerax=int64#7,>squaret2=int64#13 +# asm 2: mov <squarerax=%rax,>squaret2=%r15 +mov %rax,%r15 + +# qhasm: squaret3 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squaret3=int64#14 +# asm 2: mov <squarerdx=%rdx,>squaret3=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = rx2_stack +# asm 1: movq <rx2_stack=stack64#14,>squarerax=int64#7 +# asm 2: movq <rx2_stack=104(%rsp),>squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx2_stack +# asm 1: mulq <rx2_stack=stack64#14 +# asm 2: mulq <rx2_stack=104(%rsp) +mulq 104(%rsp) + +# qhasm: carry? rx1 += squaret1 +# asm 1: add <squaret1=int64#12,<rx1=int64#4 +# asm 2: add <squaret1=%r14,<rx1=%rcx +add %r14,%rcx + +# qhasm: carry? rx2 += squaret2 + carry +# asm 1: adc <squaret2=int64#13,<rx2=int64#5 +# asm 2: adc <squaret2=%r15,<rx2=%r8 +adc %r15,%r8 + +# qhasm: carry? rx3 += squaret3 + carry +# asm 1: adc <squaret3=int64#14,<rx3=int64#6 +# asm 2: adc <squaret3=%rbx,<rx3=%r9 +adc %rbx,%r9 + +# qhasm: carry? squarer4 += squarerax + carry +# asm 1: adc <squarerax=int64#7,<squarer4=int64#8 +# asm 2: adc <squarerax=%rax,<squarer4=%r10 +adc %rax,%r10 + +# qhasm: carry? squarer5 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer5=int64#9 +# asm 2: adc <squarerdx=%rdx,<squarer5=%r11 +adc %rdx,%r11 + +# qhasm: carry? squarer6 += 0 + carry +# asm 1: adc $0,<squarer6=int64#10 +# asm 2: adc $0,<squarer6=%r12 +adc $0,%r12 + +# qhasm: squarer7 += 0 + carry +# asm 1: adc $0,<squarer7=int64#2 +# asm 2: adc $0,<squarer7=%rsi +adc $0,%rsi + +# qhasm: squarerax = rx3_stack +# asm 1: movq <rx3_stack=stack64#15,>squarerax=int64#7 +# asm 2: movq <rx3_stack=112(%rsp),>squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx3_stack +# asm 1: mulq <rx3_stack=stack64#15 +# asm 2: mulq <rx3_stack=112(%rsp) +mulq 112(%rsp) + +# qhasm: carry? 
squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#10 +# asm 2: add <squarerax=%rax,<squarer6=%r12 +add %rax,%r12 + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#2 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rsi +adc %rdx,%rsi + +# qhasm: squarerax = squarer4 +# asm 1: mov <squarer4=int64#8,>squarerax=int64#7 +# asm 2: mov <squarer4=%r10,>squarerax=%rax +mov %r10,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: squarer4 = squarerax +# asm 1: mov <squarerax=int64#7,>squarer4=int64#8 +# asm 2: mov <squarerax=%rax,>squarer4=%r10 +mov %rax,%r10 + +# qhasm: squarerax = squarer5 +# asm 1: mov <squarer5=int64#9,>squarerax=int64#7 +# asm 2: mov <squarer5=%r11,>squarerax=%rax +mov %r11,%rax + +# qhasm: squarer5 = squarerdx +# asm 1: mov <squarerdx=int64#3,>squarer5=int64#9 +# asm 2: mov <squarerdx=%rdx,>squarer5=%r11 +mov %rdx,%r11 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer5 += squarerax +# asm 1: add <squarerax=int64#7,<squarer5=int64#9 +# asm 2: add <squarerax=%rax,<squarer5=%r11 +add %rax,%r11 + +# qhasm: squarerax = squarer6 +# asm 1: mov <squarer6=int64#10,>squarerax=int64#7 +# asm 2: mov <squarer6=%r12,>squarerax=%rax +mov %r12,%rax + +# qhasm: squarer6 = 0 +# asm 1: mov $0,>squarer6=int64#10 +# asm 2: mov $0,>squarer6=%r12 +mov $0,%r12 + +# qhasm: squarer6 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer6=int64#10 +# asm 2: adc <squarerdx=%rdx,<squarer6=%r12 +adc %rdx,%r12 + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer6 += squarerax +# asm 1: add <squarerax=int64#7,<squarer6=int64#10 +# asm 2: add <squarerax=%rax,<squarer6=%r12 +add %rax,%r12 + +# qhasm: squarerax = squarer7 +# asm 1: mov <squarer7=int64#2,>squarerax=int64#7 +# asm 2: mov <squarer7=%rsi,>squarerax=%rax +mov %rsi,%rax + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#2 +# asm 2: mov $0,>squarer7=%rsi +mov $0,%rsi + +# qhasm: squarer7 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer7=int64#2 +# asm 2: adc <squarerdx=%rdx,<squarer7=%rsi +adc %rdx,%rsi + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? squarer7 += squarerax +# asm 1: add <squarerax=int64#7,<squarer7=int64#2 +# asm 2: add <squarerax=%rax,<squarer7=%rsi +add %rax,%rsi + +# qhasm: squarer8 = 0 +# asm 1: mov $0,>squarer8=int64#7 +# asm 2: mov $0,>squarer8=%rax +mov $0,%rax + +# qhasm: squarer8 += squarerdx + carry +# asm 1: adc <squarerdx=int64#3,<squarer8=int64#7 +# asm 2: adc <squarerdx=%rdx,<squarer8=%rax +adc %rdx,%rax + +# qhasm: carry? rx0 += squarer4 +# asm 1: add <squarer4=int64#8,<rx0=int64#11 +# asm 2: add <squarer4=%r10,<rx0=%r13 +add %r10,%r13 + +# qhasm: carry? rx1 += squarer5 + carry +# asm 1: adc <squarer5=int64#9,<rx1=int64#4 +# asm 2: adc <squarer5=%r11,<rx1=%rcx +adc %r11,%rcx + +# qhasm: carry? rx2 += squarer6 + carry +# asm 1: adc <squarer6=int64#10,<rx2=int64#5 +# asm 2: adc <squarer6=%r12,<rx2=%r8 +adc %r12,%r8 + +# qhasm: carry? 
rx3 += squarer7 + carry +# asm 1: adc <squarer7=int64#2,<rx3=int64#6 +# asm 2: adc <squarer7=%rsi,<rx3=%r9 +adc %rsi,%r9 + +# qhasm: squarezero = 0 +# asm 1: mov $0,>squarezero=int64#2 +# asm 2: mov $0,>squarezero=%rsi +mov $0,%rsi + +# qhasm: squarer8 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<squarer8=int64#7 +# asm 2: adc <squarezero=%rsi,<squarer8=%rax +adc %rsi,%rax + +# qhasm: squarer8 *= 38 +# asm 1: imulq $38,<squarer8=int64#7,>squarer8=int64#3 +# asm 2: imulq $38,<squarer8=%rax,>squarer8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rx0 += squarer8 +# asm 1: add <squarer8=int64#3,<rx0=int64#11 +# asm 2: add <squarer8=%rdx,<rx0=%r13 +add %rdx,%r13 + +# qhasm: carry? rx1 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<rx1=int64#4 +# asm 2: adc <squarezero=%rsi,<rx1=%rcx +adc %rsi,%rcx + +# qhasm: carry? rx2 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<rx2=int64#5 +# asm 2: adc <squarezero=%rsi,<rx2=%r8 +adc %rsi,%r8 + +# qhasm: carry? rx3 += squarezero + carry +# asm 1: adc <squarezero=int64#2,<rx3=int64#6 +# asm 2: adc <squarezero=%rsi,<rx3=%r9 +adc %rsi,%r9 + +# qhasm: squarezero += squarezero + carry +# asm 1: adc <squarezero=int64#2,<squarezero=int64#2 +# asm 2: adc <squarezero=%rsi,<squarezero=%rsi +adc %rsi,%rsi + +# qhasm: squarezero *= 38 +# asm 1: imulq $38,<squarezero=int64#2,>squarezero=int64#2 +# asm 2: imulq $38,<squarezero=%rsi,>squarezero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rx0 += squarezero +# asm 1: add <squarezero=int64#2,<rx0=int64#11 +# asm 2: add <squarezero=%rsi,<rx0=%r13 +add %rsi,%r13 + +# qhasm: carry? rx0 += d0_stack +# asm 1: addq <d0_stack=stack64#8,<rx0=int64#11 +# asm 2: addq <d0_stack=56(%rsp),<rx0=%r13 +addq 56(%rsp),%r13 + +# qhasm: carry? rx1 += d1_stack + carry +# asm 1: adcq <d1_stack=stack64#9,<rx1=int64#4 +# asm 2: adcq <d1_stack=64(%rsp),<rx1=%rcx +adcq 64(%rsp),%rcx + +# qhasm: carry? rx2 += d2_stack + carry +# asm 1: adcq <d2_stack=stack64#10,<rx2=int64#5 +# asm 2: adcq <d2_stack=72(%rsp),<rx2=%r8 +adcq 72(%rsp),%r8 + +# qhasm: carry? rx3 += d3_stack + carry +# asm 1: adcq <d3_stack=stack64#11,<rx3=int64#6 +# asm 2: adcq <d3_stack=80(%rsp),<rx3=%r9 +adcq 80(%rsp),%r9 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#2,<addt1=int64#3 +# asm 2: cmovae <addt0=%rsi,<addt1=%rdx +cmovae %rsi,%rdx + +# qhasm: carry? rx0 += addt1 +# asm 1: add <addt1=int64#3,<rx0=int64#11 +# asm 2: add <addt1=%rdx,<rx0=%r13 +add %rdx,%r13 + +# qhasm: carry? rx1 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx1=int64#4 +# asm 2: adc <addt0=%rsi,<rx1=%rcx +adc %rsi,%rcx + +# qhasm: carry? rx2 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx2=int64#5 +# asm 2: adc <addt0=%rsi,<rx2=%r8 +adc %rsi,%r8 + +# qhasm: carry? rx3 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx3=int64#6 +# asm 2: adc <addt0=%rsi,<rx3=%r9 +adc %rsi,%r9 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#3,<addt0=int64#2 +# asm 2: cmovc <addt1=%rdx,<addt0=%rsi +cmovc %rdx,%rsi + +# qhasm: rx0 += addt0 +# asm 1: add <addt0=int64#2,<rx0=int64#11 +# asm 2: add <addt0=%rsi,<rx0=%r13 +add %rsi,%r13 + +# qhasm: carry? rx0 += e0_stack +# asm 1: addq <e0_stack=stack64#20,<rx0=int64#11 +# asm 2: addq <e0_stack=152(%rsp),<rx0=%r13 +addq 152(%rsp),%r13 + +# qhasm: carry? 
rx1 += e1_stack + carry +# asm 1: adcq <e1_stack=stack64#21,<rx1=int64#4 +# asm 2: adcq <e1_stack=160(%rsp),<rx1=%rcx +adcq 160(%rsp),%rcx + +# qhasm: carry? rx2 += e2_stack + carry +# asm 1: adcq <e2_stack=stack64#22,<rx2=int64#5 +# asm 2: adcq <e2_stack=168(%rsp),<rx2=%r8 +adcq 168(%rsp),%r8 + +# qhasm: carry? rx3 += e3_stack + carry +# asm 1: adcq <e3_stack=stack64#23,<rx3=int64#6 +# asm 2: adcq <e3_stack=176(%rsp),<rx3=%r9 +adcq 176(%rsp),%r9 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#2,<addt1=int64#3 +# asm 2: cmovae <addt0=%rsi,<addt1=%rdx +cmovae %rsi,%rdx + +# qhasm: carry? rx0 += addt1 +# asm 1: add <addt1=int64#3,<rx0=int64#11 +# asm 2: add <addt1=%rdx,<rx0=%r13 +add %rdx,%r13 + +# qhasm: carry? rx1 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx1=int64#4 +# asm 2: adc <addt0=%rsi,<rx1=%rcx +adc %rsi,%rcx + +# qhasm: carry? rx2 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx2=int64#5 +# asm 2: adc <addt0=%rsi,<rx2=%r8 +adc %rsi,%r8 + +# qhasm: carry? rx3 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rx3=int64#6 +# asm 2: adc <addt0=%rsi,<rx3=%r9 +adc %rsi,%r9 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#3,<addt0=int64#2 +# asm 2: cmovc <addt1=%rdx,<addt0=%rsi +cmovc %rdx,%rsi + +# qhasm: rx0 += addt0 +# asm 1: add <addt0=int64#2,<rx0=int64#11 +# asm 2: add <addt0=%rsi,<rx0=%r13 +add %rsi,%r13 + +# qhasm: *(uint64 *)(rp + 0) = rx0 +# asm 1: movq <rx0=int64#11,0(<rp=int64#1) +# asm 2: movq <rx0=%r13,0(<rp=%rdi) +movq %r13,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = rx1 +# asm 1: movq <rx1=int64#4,8(<rp=int64#1) +# asm 2: movq <rx1=%rcx,8(<rp=%rdi) +movq %rcx,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = rx2 +# asm 1: movq <rx2=int64#5,16(<rp=int64#1) +# asm 2: movq <rx2=%r8,16(<rp=%rdi) +movq %r8,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = rx3 +# asm 1: movq <rx3=int64#6,24(<rp=int64#1) +# asm 2: movq <rx3=%r9,24(<rp=%rdi) +movq %r9,24(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_double.c b/ext/ed25519-amd64-asm/ge25519_double.c new file mode 100644 index 00000000..d55e2b4f --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_double.c @@ 
-0,0 +1,8 @@ +#include "ge25519.h" + +void ge25519_double(ge25519_p3 *r, const ge25519_p3 *p) +{ + ge25519_p1p1 grp1p1; + ge25519_dbl_p1p1(&grp1p1, (ge25519_p2 *)p); + ge25519_p1p1_to_p3(r, &grp1p1); +} diff --git a/ext/ed25519-amd64-asm/ge25519_double_scalarmult.c b/ext/ed25519-amd64-asm/ge25519_double_scalarmult.c new file mode 100644 index 00000000..30c922af --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_double_scalarmult.c @@ -0,0 +1,102 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +#define S1_SWINDOWSIZE 5 +#define PRE1_SIZE (1<<(S1_SWINDOWSIZE-2)) +#define S2_SWINDOWSIZE 7 +#define PRE2_SIZE (1<<(S2_SWINDOWSIZE-2)) + +ge25519_niels pre2[PRE2_SIZE] = { +#include "ge25519_base_slide_multiples.data" +}; + +static const fe25519 ec2d = {{0xEBD69B9426B2F146, 0x00E0149A8283B156, 0x198E80F2EEF3D130, 0xA406D9DC56DFFCE7}}; + +static void setneutral(ge25519 *r) +{ + fe25519_setint(&r->x,0); + fe25519_setint(&r->y,1); + fe25519_setint(&r->z,1); + fe25519_setint(&r->t,0); +} + +/* computes [s1]p1 + [s2]p2 */ +void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const sc25519 *s2) +{ + signed char slide1[256], slide2[256]; + ge25519_pniels pre1[PRE1_SIZE], neg; + ge25519_p3 d1; + ge25519_p1p1 t; + ge25519_niels nneg; + fe25519 d; + int i; + + sc25519_slide(slide1, s1, S1_SWINDOWSIZE); + sc25519_slide(slide2, s2, S2_SWINDOWSIZE); + + /* precomputation */ + pre1[0] = *(ge25519_pniels *)p1; + ge25519_dbl_p1p1(&t,(ge25519_p2 *)pre1); ge25519_p1p1_to_p3(&d1, &t); + /* Convert pre[0] to projective Niels representation */ + d = pre1[0].ysubx; + fe25519_sub(&pre1[0].ysubx, &pre1[0].xaddy, &pre1[0].ysubx); + fe25519_add(&pre1[0].xaddy, &pre1[0].xaddy, &d); + fe25519_mul(&pre1[0].t2d, &pre1[0].t2d, &ec2d); + + for(i=0;i<PRE1_SIZE-1;i++) + { + ge25519_pnielsadd_p1p1(&t, &d1, &pre1[i]); ge25519_p1p1_to_p3((ge25519_p3 *)&pre1[i+1], &t); + /* Convert pre1[i+1] to projective Niels representation */ + d = pre1[i+1].ysubx; + fe25519_sub(&pre1[i+1].ysubx, &pre1[i+1].xaddy, &pre1[i+1].ysubx); + fe25519_add(&pre1[i+1].xaddy, &pre1[i+1].xaddy, &d); + fe25519_mul(&pre1[i+1].t2d, &pre1[i+1].t2d, &ec2d); + } + + setneutral(r); + for (i = 255;i >= 0;--i) { + if (slide1[i] || slide2[i]) goto firstbit; + } + + for(;i>=0;i--) + { + firstbit: + + ge25519_dbl_p1p1(&t, (ge25519_p2 *)r); + + if(slide1[i]>0) + { + ge25519_p1p1_to_p3(r, &t); + ge25519_pnielsadd_p1p1(&t, r, &pre1[slide1[i]/2]); + } + else if(slide1[i]<0) + { + ge25519_p1p1_to_p3(r, &t); + neg = pre1[-slide1[i]/2]; + d = neg.ysubx; + neg.ysubx = neg.xaddy; + neg.xaddy = d; + fe25519_neg(&neg.t2d, &neg.t2d); + ge25519_pnielsadd_p1p1(&t, r, &neg); + } + + if(slide2[i]>0) + { + ge25519_p1p1_to_p3(r, &t); + ge25519_nielsadd_p1p1(&t, r, &pre2[slide2[i]/2]); + } + else if(slide2[i]<0) + { + ge25519_p1p1_to_p3(r, &t); + nneg = pre2[-slide2[i]/2]; + d = nneg.ysubx; + nneg.ysubx = nneg.xaddy; + nneg.xaddy = d; + fe25519_neg(&nneg.t2d, &nneg.t2d); + ge25519_nielsadd_p1p1(&t, r, &nneg); + } + + ge25519_p1p1_to_p2((ge25519_p2 *)r, &t); + } +} diff --git a/ext/ed25519-amd64-asm/ge25519_isneutral.c b/ext/ed25519-amd64-asm/ge25519_isneutral.c new file mode 100644 index 00000000..cf566dba --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_isneutral.c @@ -0,0 +1,9 @@ +#include "fe25519.h" +#include "ge25519.h" + +int ge25519_isneutral_vartime(const ge25519_p3 *p) +{ + if(!fe25519_iszero_vartime(&p->x)) return 0; + if(!fe25519_iseq_vartime(&p->y, &p->z)) return 0; + return 1; +} diff --git 
a/ext/ed25519-amd64-asm/ge25519_multi_scalarmult.c b/ext/ed25519-amd64-asm/ge25519_multi_scalarmult.c new file mode 100644 index 00000000..afc6aeae --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_multi_scalarmult.c @@ -0,0 +1,102 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" +#include "index_heap.h" + +static void setneutral(ge25519 *r) +{ + fe25519_setint(&r->x,0); + fe25519_setint(&r->y,1); + fe25519_setint(&r->z,1); + fe25519_setint(&r->t,0); +} + +static void ge25519_scalarmult_vartime_2limbs(ge25519 *r, ge25519 *p, sc25519 *s) +{ + if (s->v[1] == 0 && s->v[0] == 1) /* This will happen most of the time after Bos-Coster */ + *r = *p; + else if (s->v[1] == 0 && s->v[0] == 0) /* This won't ever happen, except for all scalars == 0 in Bos-Coster */ + setneutral(r); + else + { + ge25519 d; + unsigned long long mask = (1ULL << 63); + int i = 1; + while(!(mask & s->v[1]) && mask != 0) + mask >>= 1; + if(mask == 0) + { + mask = (1ULL << 63); + i = 0; + while(!(mask & s->v[0]) && mask != 0) + mask >>= 1; + } + d = *p; + mask >>= 1; + for(;mask != 0;mask >>= 1) + { + ge25519_double(&d,&d); + if(s->v[i] & mask) + ge25519_add(&d,&d,p); + } + if(i==1) + { + mask = (1ULL << 63); + for(;mask != 0;mask >>= 1) + { + ge25519_double(&d,&d); + if(s->v[0] & mask) + ge25519_add(&d,&d,p); + } + } + *r = d; + } +} + +/* caller's responsibility to ensure npoints >= 5 */ +void ge25519_multi_scalarmult_vartime(ge25519_p3 *r, ge25519_p3 *p, sc25519 *s, const unsigned long long npoints) +{ + unsigned long long pos[npoints]; + unsigned long long hlen=((npoints+1)/2)|1; + unsigned long long max1, max2,i; + + heap_init(pos, hlen, s); + + for(i=0;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[3] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced(pos, hlen, s); + } + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[2] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_3limbs(pos, hlen, s); + } + /* We know that (npoints-1)/2 scalars are only 128-bit scalars */ + heap_extend(pos, hlen, npoints, s); + hlen = npoints; + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[1] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_2limbs(pos, hlen, s); + } + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if(sc25519_iszero_vartime(&s[max2])) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_1limb(pos, hlen, s); + } + + ge25519_scalarmult_vartime_2limbs(r, &p[max1], &s[max1]); +} diff --git a/ext/ed25519-amd64-asm/ge25519_nielsadd2.s b/ext/ed25519-amd64-asm/ge25519_nielsadd2.s new file mode 100644 index 00000000..ec31e023 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_nielsadd2.s @@ -0,0 +1,5791 @@ + +# qhasm: int64 rp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller 
caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: int64 f0 + +# qhasm: int64 f1 + +# qhasm: int64 f2 + +# qhasm: int64 f3 + +# qhasm: stack64 f0_stack + +# qhasm: stack64 f1_stack + +# qhasm: stack64 f2_stack + +# qhasm: stack64 f3_stack + +# qhasm: int64 g0 + +# qhasm: int64 g1 + +# qhasm: int64 g2 + +# qhasm: int64 g3 + +# qhasm: stack64 g0_stack + +# qhasm: stack64 g1_stack + +# qhasm: stack64 g2_stack + +# qhasm: stack64 g3_stack + +# qhasm: int64 h0 + +# qhasm: int64 h1 + +# qhasm: int64 h2 + +# qhasm: int64 h3 + +# qhasm: stack64 h0_stack + +# qhasm: stack64 h1_stack + +# qhasm: stack64 h2_stack + +# qhasm: stack64 h3_stack + +# qhasm: int64 qt0 + +# qhasm: int64 qt1 + +# qhasm: int64 qt2 + +# qhasm: int64 qt3 + +# qhasm: stack64 qt0_stack + +# qhasm: stack64 qt1_stack + +# qhasm: stack64 qt2_stack + +# qhasm: stack64 qt3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_nielsadd2 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_nielsadd2 +.globl crypto_sign_ed25519_amd64_64_ge25519_nielsadd2 +_crypto_sign_ed25519_amd64_64_ge25519_nielsadd2: +crypto_sign_ed25519_amd64_64_ge25519_nielsadd2: 
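Editor's reconstruction (not part of the diff, and assumption-laden): judging from the qhasm comments that follow, crypto_sign_ed25519_amd64_64_ge25519_nielsadd2 looks like the usual extended-coordinates mixed addition r := r + q with q in Niels form (ysubx = Y2 - X2, xaddy = Y2 + X2, t2d = 2dT2, implicit Z2 = 1): the visible code computes a = (Y1 - X1) * ysubx, b = (Y1 + X1) * xaddy, e = b - a, h = b + a and c = T1 * t2d in exactly that order. The field-element-level rendering below is our reading of the whole routine, written against the fe25519 helpers this commit adds; it is a sketch for orientation, not the package's own source.

/* nielsadd2_sketch.c -- editor's reading of the routine below, to be read
   next to the fe25519.h / ge25519.h headers added by this commit. */
#include "fe25519.h"
#include "ge25519.h"

void nielsadd2_sketch(ge25519_p3 *r, const ge25519_niels *q)
{
    fe25519 a, b, c, d, e, f, g, h;

    fe25519_sub(&a, &r->y, &r->x);      /* a = Y1 - X1 (first sub block below) */
    fe25519_add(&b, &r->y, &r->x);      /* b = Y1 + X1                         */
    fe25519_mul(&a, &a, &q->ysubx);     /* a = (Y1 - X1)(Y2 - X2)              */
    fe25519_mul(&b, &b, &q->xaddy);     /* b = (Y1 + X1)(Y2 + X2)              */
    fe25519_sub(&e, &b, &a);            /* e = b - a                           */
    fe25519_add(&h, &b, &a);            /* h = b + a                           */
    fe25519_mul(&c, &r->t, &q->t2d);    /* c = T1 * 2dT2                       */
    fe25519_add(&d, &r->z, &r->z);      /* d = 2 Z1 (Z2 = 1 for a Niels point) */
    fe25519_sub(&f, &d, &c);            /* f = d - c                           */
    fe25519_add(&g, &d, &c);            /* g = d + c                           */
    fe25519_mul(&r->x, &e, &f);         /* X3 = e f                            */
    fe25519_mul(&r->y, &g, &h);         /* Y3 = g h                            */
    fe25519_mul(&r->z, &f, &g);         /* Z3 = f g                            */
    fe25519_mul(&r->t, &e, &h);         /* T3 = e h                            */
}

Only the a, b, e, h and c steps are directly confirmed by the portion of the listing shown here; the f, g and output products are the standard completion of this addition formula and should be checked against the rest of the file.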
+mov %rsp,%r11 +and $31,%r11 +add $192,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: a0 = *(uint64 *)(rp + 32) +# asm 1: movq 32(<rp=int64#1),>a0=int64#3 +# asm 2: movq 32(<rp=%rdi),>a0=%rdx +movq 32(%rdi),%rdx + +# qhasm: a1 = *(uint64 *)(rp + 40) +# asm 1: movq 40(<rp=int64#1),>a1=int64#4 +# asm 2: movq 40(<rp=%rdi),>a1=%rcx +movq 40(%rdi),%rcx + +# qhasm: a2 = *(uint64 *)(rp + 48) +# asm 1: movq 48(<rp=int64#1),>a2=int64#5 +# asm 2: movq 48(<rp=%rdi),>a2=%r8 +movq 48(%rdi),%r8 + +# qhasm: a3 = *(uint64 *)(rp + 56) +# asm 1: movq 56(<rp=int64#1),>a3=int64#6 +# asm 2: movq 56(<rp=%rdi),>a3=%r9 +movq 56(%rdi),%r9 + +# qhasm: b0 = a0 +# asm 1: mov <a0=int64#3,>b0=int64#7 +# asm 2: mov <a0=%rdx,>b0=%rax +mov %rdx,%rax + +# qhasm: b1 = a1 +# asm 1: mov <a1=int64#4,>b1=int64#8 +# asm 2: mov <a1=%rcx,>b1=%r10 +mov %rcx,%r10 + +# qhasm: b2 = a2 +# asm 1: mov <a2=int64#5,>b2=int64#9 +# asm 2: mov <a2=%r8,>b2=%r11 +mov %r8,%r11 + +# qhasm: b3 = a3 +# asm 1: mov <a3=int64#6,>b3=int64#10 +# asm 2: mov <a3=%r9,>b3=%r12 +mov %r9,%r12 + +# qhasm: carry? a0 -= *(uint64 *) (rp + 0) +# asm 1: subq 0(<rp=int64#1),<a0=int64#3 +# asm 2: subq 0(<rp=%rdi),<a0=%rdx +subq 0(%rdi),%rdx + +# qhasm: carry? a1 -= *(uint64 *) (rp + 8) - carry +# asm 1: sbbq 8(<rp=int64#1),<a1=int64#4 +# asm 2: sbbq 8(<rp=%rdi),<a1=%rcx +sbbq 8(%rdi),%rcx + +# qhasm: carry? a2 -= *(uint64 *) (rp + 16) - carry +# asm 1: sbbq 16(<rp=int64#1),<a2=int64#5 +# asm 2: sbbq 16(<rp=%rdi),<a2=%r8 +sbbq 16(%rdi),%r8 + +# qhasm: carry? a3 -= *(uint64 *) (rp + 24) - carry +# asm 1: sbbq 24(<rp=int64#1),<a3=int64#6 +# asm 2: sbbq 24(<rp=%rdi),<a3=%r9 +sbbq 24(%rdi),%r9 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#11 +# asm 2: mov $0,>subt0=%r13 +mov $0,%r13 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#12 +# asm 2: mov $38,>subt1=%r14 +mov $38,%r14 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#11,<subt1=int64#12 +# asm 2: cmovae <subt0=%r13,<subt1=%r14 +cmovae %r13,%r14 + +# qhasm: carry? a0 -= subt1 +# asm 1: sub <subt1=int64#12,<a0=int64#3 +# asm 2: sub <subt1=%r14,<a0=%rdx +sub %r14,%rdx + +# qhasm: carry? a1 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<a1=int64#4 +# asm 2: sbb <subt0=%r13,<a1=%rcx +sbb %r13,%rcx + +# qhasm: carry? a2 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<a2=int64#5 +# asm 2: sbb <subt0=%r13,<a2=%r8 +sbb %r13,%r8 + +# qhasm: carry? 
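Editor's note and sketch (not part of the diff): the sub/sbb and add/adc chains around this point, with their cmovae-selected constant 38, are the lazy reduction used throughout this file. Field elements live in four 64-bit limbs modulo 2^256, and since 2^256 is congruent to 38 modulo 2^255 - 19, a carry out of bit 256 (or a borrow into it) is folded back by adding (or subtracting) 38, with one extra unconditional fold covering the rare second wrap. Below is a plain-C version of the addition case, with our own names and assuming a compiler that provides unsigned __int128 (the commit already targets x86-64).

/* fe_add_sketch.c -- editor's illustration only; subtraction is the mirror
   image with borrows and a subtracted 38. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t v[4]; } fe;   /* value = v[0] + 2^64 v[1] + ... */

static void fe_add(fe *r, const fe *x, const fe *y)
{
    unsigned __int128 t = 0;
    uint64_t carry;
    int i;

    for (i = 0; i < 4; i++) {            /* 256-bit add, like addq/adcq above */
        t += (unsigned __int128)x->v[i] + y->v[i];
        r->v[i] = (uint64_t)t;
        t >>= 64;
    }
    carry = (uint64_t)t;                 /* 0 or 1: the addt1 = 38-or-0 case */

    t = (unsigned __int128)r->v[0] + 38u * carry;   /* 2^256 == 38 (mod p)  */
    r->v[0] = (uint64_t)t;
    t >>= 64;
    for (i = 1; i < 4 && t; i++) {       /* propagate the small carry        */
        t += r->v[i];
        r->v[i] = (uint64_t)t;
        t >>= 64;
    }
    r->v[0] += 38u * (uint64_t)t;        /* second wrap; cannot carry again  */
}

int main(void)
{
    fe a = {{ ~0ull, ~0ull, ~0ull, ~0ull }};        /* 2^256 - 1 */
    fe b = {{ 123, 0, 0, 0 }};
    fe r;
    fe_add(&r, &a, &b);
    printf("%llu\n", (unsigned long long)r.v[0]);   /* prints 160 = 122 + 38 */
    return 0;
}

The result is only lazily reduced (kept below 2^256, not below 2^255 - 19); full canonical reduction is deferred to a separate step elsewhere in the package.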
a3 -= subt0 - carry +# asm 1: sbb <subt0=int64#11,<a3=int64#6 +# asm 2: sbb <subt0=%r13,<a3=%r9 +sbb %r13,%r9 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#12,<subt0=int64#11 +# asm 2: cmovc <subt1=%r14,<subt0=%r13 +cmovc %r14,%r13 + +# qhasm: a0 -= subt0 +# asm 1: sub <subt0=int64#11,<a0=int64#3 +# asm 2: sub <subt0=%r13,<a0=%rdx +sub %r13,%rdx + +# qhasm: carry? b0 += *(uint64 *) (rp + 0) +# asm 1: addq 0(<rp=int64#1),<b0=int64#7 +# asm 2: addq 0(<rp=%rdi),<b0=%rax +addq 0(%rdi),%rax + +# qhasm: carry? b1 += *(uint64 *) (rp + 8) + carry +# asm 1: adcq 8(<rp=int64#1),<b1=int64#8 +# asm 2: adcq 8(<rp=%rdi),<b1=%r10 +adcq 8(%rdi),%r10 + +# qhasm: carry? b2 += *(uint64 *) (rp + 16) + carry +# asm 1: adcq 16(<rp=int64#1),<b2=int64#9 +# asm 2: adcq 16(<rp=%rdi),<b2=%r11 +adcq 16(%rdi),%r11 + +# qhasm: carry? b3 += *(uint64 *) (rp + 24) + carry +# asm 1: adcq 24(<rp=int64#1),<b3=int64#10 +# asm 2: adcq 24(<rp=%rdi),<b3=%r12 +adcq 24(%rdi),%r12 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#11 +# asm 2: mov $0,>addt0=%r13 +mov $0,%r13 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#12 +# asm 2: mov $38,>addt1=%r14 +mov $38,%r14 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#11,<addt1=int64#12 +# asm 2: cmovae <addt0=%r13,<addt1=%r14 +cmovae %r13,%r14 + +# qhasm: carry? b0 += addt1 +# asm 1: add <addt1=int64#12,<b0=int64#7 +# asm 2: add <addt1=%r14,<b0=%rax +add %r14,%rax + +# qhasm: carry? b1 += addt0 + carry +# asm 1: adc <addt0=int64#11,<b1=int64#8 +# asm 2: adc <addt0=%r13,<b1=%r10 +adc %r13,%r10 + +# qhasm: carry? b2 += addt0 + carry +# asm 1: adc <addt0=int64#11,<b2=int64#9 +# asm 2: adc <addt0=%r13,<b2=%r11 +adc %r13,%r11 + +# qhasm: carry? b3 += addt0 + carry +# asm 1: adc <addt0=int64#11,<b3=int64#10 +# asm 2: adc <addt0=%r13,<b3=%r12 +adc %r13,%r12 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#12,<addt0=int64#11 +# asm 2: cmovc <addt1=%r14,<addt0=%r13 +cmovc %r14,%r13 + +# qhasm: b0 += addt0 +# asm 1: add <addt0=int64#11,<b0=int64#7 +# asm 2: add <addt0=%r13,<b0=%rax +add %r13,%rax + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#3,>a0_stack=stack64#8 +# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#4,>a1_stack=stack64#9 +# asm 2: movq <a1=%rcx,>a1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#5,>a2_stack=stack64#10 +# asm 2: movq <a2=%r8,>a2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#6,>a3_stack=stack64#11 +# asm 2: movq <a3=%r9,>a3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq <b0=int64#7,>b0_stack=stack64#12 +# asm 2: movq <b0=%rax,>b0_stack=88(%rsp) +movq %rax,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq <b1=int64#8,>b1_stack=stack64#13 +# asm 2: movq <b1=%r10,>b1_stack=96(%rsp) +movq %r10,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq <b2=int64#9,>b2_stack=stack64#14 +# asm 2: movq <b2=%r11,>b2_stack=104(%rsp) +movq %r11,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq <b3=int64#10,>b3_stack=stack64#15 +# asm 2: movq <b3=%r12,>b3_stack=112(%rsp) +movq %r12,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov 
$0,%r10 + +# qhasm: mulx0 = a0_stack +# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#9 +# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r11 +movq 56(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rsi),>mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: a0 = mulrax +# asm 1: mov <mulrax=int64#7,>a0=int64#10 +# asm 2: mov <mulrax=%rax,>a0=%r12 +mov %rax,%r12 + +# qhasm: a1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>a1=int64#11 +# asm 2: mov <mulrdx=%rdx,>a1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rsi),>mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#11 +# asm 2: add <mulrax=%rax,<a1=%r13 +add %rax,%r13 + +# qhasm: a2 = 0 +# asm 1: mov $0,>a2=int64#12 +# asm 2: mov $0,>a2=%r14 +mov $0,%r14 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a2=int64#12 +# asm 2: adc <mulrdx=%rdx,<a2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rsi),>mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#12 +# asm 2: add <mulrax=%rax,<a2=%r14 +add %rax,%r14 + +# qhasm: a3 = 0 +# asm 1: mov $0,>a3=int64#13 +# asm 2: mov $0,>a3=%r15 +mov $0,%r15 + +# qhasm: a3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a3=int64#13 +# asm 2: adc <mulrdx=%rdx,<a3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rsi),>mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#13 +# asm 2: add <mulrax=%rax,<a3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = a1_stack +# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#9 +# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r11 +movq 64(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rsi),>mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#11 +# asm 2: add <mulrax=%rax,<a1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rsi),>mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? 
a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#12 +# asm 2: add <mulrax=%rax,<a2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a2 += mulc +# asm 1: add <mulc=int64#14,<a2=int64#12 +# asm 2: add <mulc=%rbx,<a2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rsi),>mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#13 +# asm 2: add <mulrax=%rax,<a3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a3 += mulc +# asm 1: add <mulc=int64#14,<a3=int64#13 +# asm 2: add <mulc=%rbx,<a3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rsi),>mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = a2_stack +# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#9 +# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r11 +movq 72(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rsi),>mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#12 +# asm 2: add <mulrax=%rax,<a2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rsi),>mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#13 +# asm 2: add <mulrax=%rax,<a3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
a3 += mulc +# asm 1: add <mulc=int64#14,<a3=int64#13 +# asm 2: add <mulc=%rbx,<a3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rsi),>mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rsi),>mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = a3_stack +# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#9 +# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r11 +movq 80(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rsi),>mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#13 +# asm 2: add <mulrax=%rax,<a3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rsi),>mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rsi),>mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rsi),>mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? a0 += mulr4 +# asm 1: add <mulr4=int64#4,<a0=int64#10 +# asm 2: add <mulr4=%rcx,<a0=%r12 +add %rcx,%r12 + +# qhasm: carry? a1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<a1=int64#11 +# asm 2: adc <mulr5=%r8,<a1=%r13 +adc %r8,%r13 + +# qhasm: carry? a2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<a2=int64#12 +# asm 2: adc <mulr6=%r9,<a2=%r14 +adc %r9,%r14 + +# qhasm: carry? a3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<a3=int64#13 +# asm 2: adc <mulr7=%r10,<a3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? a0 += mulr8 +# asm 1: add <mulr8=int64#4,<a0=int64#10 +# asm 2: add <mulr8=%rcx,<a0=%r12 +add %rcx,%r12 + +# qhasm: carry? a1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a1=int64#11 +# asm 2: adc <mulzero=%rdx,<a1=%r13 +adc %rdx,%r13 + +# qhasm: carry? a2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a2=int64#12 +# asm 2: adc <mulzero=%rdx,<a2=%r14 +adc %rdx,%r14 + +# qhasm: carry? 
a3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a3=int64#13 +# asm 2: adc <mulzero=%rdx,<a3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: a0 += mulzero +# asm 1: add <mulzero=int64#3,<a0=int64#10 +# asm 2: add <mulzero=%rdx,<a0=%r12 +add %rdx,%r12 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#10,>a0_stack=stack64#8 +# asm 2: movq <a0=%r12,>a0_stack=56(%rsp) +movq %r12,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#11,>a1_stack=stack64#9 +# asm 2: movq <a1=%r13,>a1_stack=64(%rsp) +movq %r13,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#12,>a2_stack=stack64#10 +# asm 2: movq <a2=%r14,>a2_stack=72(%rsp) +movq %r14,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#13,>a3_stack=stack64#11 +# asm 2: movq <a3=%r15,>a3_stack=80(%rsp) +movq %r15,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = b0_stack +# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#9 +# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r11 +movq 88(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: e0 = mulrax +# asm 1: mov <mulrax=int64#7,>e0=int64#10 +# asm 2: mov <mulrax=%rax,>e0=%r12 +mov %rax,%r12 + +# qhasm: e1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>e1=int64#11 +# asm 2: mov <mulrdx=%rdx,>e1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? e1 += mulrax +# asm 1: add <mulrax=int64#7,<e1=int64#11 +# asm 2: add <mulrax=%rax,<e1=%r13 +add %rax,%r13 + +# qhasm: e2 = 0 +# asm 1: mov $0,>e2=int64#12 +# asm 2: mov $0,>e2=%r14 +mov $0,%r14 + +# qhasm: e2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<e2=int64#12 +# asm 2: adc <mulrdx=%rdx,<e2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? 
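Editor's sketch (not part of the diff): each multiplication block in this routine, like the one just completed above, which multiplies the stashed Y1 - X1 by the first field of the Niels operand, is a 4x4-limb schoolbook product into eight limbs followed by folding the upper four limbs back in with the constant crypto_sign_ed25519_amd64_64_38, again because 2^256 is congruent to 38 modulo 2^255 - 19. The plain-C rendering below uses unsigned __int128 in place of the rax:rdx pairs and folds in a slightly different order than the assembly, with the same result; names are ours.

/* fe_mul_sketch.c -- editor's illustration only. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t v[4]; } fe;

static void fe_mul(fe *r, const fe *x, const fe *y)
{
    uint64_t t[8] = {0};
    unsigned __int128 acc;
    uint64_t carry;
    int i, j;

    for (i = 0; i < 4; i++) {                 /* schoolbook: 16 64x64->128 muls */
        carry = 0;
        for (j = 0; j < 4; j++) {
            acc = (unsigned __int128)x->v[i] * y->v[j] + t[i + j] + carry;
            t[i + j] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
        t[i + 4] = carry;
    }

    /* Reduce: t[4..7] carry weights 2^256, 2^320, ...; multiply them by 38 and
       add into t[0..3], the same folding the repeated
       "mulq crypto_sign_ed25519_amd64_64_38" section above performs. */
    carry = 0;
    for (i = 0; i < 4; i++) {
        acc = (unsigned __int128)t[i + 4] * 38u + t[i] + carry;
        r->v[i] = (uint64_t)acc;
        carry = (uint64_t)(acc >> 64);
    }
    acc = (unsigned __int128)r->v[0] + 38u * carry;   /* fold the last carry */
    r->v[0] = (uint64_t)acc;
    carry = (uint64_t)(acc >> 64);
    for (i = 1; i < 4 && carry; i++) {
        acc = (unsigned __int128)r->v[i] + carry;
        r->v[i] = (uint64_t)acc;
        carry = (uint64_t)(acc >> 64);
    }
    r->v[0] += 38u * carry;                   /* analogue of the mulzero tail */
}

int main(void)
{
    fe a = {{ 0, 0, 0, 1 }};                  /* 2^192 */
    fe b = {{ 0, 0, 0, 1 }};
    fe r;
    fe_mul(&r, &a, &b);
    printf("%llu\n", (unsigned long long)r.v[2]);   /* 2^384 == 38*2^128, so 38 */
    return 0;
}

Like the assembly, this keeps the product merely below 2^256 rather than fully reduced.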
e2 += mulrax +# asm 1: add <mulrax=int64#7,<e2=int64#12 +# asm 2: add <mulrax=%rax,<e2=%r14 +add %rax,%r14 + +# qhasm: e3 = 0 +# asm 1: mov $0,>e3=int64#13 +# asm 2: mov $0,>e3=%r15 +mov $0,%r15 + +# qhasm: e3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<e3=int64#13 +# asm 2: adc <mulrdx=%rdx,<e3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#13 +# asm 2: add <mulrax=%rax,<e3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = b1_stack +# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#9 +# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r11 +movq 96(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? e1 += mulrax +# asm 1: add <mulrax=int64#7,<e1=int64#11 +# asm 2: add <mulrax=%rax,<e1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? e2 += mulrax +# asm 1: add <mulrax=int64#7,<e2=int64#12 +# asm 2: add <mulrax=%rax,<e2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? e2 += mulc +# asm 1: add <mulc=int64#14,<e2=int64#12 +# asm 2: add <mulc=%rbx,<e2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#13 +# asm 2: add <mulrax=%rax,<e3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
e3 += mulc +# asm 1: add <mulc=int64#14,<e3=int64#13 +# asm 2: add <mulc=%rbx,<e3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = b2_stack +# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#9 +# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r11 +movq 104(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? e2 += mulrax +# asm 1: add <mulrax=int64#7,<e2=int64#12 +# asm 2: add <mulrax=%rax,<e2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#13 +# asm 2: add <mulrax=%rax,<e3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? e3 += mulc +# asm 1: add <mulc=int64#14,<e3=int64#13 +# asm 2: add <mulc=%rbx,<e3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = b3_stack +# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#9 +# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r11 +movq 112(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#13 +# asm 2: add <mulrax=%rax,<e3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
e0 += mulr4 +# asm 1: add <mulr4=int64#4,<e0=int64#10 +# asm 2: add <mulr4=%rcx,<e0=%r12 +add %rcx,%r12 + +# qhasm: carry? e1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<e1=int64#11 +# asm 2: adc <mulr5=%r8,<e1=%r13 +adc %r8,%r13 + +# qhasm: carry? e2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<e2=int64#12 +# asm 2: adc <mulr6=%r9,<e2=%r14 +adc %r9,%r14 + +# qhasm: carry? e3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<e3=int64#13 +# asm 2: adc <mulr7=%r10,<e3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? e0 += mulr8 +# asm 1: add <mulr8=int64#4,<e0=int64#10 +# asm 2: add <mulr8=%rcx,<e0=%r12 +add %rcx,%r12 + +# qhasm: carry? e1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<e1=int64#11 +# asm 2: adc <mulzero=%rdx,<e1=%r13 +adc %rdx,%r13 + +# qhasm: carry? e2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<e2=int64#12 +# asm 2: adc <mulzero=%rdx,<e2=%r14 +adc %rdx,%r14 + +# qhasm: carry? e3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<e3=int64#13 +# asm 2: adc <mulzero=%rdx,<e3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: e0 += mulzero +# asm 1: add <mulzero=int64#3,<e0=int64#10 +# asm 2: add <mulzero=%rdx,<e0=%r12 +add %rdx,%r12 + +# qhasm: h0 = e0 +# asm 1: mov <e0=int64#10,>h0=int64#3 +# asm 2: mov <e0=%r12,>h0=%rdx +mov %r12,%rdx + +# qhasm: h1 = e1 +# asm 1: mov <e1=int64#11,>h1=int64#4 +# asm 2: mov <e1=%r13,>h1=%rcx +mov %r13,%rcx + +# qhasm: h2 = e2 +# asm 1: mov <e2=int64#12,>h2=int64#5 +# asm 2: mov <e2=%r14,>h2=%r8 +mov %r14,%r8 + +# qhasm: h3 = e3 +# asm 1: mov <e3=int64#13,>h3=int64#6 +# asm 2: mov <e3=%r15,>h3=%r9 +mov %r15,%r9 + +# qhasm: carry? e0 -= a0_stack +# asm 1: subq <a0_stack=stack64#8,<e0=int64#10 +# asm 2: subq <a0_stack=56(%rsp),<e0=%r12 +subq 56(%rsp),%r12 + +# qhasm: carry? e1 -= a1_stack - carry +# asm 1: sbbq <a1_stack=stack64#9,<e1=int64#11 +# asm 2: sbbq <a1_stack=64(%rsp),<e1=%r13 +sbbq 64(%rsp),%r13 + +# qhasm: carry? e2 -= a2_stack - carry +# asm 1: sbbq <a2_stack=stack64#10,<e2=int64#12 +# asm 2: sbbq <a2_stack=72(%rsp),<e2=%r14 +sbbq 72(%rsp),%r14 + +# qhasm: carry? e3 -= a3_stack - carry +# asm 1: sbbq <a3_stack=stack64#11,<e3=int64#13 +# asm 2: sbbq <a3_stack=80(%rsp),<e3=%r15 +sbbq 80(%rsp),%r15 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#7 +# asm 2: mov $0,>subt0=%rax +mov $0,%rax + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#8 +# asm 2: mov $38,>subt1=%r10 +mov $38,%r10 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#7,<subt1=int64#8 +# asm 2: cmovae <subt0=%rax,<subt1=%r10 +cmovae %rax,%r10 + +# qhasm: carry? e0 -= subt1 +# asm 1: sub <subt1=int64#8,<e0=int64#10 +# asm 2: sub <subt1=%r10,<e0=%r12 +sub %r10,%r12 + +# qhasm: carry? e1 -= subt0 - carry +# asm 1: sbb <subt0=int64#7,<e1=int64#11 +# asm 2: sbb <subt0=%rax,<e1=%r13 +sbb %rax,%r13 + +# qhasm: carry? 
e2 -= subt0 - carry +# asm 1: sbb <subt0=int64#7,<e2=int64#12 +# asm 2: sbb <subt0=%rax,<e2=%r14 +sbb %rax,%r14 + +# qhasm: carry? e3 -= subt0 - carry +# asm 1: sbb <subt0=int64#7,<e3=int64#13 +# asm 2: sbb <subt0=%rax,<e3=%r15 +sbb %rax,%r15 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#8,<subt0=int64#7 +# asm 2: cmovc <subt1=%r10,<subt0=%rax +cmovc %r10,%rax + +# qhasm: e0 -= subt0 +# asm 1: sub <subt0=int64#7,<e0=int64#10 +# asm 2: sub <subt0=%rax,<e0=%r12 +sub %rax,%r12 + +# qhasm: carry? h0 += a0_stack +# asm 1: addq <a0_stack=stack64#8,<h0=int64#3 +# asm 2: addq <a0_stack=56(%rsp),<h0=%rdx +addq 56(%rsp),%rdx + +# qhasm: carry? h1 += a1_stack + carry +# asm 1: adcq <a1_stack=stack64#9,<h1=int64#4 +# asm 2: adcq <a1_stack=64(%rsp),<h1=%rcx +adcq 64(%rsp),%rcx + +# qhasm: carry? h2 += a2_stack + carry +# asm 1: adcq <a2_stack=stack64#10,<h2=int64#5 +# asm 2: adcq <a2_stack=72(%rsp),<h2=%r8 +adcq 72(%rsp),%r8 + +# qhasm: carry? h3 += a3_stack + carry +# asm 1: adcq <a3_stack=stack64#11,<h3=int64#6 +# asm 2: adcq <a3_stack=80(%rsp),<h3=%r9 +adcq 80(%rsp),%r9 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#7 +# asm 2: mov $0,>addt0=%rax +mov $0,%rax + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#8 +# asm 2: mov $38,>addt1=%r10 +mov $38,%r10 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#7,<addt1=int64#8 +# asm 2: cmovae <addt0=%rax,<addt1=%r10 +cmovae %rax,%r10 + +# qhasm: carry? h0 += addt1 +# asm 1: add <addt1=int64#8,<h0=int64#3 +# asm 2: add <addt1=%r10,<h0=%rdx +add %r10,%rdx + +# qhasm: carry? h1 += addt0 + carry +# asm 1: adc <addt0=int64#7,<h1=int64#4 +# asm 2: adc <addt0=%rax,<h1=%rcx +adc %rax,%rcx + +# qhasm: carry? h2 += addt0 + carry +# asm 1: adc <addt0=int64#7,<h2=int64#5 +# asm 2: adc <addt0=%rax,<h2=%r8 +adc %rax,%r8 + +# qhasm: carry? 
h3 += addt0 + carry +# asm 1: adc <addt0=int64#7,<h3=int64#6 +# asm 2: adc <addt0=%rax,<h3=%r9 +adc %rax,%r9 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#8,<addt0=int64#7 +# asm 2: cmovc <addt1=%r10,<addt0=%rax +cmovc %r10,%rax + +# qhasm: h0 += addt0 +# asm 1: add <addt0=int64#7,<h0=int64#3 +# asm 2: add <addt0=%rax,<h0=%rdx +add %rax,%rdx + +# qhasm: h0_stack = h0 +# asm 1: movq <h0=int64#3,>h0_stack=stack64#8 +# asm 2: movq <h0=%rdx,>h0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: h1_stack = h1 +# asm 1: movq <h1=int64#4,>h1_stack=stack64#9 +# asm 2: movq <h1=%rcx,>h1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: h2_stack = h2 +# asm 1: movq <h2=int64#5,>h2_stack=stack64#10 +# asm 2: movq <h2=%r8,>h2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: h3_stack = h3 +# asm 1: movq <h3=int64#6,>h3_stack=stack64#11 +# asm 2: movq <h3=%r9,>h3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: e0_stack = e0 +# asm 1: movq <e0=int64#10,>e0_stack=stack64#12 +# asm 2: movq <e0=%r12,>e0_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: e1_stack = e1 +# asm 1: movq <e1=int64#11,>e1_stack=stack64#13 +# asm 2: movq <e1=%r13,>e1_stack=96(%rsp) +movq %r13,96(%rsp) + +# qhasm: e2_stack = e2 +# asm 1: movq <e2=int64#12,>e2_stack=stack64#14 +# asm 2: movq <e2=%r14,>e2_stack=104(%rsp) +movq %r14,104(%rsp) + +# qhasm: e3_stack = e3 +# asm 1: movq <e3=int64#13,>e3_stack=stack64#15 +# asm 2: movq <e3=%r15,>e3_stack=112(%rsp) +movq %r15,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(rp + 96) +# asm 1: movq 96(<rp=int64#1),>mulx0=int64#9 +# asm 2: movq 96(<rp=%rdi),>mulx0=%r11 +movq 96(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: c0 = mulrax +# asm 1: mov <mulrax=int64#7,>c0=int64#10 +# asm 2: mov <mulrax=%rax,>c0=%r12 +mov %rax,%r12 + +# qhasm: c1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>c1=int64#11 +# asm 2: mov <mulrdx=%rdx,>c1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#11 +# asm 2: add <mulrax=%rax,<c1=%r13 +add %rax,%r13 + +# qhasm: c2 = 0 +# asm 1: mov $0,>c2=int64#12 +# asm 2: mov $0,>c2=%r14 +mov $0,%r14 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c2=int64#12 +# asm 2: adc <mulrdx=%rdx,<c2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? 
c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#12 +# asm 2: add <mulrax=%rax,<c2=%r14 +add %rax,%r14 + +# qhasm: c3 = 0 +# asm 1: mov $0,>c3=int64#13 +# asm 2: mov $0,>c3=%r15 +mov $0,%r15 + +# qhasm: c3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c3=int64#13 +# asm 2: adc <mulrdx=%rdx,<c3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#13 +# asm 2: add <mulrax=%rax,<c3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(rp + 104) +# asm 1: movq 104(<rp=int64#1),>mulx1=int64#9 +# asm 2: movq 104(<rp=%rdi),>mulx1=%r11 +movq 104(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#11 +# asm 2: add <mulrax=%rax,<c1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#12 +# asm 2: add <mulrax=%rax,<c2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c2 += mulc +# asm 1: add <mulc=int64#14,<c2=int64#12 +# asm 2: add <mulc=%rbx,<c2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#13 +# asm 2: add <mulrax=%rax,<c3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
c3 += mulc +# asm 1: add <mulc=int64#14,<c3=int64#13 +# asm 2: add <mulc=%rbx,<c3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(rp + 112) +# asm 1: movq 112(<rp=int64#1),>mulx2=int64#9 +# asm 2: movq 112(<rp=%rdi),>mulx2=%r11 +movq 112(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#12 +# asm 2: add <mulrax=%rax,<c2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#13 +# asm 2: add <mulrax=%rax,<c3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c3 += mulc +# asm 1: add <mulc=int64#14,<c3=int64#13 +# asm 2: add <mulc=%rbx,<c3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(rp + 120) +# asm 1: movq 120(<rp=int64#1),>mulx3=int64#9 +# asm 2: movq 120(<rp=%rdi),>mulx3=%r11 +movq 120(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#13 +# asm 2: add <mulrax=%rax,<c3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
c0 += mulr4 +# asm 1: add <mulr4=int64#2,<c0=int64#10 +# asm 2: add <mulr4=%rsi,<c0=%r12 +add %rsi,%r12 + +# qhasm: carry? c1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<c1=int64#11 +# asm 2: adc <mulr5=%rcx,<c1=%r13 +adc %rcx,%r13 + +# qhasm: carry? c2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<c2=int64#12 +# asm 2: adc <mulr6=%r8,<c2=%r14 +adc %r8,%r14 + +# qhasm: carry? c3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<c3=int64#13 +# asm 2: adc <mulr7=%r9,<c3=%r15 +adc %r9,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? c0 += mulr8 +# asm 1: add <mulr8=int64#3,<c0=int64#10 +# asm 2: add <mulr8=%rdx,<c0=%r12 +add %rdx,%r12 + +# qhasm: carry? c1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<c1=int64#11 +# asm 2: adc <mulzero=%rsi,<c1=%r13 +adc %rsi,%r13 + +# qhasm: carry? c2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<c2=int64#12 +# asm 2: adc <mulzero=%rsi,<c2=%r14 +adc %rsi,%r14 + +# qhasm: carry? c3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<c3=int64#13 +# asm 2: adc <mulzero=%rsi,<c3=%r15 +adc %rsi,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: c0 += mulzero +# asm 1: add <mulzero=int64#2,<c0=int64#10 +# asm 2: add <mulzero=%rsi,<c0=%r12 +add %rsi,%r12 + +# qhasm: f0 = *(uint64 *)(rp + 64) +# asm 1: movq 64(<rp=int64#1),>f0=int64#2 +# asm 2: movq 64(<rp=%rdi),>f0=%rsi +movq 64(%rdi),%rsi + +# qhasm: f1 = *(uint64 *)(rp + 72) +# asm 1: movq 72(<rp=int64#1),>f1=int64#3 +# asm 2: movq 72(<rp=%rdi),>f1=%rdx +movq 72(%rdi),%rdx + +# qhasm: f2 = *(uint64 *)(rp + 80) +# asm 1: movq 80(<rp=int64#1),>f2=int64#4 +# asm 2: movq 80(<rp=%rdi),>f2=%rcx +movq 80(%rdi),%rcx + +# qhasm: f3 = *(uint64 *)(rp + 88) +# asm 1: movq 88(<rp=int64#1),>f3=int64#5 +# asm 2: movq 88(<rp=%rdi),>f3=%r8 +movq 88(%rdi),%r8 + +# qhasm: carry? f0 += f0 +# asm 1: add <f0=int64#2,<f0=int64#2 +# asm 2: add <f0=%rsi,<f0=%rsi +add %rsi,%rsi + +# qhasm: carry? f1 += f1 + carry +# asm 1: adc <f1=int64#3,<f1=int64#3 +# asm 2: adc <f1=%rdx,<f1=%rdx +adc %rdx,%rdx + +# qhasm: carry? f2 += f2 + carry +# asm 1: adc <f2=int64#4,<f2=int64#4 +# asm 2: adc <f2=%rcx,<f2=%rcx +adc %rcx,%rcx + +# qhasm: carry? f3 += f3 + carry +# asm 1: adc <f3=int64#5,<f3=int64#5 +# asm 2: adc <f3=%r8,<f3=%r8 +adc %r8,%r8 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#6,<addt1=int64#7 +# asm 2: cmovae <addt0=%r9,<addt1=%rax +cmovae %r9,%rax + +# qhasm: carry? f0 += addt1 +# asm 1: add <addt1=int64#7,<f0=int64#2 +# asm 2: add <addt1=%rax,<f0=%rsi +add %rax,%rsi + +# qhasm: carry? f1 += addt0 + carry +# asm 1: adc <addt0=int64#6,<f1=int64#3 +# asm 2: adc <addt0=%r9,<f1=%rdx +adc %r9,%rdx + +# qhasm: carry? 
f2 += addt0 + carry +# asm 1: adc <addt0=int64#6,<f2=int64#4 +# asm 2: adc <addt0=%r9,<f2=%rcx +adc %r9,%rcx + +# qhasm: carry? f3 += addt0 + carry +# asm 1: adc <addt0=int64#6,<f3=int64#5 +# asm 2: adc <addt0=%r9,<f3=%r8 +adc %r9,%r8 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#7,<addt0=int64#6 +# asm 2: cmovc <addt1=%rax,<addt0=%r9 +cmovc %rax,%r9 + +# qhasm: f0 += addt0 +# asm 1: add <addt0=int64#6,<f0=int64#2 +# asm 2: add <addt0=%r9,<f0=%rsi +add %r9,%rsi + +# qhasm: g0 = f0 +# asm 1: mov <f0=int64#2,>g0=int64#6 +# asm 2: mov <f0=%rsi,>g0=%r9 +mov %rsi,%r9 + +# qhasm: g1 = f1 +# asm 1: mov <f1=int64#3,>g1=int64#7 +# asm 2: mov <f1=%rdx,>g1=%rax +mov %rdx,%rax + +# qhasm: g2 = f2 +# asm 1: mov <f2=int64#4,>g2=int64#8 +# asm 2: mov <f2=%rcx,>g2=%r10 +mov %rcx,%r10 + +# qhasm: g3 = f3 +# asm 1: mov <f3=int64#5,>g3=int64#9 +# asm 2: mov <f3=%r8,>g3=%r11 +mov %r8,%r11 + +# qhasm: carry? f0 -= c0 +# asm 1: sub <c0=int64#10,<f0=int64#2 +# asm 2: sub <c0=%r12,<f0=%rsi +sub %r12,%rsi + +# qhasm: carry? f1 -= c1 - carry +# asm 1: sbb <c1=int64#11,<f1=int64#3 +# asm 2: sbb <c1=%r13,<f1=%rdx +sbb %r13,%rdx + +# qhasm: carry? f2 -= c2 - carry +# asm 1: sbb <c2=int64#12,<f2=int64#4 +# asm 2: sbb <c2=%r14,<f2=%rcx +sbb %r14,%rcx + +# qhasm: carry? f3 -= c3 - carry +# asm 1: sbb <c3=int64#13,<f3=int64#5 +# asm 2: sbb <c3=%r15,<f3=%r8 +sbb %r15,%r8 + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#14 +# asm 2: mov $0,>subt0=%rbx +mov $0,%rbx + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#15 +# asm 2: mov $38,>subt1=%rbp +mov $38,%rbp + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#14,<subt1=int64#15 +# asm 2: cmovae <subt0=%rbx,<subt1=%rbp +cmovae %rbx,%rbp + +# qhasm: carry? f0 -= subt1 +# asm 1: sub <subt1=int64#15,<f0=int64#2 +# asm 2: sub <subt1=%rbp,<f0=%rsi +sub %rbp,%rsi + +# qhasm: carry? f1 -= subt0 - carry +# asm 1: sbb <subt0=int64#14,<f1=int64#3 +# asm 2: sbb <subt0=%rbx,<f1=%rdx +sbb %rbx,%rdx + +# qhasm: carry? f2 -= subt0 - carry +# asm 1: sbb <subt0=int64#14,<f2=int64#4 +# asm 2: sbb <subt0=%rbx,<f2=%rcx +sbb %rbx,%rcx + +# qhasm: carry? f3 -= subt0 - carry +# asm 1: sbb <subt0=int64#14,<f3=int64#5 +# asm 2: sbb <subt0=%rbx,<f3=%r8 +sbb %rbx,%r8 + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#15,<subt0=int64#14 +# asm 2: cmovc <subt1=%rbp,<subt0=%rbx +cmovc %rbp,%rbx + +# qhasm: f0 -= subt0 +# asm 1: sub <subt0=int64#14,<f0=int64#2 +# asm 2: sub <subt0=%rbx,<f0=%rsi +sub %rbx,%rsi + +# qhasm: carry? g0 += c0 +# asm 1: add <c0=int64#10,<g0=int64#6 +# asm 2: add <c0=%r12,<g0=%r9 +add %r12,%r9 + +# qhasm: carry? g1 += c1 + carry +# asm 1: adc <c1=int64#11,<g1=int64#7 +# asm 2: adc <c1=%r13,<g1=%rax +adc %r13,%rax + +# qhasm: carry? g2 += c2 + carry +# asm 1: adc <c2=int64#12,<g2=int64#8 +# asm 2: adc <c2=%r14,<g2=%r10 +adc %r14,%r10 + +# qhasm: carry? g3 += c3 + carry +# asm 1: adc <c3=int64#13,<g3=int64#9 +# asm 2: adc <c3=%r15,<g3=%r11 +adc %r15,%r11 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#10 +# asm 2: mov $0,>addt0=%r12 +mov $0,%r12 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#11 +# asm 2: mov $38,>addt1=%r13 +mov $38,%r13 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#10,<addt1=int64#11 +# asm 2: cmovae <addt0=%r12,<addt1=%r13 +cmovae %r12,%r13 + +# qhasm: carry? g0 += addt1 +# asm 1: add <addt1=int64#11,<g0=int64#6 +# asm 2: add <addt1=%r13,<g0=%r9 +add %r13,%r9 + +# qhasm: carry? 
g1 += addt0 + carry +# asm 1: adc <addt0=int64#10,<g1=int64#7 +# asm 2: adc <addt0=%r12,<g1=%rax +adc %r12,%rax + +# qhasm: carry? g2 += addt0 + carry +# asm 1: adc <addt0=int64#10,<g2=int64#8 +# asm 2: adc <addt0=%r12,<g2=%r10 +adc %r12,%r10 + +# qhasm: carry? g3 += addt0 + carry +# asm 1: adc <addt0=int64#10,<g3=int64#9 +# asm 2: adc <addt0=%r12,<g3=%r11 +adc %r12,%r11 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#11,<addt0=int64#10 +# asm 2: cmovc <addt1=%r13,<addt0=%r12 +cmovc %r13,%r12 + +# qhasm: g0 += addt0 +# asm 1: add <addt0=int64#10,<g0=int64#6 +# asm 2: add <addt0=%r12,<g0=%r9 +add %r12,%r9 + +# qhasm: g0_stack = g0 +# asm 1: movq <g0=int64#6,>g0_stack=stack64#16 +# asm 2: movq <g0=%r9,>g0_stack=120(%rsp) +movq %r9,120(%rsp) + +# qhasm: g1_stack = g1 +# asm 1: movq <g1=int64#7,>g1_stack=stack64#17 +# asm 2: movq <g1=%rax,>g1_stack=128(%rsp) +movq %rax,128(%rsp) + +# qhasm: g2_stack = g2 +# asm 1: movq <g2=int64#8,>g2_stack=stack64#18 +# asm 2: movq <g2=%r10,>g2_stack=136(%rsp) +movq %r10,136(%rsp) + +# qhasm: g3_stack = g3 +# asm 1: movq <g3=int64#9,>g3_stack=stack64#19 +# asm 2: movq <g3=%r11,>g3_stack=144(%rsp) +movq %r11,144(%rsp) + +# qhasm: f0_stack = f0 +# asm 1: movq <f0=int64#2,>f0_stack=stack64#20 +# asm 2: movq <f0=%rsi,>f0_stack=152(%rsp) +movq %rsi,152(%rsp) + +# qhasm: f1_stack = f1 +# asm 1: movq <f1=int64#3,>f1_stack=stack64#21 +# asm 2: movq <f1=%rdx,>f1_stack=160(%rsp) +movq %rdx,160(%rsp) + +# qhasm: f2_stack = f2 +# asm 1: movq <f2=int64#4,>f2_stack=stack64#22 +# asm 2: movq <f2=%rcx,>f2_stack=168(%rsp) +movq %rcx,168(%rsp) + +# qhasm: f3_stack = f3 +# asm 1: movq <f3=int64#5,>f3_stack=stack64#23 +# asm 2: movq <f3=%r8,>f3_stack=176(%rsp) +movq %r8,176(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = e0_stack +# asm 1: movq <e0_stack=stack64#12,>mulx0=int64#8 +# asm 2: movq <e0_stack=88(%rsp),>mulx0=%r10 +movq 88(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: rx0 = mulrax +# asm 1: mov <mulrax=int64#7,>rx0=int64#9 +# asm 2: mov <mulrax=%rax,>rx0=%r11 +mov %rax,%r11 + +# qhasm: rx1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rx1=int64#10 +# asm 2: mov <mulrdx=%rdx,>rx1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? 
rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#10 +# asm 2: add <mulrax=%rax,<rx1=%r12 +add %rax,%r12 + +# qhasm: rx2 = 0 +# asm 1: mov $0,>rx2=int64#11 +# asm 2: mov $0,>rx2=%r13 +mov $0,%r13 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx2=int64#11 +# asm 2: adc <mulrdx=%rdx,<rx2=%r13 +adc %rdx,%r13 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#11 +# asm 2: add <mulrax=%rax,<rx2=%r13 +add %rax,%r13 + +# qhasm: rx3 = 0 +# asm 1: mov $0,>rx3=int64#12 +# asm 2: mov $0,>rx3=%r14 +mov $0,%r14 + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx3=int64#12 +# asm 2: adc <mulrdx=%rdx,<rx3=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#12 +# asm 2: add <mulrax=%rax,<rx3=%r14 +add %rax,%r14 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi +adc %rdx,%rsi + +# qhasm: mulx1 = e1_stack +# asm 1: movq <e1_stack=stack64#13,>mulx1=int64#8 +# asm 2: movq <e1_stack=96(%rsp),>mulx1=%r10 +movq 96(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#10 +# asm 2: add <mulrax=%rax,<rx1=%r12 +add %rax,%r12 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#11 +# asm 2: add <mulrax=%rax,<rx2=%r13 +add %rax,%r13 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx2 += mulc +# asm 1: add <mulc=int64#13,<rx2=int64#11 +# asm 2: add <mulc=%r15,<rx2=%r13 +add %r15,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? 
rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#12 +# asm 2: add <mulrax=%rax,<rx3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#13,<rx3=int64#12 +# asm 2: add <mulc=%r15,<rx3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx +adc %rdx,%rcx + +# qhasm: mulx2 = e2_stack +# asm 1: movq <e2_stack=stack64#14,>mulx2=int64#8 +# asm 2: movq <e2_stack=104(%rsp),>mulx2=%r10 +movq 104(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#11 +# asm 2: add <mulrax=%rax,<rx2=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#12 +# asm 2: add <mulrax=%rax,<rx3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#13,<rx3=int64#12 +# asm 2: add <mulc=%r15,<rx3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: mulx3 = e3_stack +# asm 1: movq <e3_stack=stack64#15,>mulx3=int64#8 +# asm 2: movq <e3_stack=112(%rsp),>mulx3=%r10 +movq 112(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#12 +# asm 2: add <mulrax=%rax,<rx3=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#13,<mulr6=int64#5 +# asm 2: add <mulc=%r15,<mulr6=%r8 +add %r15,%r8 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#2,>mulrax=int64#7 +# asm 2: mov <mulr4=%rsi,>mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr5=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr6=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr7=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rx0 += mulr4 +# asm 1: add <mulr4=int64#2,<rx0=int64#9 +# asm 2: add <mulr4=%rsi,<rx0=%r11 +add %rsi,%r11 + +# qhasm: carry? rx1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rx1=int64#10 +# asm 2: adc <mulr5=%rcx,<rx1=%r12 +adc %rcx,%r12 + +# qhasm: carry? rx2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rx2=int64#11 +# asm 2: adc <mulr6=%r8,<rx2=%r13 +adc %r8,%r13 + +# qhasm: carry? rx3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rx3=int64#12 +# asm 2: adc <mulr7=%r9,<rx3=%r14 +adc %r9,%r14 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rx0 += mulr8 +# asm 1: add <mulr8=int64#3,<rx0=int64#9 +# asm 2: add <mulr8=%rdx,<rx0=%r11 +add %rdx,%r11 + +# qhasm: carry? rx1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rx1=int64#10 +# asm 2: adc <mulzero=%rsi,<rx1=%r12 +adc %rsi,%r12 + +# qhasm: carry? rx2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rx2=int64#11 +# asm 2: adc <mulzero=%rsi,<rx2=%r13 +adc %rsi,%r13 + +# qhasm: carry? rx3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rx3=int64#12 +# asm 2: adc <mulzero=%rsi,<rx3=%r14 +adc %rsi,%r14 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rx0 += mulzero +# asm 1: add <mulzero=int64#2,<rx0=int64#9 +# asm 2: add <mulzero=%rsi,<rx0=%r11 +add %rsi,%r11 + +# qhasm: *(uint64 *)(rp + 0) = rx0 +# asm 1: movq <rx0=int64#9,0(<rp=int64#1) +# asm 2: movq <rx0=%r11,0(<rp=%rdi) +movq %r11,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = rx1 +# asm 1: movq <rx1=int64#10,8(<rp=int64#1) +# asm 2: movq <rx1=%r12,8(<rp=%rdi) +movq %r12,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = rx2 +# asm 1: movq <rx2=int64#11,16(<rp=int64#1) +# asm 2: movq <rx2=%r13,16(<rp=%rdi) +movq %r13,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = rx3 +# asm 1: movq <rx3=int64#12,24(<rp=int64#1) +# asm 2: movq <rx3=%r14,24(<rp=%rdi) +movq %r14,24(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = h0_stack +# asm 1: movq <h0_stack=stack64#8,>mulx0=int64#8 +# asm 2: movq <h0_stack=56(%rsp),>mulx0=%r10 +movq 56(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: ry0 = mulrax +# asm 1: mov <mulrax=int64#7,>ry0=int64#9 +# asm 2: mov <mulrax=%rax,>ry0=%r11 +mov %rax,%r11 + +# qhasm: ry1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>ry1=int64#10 +# asm 2: mov <mulrdx=%rdx,>ry1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = g1_stack +# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq 
<g1_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? ry1 += mulrax +# asm 1: add <mulrax=int64#7,<ry1=int64#10 +# asm 2: add <mulrax=%rax,<ry1=%r12 +add %rax,%r12 + +# qhasm: ry2 = 0 +# asm 1: mov $0,>ry2=int64#11 +# asm 2: mov $0,>ry2=%r13 +mov $0,%r13 + +# qhasm: ry2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<ry2=int64#11 +# asm 2: adc <mulrdx=%rdx,<ry2=%r13 +adc %rdx,%r13 + +# qhasm: mulrax = g2_stack +# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#11 +# asm 2: add <mulrax=%rax,<ry2=%r13 +add %rax,%r13 + +# qhasm: ry3 = 0 +# asm 1: mov $0,>ry3=int64#12 +# asm 2: mov $0,>ry3=%r14 +mov $0,%r14 + +# qhasm: ry3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<ry3=int64#12 +# asm 2: adc <mulrdx=%rdx,<ry3=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = g3_stack +# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#12 +# asm 2: add <mulrax=%rax,<ry3=%r14 +add %rax,%r14 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi +adc %rdx,%rsi + +# qhasm: mulx1 = h1_stack +# asm 1: movq <h1_stack=stack64#9,>mulx1=int64#8 +# asm 2: movq <h1_stack=64(%rsp),>mulx1=%r10 +movq 64(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? ry1 += mulrax +# asm 1: add <mulrax=int64#7,<ry1=int64#10 +# asm 2: add <mulrax=%rax,<ry1=%r12 +add %rax,%r12 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g1_stack +# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#11 +# asm 2: add <mulrax=%rax,<ry2=%r13 +add %rax,%r13 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
ry2 += mulc +# asm 1: add <mulc=int64#13,<ry2=int64#11 +# asm 2: add <mulc=%r15,<ry2=%r13 +add %r15,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g2_stack +# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#12 +# asm 2: add <mulrax=%rax,<ry3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? ry3 += mulc +# asm 1: add <mulc=int64#13,<ry3=int64#12 +# asm 2: add <mulc=%r15,<ry3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g3_stack +# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx +adc %rdx,%rcx + +# qhasm: mulx2 = h2_stack +# asm 1: movq <h2_stack=stack64#10,>mulx2=int64#8 +# asm 2: movq <h2_stack=72(%rsp),>mulx2=%r10 +movq 72(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#11 +# asm 2: add <mulrax=%rax,<ry2=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g1_stack +# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#12 +# asm 2: add <mulrax=%rax,<ry3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
ry3 += mulc +# asm 1: add <mulc=int64#13,<ry3=int64#12 +# asm 2: add <mulc=%r15,<ry3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g2_stack +# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g3_stack +# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: mulx3 = h3_stack +# asm 1: movq <h3_stack=stack64#11,>mulx3=int64#8 +# asm 2: movq <h3_stack=80(%rsp),>mulx3=%r10 +movq 80(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7 +# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#12 +# asm 2: add <mulrax=%rax,<ry3=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g1_stack +# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7 +# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g2_stack +# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7 +# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = g3_stack +# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7 +# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#13,<mulr6=int64#5 +# asm 2: add <mulc=%r15,<mulr6=%r8 +add %r15,%r8 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#2,>mulrax=int64#7 +# asm 2: mov <mulr4=%rsi,>mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr5=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr6=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr7=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? ry0 += mulr4 +# asm 1: add <mulr4=int64#2,<ry0=int64#9 +# asm 2: add <mulr4=%rsi,<ry0=%r11 +add %rsi,%r11 + +# qhasm: carry? ry1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<ry1=int64#10 +# asm 2: adc <mulr5=%rcx,<ry1=%r12 +adc %rcx,%r12 + +# qhasm: carry? ry2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<ry2=int64#11 +# asm 2: adc <mulr6=%r8,<ry2=%r13 +adc %r8,%r13 + +# qhasm: carry? ry3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<ry3=int64#12 +# asm 2: adc <mulr7=%r9,<ry3=%r14 +adc %r9,%r14 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? ry0 += mulr8 +# asm 1: add <mulr8=int64#3,<ry0=int64#9 +# asm 2: add <mulr8=%rdx,<ry0=%r11 +add %rdx,%r11 + +# qhasm: carry? ry1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<ry1=int64#10 +# asm 2: adc <mulzero=%rsi,<ry1=%r12 +adc %rsi,%r12 + +# qhasm: carry? ry2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<ry2=int64#11 +# asm 2: adc <mulzero=%rsi,<ry2=%r13 +adc %rsi,%r13 + +# qhasm: carry? 
ry3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<ry3=int64#12 +# asm 2: adc <mulzero=%rsi,<ry3=%r14 +adc %rsi,%r14 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: ry0 += mulzero +# asm 1: add <mulzero=int64#2,<ry0=int64#9 +# asm 2: add <mulzero=%rsi,<ry0=%r11 +add %rsi,%r11 + +# qhasm: *(uint64 *)(rp + 32) = ry0 +# asm 1: movq <ry0=int64#9,32(<rp=int64#1) +# asm 2: movq <ry0=%r11,32(<rp=%rdi) +movq %r11,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = ry1 +# asm 1: movq <ry1=int64#10,40(<rp=int64#1) +# asm 2: movq <ry1=%r12,40(<rp=%rdi) +movq %r12,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = ry2 +# asm 1: movq <ry2=int64#11,48(<rp=int64#1) +# asm 2: movq <ry2=%r13,48(<rp=%rdi) +movq %r13,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = ry3 +# asm 1: movq <ry3=int64#12,56(<rp=int64#1) +# asm 2: movq <ry3=%r14,56(<rp=%rdi) +movq %r14,56(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = g0_stack +# asm 1: movq <g0_stack=stack64#16,>mulx0=int64#8 +# asm 2: movq <g0_stack=120(%rsp),>mulx0=%r10 +movq 120(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: rz0 = mulrax +# asm 1: mov <mulrax=int64#7,>rz0=int64#9 +# asm 2: mov <mulrax=%rax,>rz0=%r11 +mov %rax,%r11 + +# qhasm: rz1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rz1=int64#10 +# asm 2: mov <mulrdx=%rdx,>rz1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rz1 += mulrax +# asm 1: add <mulrax=int64#7,<rz1=int64#10 +# asm 2: add <mulrax=%rax,<rz1=%r12 +add %rax,%r12 + +# qhasm: rz2 = 0 +# asm 1: mov $0,>rz2=int64#11 +# asm 2: mov $0,>rz2=%r13 +mov $0,%r13 + +# qhasm: rz2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rz2=int64#11 +# asm 2: adc <mulrdx=%rdx,<rz2=%r13 +adc %rdx,%r13 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? 
rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#11 +# asm 2: add <mulrax=%rax,<rz2=%r13 +add %rax,%r13 + +# qhasm: rz3 = 0 +# asm 1: mov $0,>rz3=int64#12 +# asm 2: mov $0,>rz3=%r14 +mov $0,%r14 + +# qhasm: rz3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rz3=int64#12 +# asm 2: adc <mulrdx=%rdx,<rz3=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#12 +# asm 2: add <mulrax=%rax,<rz3=%r14 +add %rax,%r14 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi +adc %rdx,%rsi + +# qhasm: mulx1 = g1_stack +# asm 1: movq <g1_stack=stack64#17,>mulx1=int64#8 +# asm 2: movq <g1_stack=128(%rsp),>mulx1=%r10 +movq 128(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rz1 += mulrax +# asm 1: add <mulrax=int64#7,<rz1=int64#10 +# asm 2: add <mulrax=%rax,<rz1=%r12 +add %rax,%r12 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#11 +# asm 2: add <mulrax=%rax,<rz2=%r13 +add %rax,%r13 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rz2 += mulc +# asm 1: add <mulc=int64#13,<rz2=int64#11 +# asm 2: add <mulc=%r15,<rz2=%r13 +add %r15,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#12 +# asm 2: add <mulrax=%rax,<rz3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rz3 += mulc +# asm 1: add <mulc=int64#13,<rz3=int64#12 +# asm 2: add <mulc=%r15,<rz3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx +adc %rdx,%rcx + +# qhasm: mulx2 = g2_stack +# asm 1: movq <g2_stack=stack64#18,>mulx2=int64#8 +# asm 2: movq <g2_stack=136(%rsp),>mulx2=%r10 +movq 136(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#11 +# asm 2: add <mulrax=%rax,<rz2=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#12 +# asm 2: add <mulrax=%rax,<rz3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rz3 += mulc +# asm 1: add <mulc=int64#13,<rz3=int64#12 +# asm 2: add <mulc=%r15,<rz3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: mulx3 = g3_stack +# asm 1: movq <g3_stack=stack64#19,>mulx3=int64#8 +# asm 2: movq <g3_stack=144(%rsp),>mulx3=%r10 +movq 144(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7 +# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#12 +# asm 2: add <mulrax=%rax,<rz3=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f1_stack +# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7 +# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f2_stack +# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7 +# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = f3_stack +# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7 +# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#13,<mulr6=int64#5 +# asm 2: add <mulc=%r15,<mulr6=%r8 +add %r15,%r8 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#2,>mulrax=int64#7 +# asm 2: mov <mulr4=%rsi,>mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr5=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr6=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr7=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rz0 += mulr4 +# asm 1: add <mulr4=int64#2,<rz0=int64#9 +# asm 2: add <mulr4=%rsi,<rz0=%r11 +add %rsi,%r11 + +# qhasm: carry? rz1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rz1=int64#10 +# asm 2: adc <mulr5=%rcx,<rz1=%r12 +adc %rcx,%r12 + +# qhasm: carry? rz2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rz2=int64#11 +# asm 2: adc <mulr6=%r8,<rz2=%r13 +adc %r8,%r13 + +# qhasm: carry? rz3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rz3=int64#12 +# asm 2: adc <mulr7=%r9,<rz3=%r14 +adc %r9,%r14 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rz0 += mulr8 +# asm 1: add <mulr8=int64#3,<rz0=int64#9 +# asm 2: add <mulr8=%rdx,<rz0=%r11 +add %rdx,%r11 + +# qhasm: carry? rz1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rz1=int64#10 +# asm 2: adc <mulzero=%rsi,<rz1=%r12 +adc %rsi,%r12 + +# qhasm: carry? rz2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rz2=int64#11 +# asm 2: adc <mulzero=%rsi,<rz2=%r13 +adc %rsi,%r13 + +# qhasm: carry? rz3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rz3=int64#12 +# asm 2: adc <mulzero=%rsi,<rz3=%r14 +adc %rsi,%r14 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rz0 += mulzero +# asm 1: add <mulzero=int64#2,<rz0=int64#9 +# asm 2: add <mulzero=%rsi,<rz0=%r11 +add %rsi,%r11 + +# qhasm: *(uint64 *)(rp + 64) = rz0 +# asm 1: movq <rz0=int64#9,64(<rp=int64#1) +# asm 2: movq <rz0=%r11,64(<rp=%rdi) +movq %r11,64(%rdi) + +# qhasm: *(uint64 *)(rp + 72) = rz1 +# asm 1: movq <rz1=int64#10,72(<rp=int64#1) +# asm 2: movq <rz1=%r12,72(<rp=%rdi) +movq %r12,72(%rdi) + +# qhasm: *(uint64 *)(rp + 80) = rz2 +# asm 1: movq <rz2=int64#11,80(<rp=int64#1) +# asm 2: movq <rz2=%r13,80(<rp=%rdi) +movq %r13,80(%rdi) + +# qhasm: *(uint64 *)(rp + 88) = rz3 +# asm 1: movq <rz3=int64#12,88(<rp=int64#1) +# asm 2: movq <rz3=%r14,88(<rp=%rdi) +movq %r14,88(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = e0_stack +# asm 1: movq <e0_stack=stack64#12,>mulx0=int64#8 +# asm 2: movq <e0_stack=88(%rsp),>mulx0=%r10 +movq 88(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7 +# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: rt0 = mulrax +# asm 1: mov <mulrax=int64#7,>rt0=int64#9 +# asm 2: mov <mulrax=%rax,>rt0=%r11 +mov %rax,%r11 + +# qhasm: rt1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rt1=int64#10 +# asm 2: mov <mulrdx=%rdx,>rt1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = h1_stack +# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7 +# asm 2: movq 
<h1_stack=64(%rsp),>mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#10 +# asm 2: add <mulrax=%rax,<rt1=%r12 +add %rax,%r12 + +# qhasm: rt2 = 0 +# asm 1: mov $0,>rt2=int64#11 +# asm 2: mov $0,>rt2=%r13 +mov $0,%r13 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt2=int64#11 +# asm 2: adc <mulrdx=%rdx,<rt2=%r13 +adc %rdx,%r13 + +# qhasm: mulrax = h2_stack +# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7 +# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#11 +# asm 2: add <mulrax=%rax,<rt2=%r13 +add %rax,%r13 + +# qhasm: rt3 = 0 +# asm 1: mov $0,>rt3=int64#12 +# asm 2: mov $0,>rt3=%r14 +mov $0,%r14 + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt3=int64#12 +# asm 2: adc <mulrdx=%rdx,<rt3=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = h3_stack +# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7 +# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#8 +# asm 2: mul <mulx0=%r10 +mul %r10 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#12 +# asm 2: add <mulrax=%rax,<rt3=%r14 +add %rax,%r14 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi +adc %rdx,%rsi + +# qhasm: mulx1 = e1_stack +# asm 1: movq <e1_stack=stack64#13,>mulx1=int64#8 +# asm 2: movq <e1_stack=96(%rsp),>mulx1=%r10 +movq 96(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7 +# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#10 +# asm 2: add <mulrax=%rax,<rt1=%r12 +add %rax,%r12 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h1_stack +# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7 +# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#11 +# asm 2: add <mulrax=%rax,<rt2=%r13 +add %rax,%r13 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rt2 += mulc +# asm 1: add <mulc=int64#13,<rt2=int64#11 +# asm 2: add <mulc=%r15,<rt2=%r13 +add %r15,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h2_stack +# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7 +# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#12 +# asm 2: add <mulrax=%rax,<rt3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt3 += mulc +# asm 1: add <mulc=int64#13,<rt3=int64#12 +# asm 2: add <mulc=%r15,<rt3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h3_stack +# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7 +# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#8 +# asm 2: mul <mulx1=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx +adc %rdx,%rcx + +# qhasm: mulx2 = e2_stack +# asm 1: movq <e2_stack=stack64#14,>mulx2=int64#8 +# asm 2: movq <e2_stack=104(%rsp),>mulx2=%r10 +movq 104(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7 +# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#11 +# asm 2: add <mulrax=%rax,<rt2=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h1_stack +# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7 +# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#12 +# asm 2: add <mulrax=%rax,<rt3=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rt3 += mulc +# asm 1: add <mulc=int64#13,<rt3=int64#12 +# asm 2: add <mulc=%r15,<rt3=%r14 +add %r15,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h2_stack +# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7 +# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h3_stack +# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7 +# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#8 +# asm 2: mul <mulx2=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: mulx3 = e3_stack +# asm 1: movq <e3_stack=stack64#15,>mulx3=int64#8 +# asm 2: movq <e3_stack=112(%rsp),>mulx3=%r10 +movq 112(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7 +# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#12 +# asm 2: add <mulrax=%rax,<rt3=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h1_stack +# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7 +# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#2 +# asm 2: add <mulrax=%rax,<mulr4=%rsi +add %rax,%rsi + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#13,<mulr4=int64#2 +# asm 2: add <mulc=%r15,<mulr4=%rsi +add %r15,%rsi + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h2_stack +# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7 +# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#13,<mulr5=int64#4 +# asm 2: add <mulc=%r15,<mulr5=%rcx +add %r15,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#13 +# asm 2: adc <mulrdx=%rdx,<mulc=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = h3_stack +# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7 +# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#8 +# asm 2: mul <mulx3=%r10 +mul %r10 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#13,<mulr6=int64#5 +# asm 2: add <mulc=%r15,<mulr6=%r8 +add %r15,%r8 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#2,>mulrax=int64#7 +# asm 2: mov <mulr4=%rsi,>mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr5=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr6=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr7=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? rt0 += mulr4 +# asm 1: add <mulr4=int64#2,<rt0=int64#9 +# asm 2: add <mulr4=%rsi,<rt0=%r11 +add %rsi,%r11 + +# qhasm: carry? rt1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rt1=int64#10 +# asm 2: adc <mulr5=%rcx,<rt1=%r12 +adc %rcx,%r12 + +# qhasm: carry? rt2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rt2=int64#11 +# asm 2: adc <mulr6=%r8,<rt2=%r13 +adc %r8,%r13 + +# qhasm: carry? rt3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rt3=int64#12 +# asm 2: adc <mulr7=%r9,<rt3=%r14 +adc %r9,%r14 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rt0 += mulr8 +# asm 1: add <mulr8=int64#3,<rt0=int64#9 +# asm 2: add <mulr8=%rdx,<rt0=%r11 +add %rdx,%r11 + +# qhasm: carry? rt1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt1=int64#10 +# asm 2: adc <mulzero=%rsi,<rt1=%r12 +adc %rsi,%r12 + +# qhasm: carry? rt2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt2=int64#11 +# asm 2: adc <mulzero=%rsi,<rt2=%r13 +adc %rsi,%r13 + +# qhasm: carry? 
rt3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt3=int64#12 +# asm 2: adc <mulzero=%rsi,<rt3=%r14 +adc %rsi,%r14 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rt0 += mulzero +# asm 1: add <mulzero=int64#2,<rt0=int64#9 +# asm 2: add <mulzero=%rsi,<rt0=%r11 +add %rsi,%r11 + +# qhasm: *(uint64 *)(rp + 96) = rt0 +# asm 1: movq <rt0=int64#9,96(<rp=int64#1) +# asm 2: movq <rt0=%r11,96(<rp=%rdi) +movq %r11,96(%rdi) + +# qhasm: *(uint64 *)(rp + 104) = rt1 +# asm 1: movq <rt1=int64#10,104(<rp=int64#1) +# asm 2: movq <rt1=%r12,104(<rp=%rdi) +movq %r12,104(%rdi) + +# qhasm: *(uint64 *)(rp + 112) = rt2 +# asm 1: movq <rt2=int64#11,112(<rp=int64#1) +# asm 2: movq <rt2=%r13,112(<rp=%rdi) +movq %r13,112(%rdi) + +# qhasm: *(uint64 *)(rp + 120) = rt3 +# asm 1: movq <rt3=int64#12,120(<rp=int64#1) +# asm 2: movq <rt3=%r14,120(<rp=%rdi) +movq %r14,120(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_nielsadd_p1p1.s b/ext/ed25519-amd64-asm/ge25519_nielsadd_p1p1.s new file mode 100644 index 00000000..04e9b52b --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_nielsadd_p1p1.s @@ -0,0 +1,3072 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: 
int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: int64 f0 + +# qhasm: int64 f1 + +# qhasm: int64 f2 + +# qhasm: int64 f3 + +# qhasm: stack64 f0_stack + +# qhasm: stack64 f1_stack + +# qhasm: stack64 f2_stack + +# qhasm: stack64 f3_stack + +# qhasm: int64 g0 + +# qhasm: int64 g1 + +# qhasm: int64 g2 + +# qhasm: int64 g3 + +# qhasm: stack64 g0_stack + +# qhasm: stack64 g1_stack + +# qhasm: stack64 g2_stack + +# qhasm: stack64 g3_stack + +# qhasm: int64 h0 + +# qhasm: int64 h1 + +# qhasm: int64 h2 + +# qhasm: int64 h3 + +# qhasm: stack64 h0_stack + +# qhasm: stack64 h1_stack + +# qhasm: stack64 h2_stack + +# qhasm: stack64 h3_stack + +# qhasm: int64 qt0 + +# qhasm: int64 qt1 + +# qhasm: int64 qt2 + +# qhasm: int64 qt3 + +# qhasm: stack64 qt0_stack + +# qhasm: stack64 qt1_stack + +# qhasm: stack64 qt2_stack + +# qhasm: stack64 qt3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1 +.globl crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1 +_crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1: +crypto_sign_ed25519_amd64_64_ge25519_nielsadd_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $128,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq 
<caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov <qp=int64#3,>qp=int64#4 +# asm 2: mov <qp=%rdx,>qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>a0=int64#3 +# asm 2: movq 32(<pp=%rsi),>a0=%rdx +movq 32(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>a1=int64#5 +# asm 2: movq 40(<pp=%rsi),>a1=%r8 +movq 40(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>a2=int64#6 +# asm 2: movq 48(<pp=%rsi),>a2=%r9 +movq 48(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>a3=int64#7 +# asm 2: movq 56(<pp=%rsi),>a3=%rax +movq 56(%rsi),%rax + +# qhasm: b0 = a0 +# asm 1: mov <a0=int64#3,>b0=int64#8 +# asm 2: mov <a0=%rdx,>b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov <a1=int64#5,>b1=int64#9 +# asm 2: mov <a1=%r8,>b1=%r11 +mov %r8,%r11 + +# qhasm: b2 = a2 +# asm 1: mov <a2=int64#6,>b2=int64#10 +# asm 2: mov <a2=%r9,>b2=%r12 +mov %r9,%r12 + +# qhasm: b3 = a3 +# asm 1: mov <a3=int64#7,>b3=int64#11 +# asm 2: mov <a3=%rax,>b3=%r13 +mov %rax,%r13 + +# qhasm: carry? a0 -= *(uint64 *) (pp + 0) +# asm 1: subq 0(<pp=int64#2),<a0=int64#3 +# asm 2: subq 0(<pp=%rsi),<a0=%rdx +subq 0(%rsi),%rdx + +# qhasm: carry? a1 -= *(uint64 *) (pp + 8) - carry +# asm 1: sbbq 8(<pp=int64#2),<a1=int64#5 +# asm 2: sbbq 8(<pp=%rsi),<a1=%r8 +sbbq 8(%rsi),%r8 + +# qhasm: carry? a2 -= *(uint64 *) (pp + 16) - carry +# asm 1: sbbq 16(<pp=int64#2),<a2=int64#6 +# asm 2: sbbq 16(<pp=%rsi),<a2=%r9 +sbbq 16(%rsi),%r9 + +# qhasm: carry? a3 -= *(uint64 *) (pp + 24) - carry +# asm 1: sbbq 24(<pp=int64#2),<a3=int64#7 +# asm 2: sbbq 24(<pp=%rsi),<a3=%rax +sbbq 24(%rsi),%rax + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#12,<subt1=int64#13 +# asm 2: cmovae <subt0=%r14,<subt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? a0 -= subt1 +# asm 1: sub <subt1=int64#13,<a0=int64#3 +# asm 2: sub <subt1=%r15,<a0=%rdx +sub %r15,%rdx + +# qhasm: carry? a1 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a1=int64#5 +# asm 2: sbb <subt0=%r14,<a1=%r8 +sbb %r14,%r8 + +# qhasm: carry? a2 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a2=int64#6 +# asm 2: sbb <subt0=%r14,<a2=%r9 +sbb %r14,%r9 + +# qhasm: carry? 
a3 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a3=int64#7 +# asm 2: sbb <subt0=%r14,<a3=%rax +sbb %r14,%rax + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#13,<subt0=int64#12 +# asm 2: cmovc <subt1=%r15,<subt0=%r14 +cmovc %r15,%r14 + +# qhasm: a0 -= subt0 +# asm 1: sub <subt0=int64#12,<a0=int64#3 +# asm 2: sub <subt0=%r14,<a0=%rdx +sub %r14,%rdx + +# qhasm: carry? b0 += *(uint64 *) (pp + 0) +# asm 1: addq 0(<pp=int64#2),<b0=int64#8 +# asm 2: addq 0(<pp=%rsi),<b0=%r10 +addq 0(%rsi),%r10 + +# qhasm: carry? b1 += *(uint64 *) (pp + 8) + carry +# asm 1: adcq 8(<pp=int64#2),<b1=int64#9 +# asm 2: adcq 8(<pp=%rsi),<b1=%r11 +adcq 8(%rsi),%r11 + +# qhasm: carry? b2 += *(uint64 *) (pp + 16) + carry +# asm 1: adcq 16(<pp=int64#2),<b2=int64#10 +# asm 2: adcq 16(<pp=%rsi),<b2=%r12 +adcq 16(%rsi),%r12 + +# qhasm: carry? b3 += *(uint64 *) (pp + 24) + carry +# asm 1: adcq 24(<pp=int64#2),<b3=int64#11 +# asm 2: adcq 24(<pp=%rsi),<b3=%r13 +adcq 24(%rsi),%r13 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#12,<addt1=int64#13 +# asm 2: cmovae <addt0=%r14,<addt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? b0 += addt1 +# asm 1: add <addt1=int64#13,<b0=int64#8 +# asm 2: add <addt1=%r15,<b0=%r10 +add %r15,%r10 + +# qhasm: carry? b1 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b1=int64#9 +# asm 2: adc <addt0=%r14,<b1=%r11 +adc %r14,%r11 + +# qhasm: carry? b2 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b2=int64#10 +# asm 2: adc <addt0=%r14,<b2=%r12 +adc %r14,%r12 + +# qhasm: carry? b3 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b3=int64#11 +# asm 2: adc <addt0=%r14,<b3=%r13 +adc %r14,%r13 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#13,<addt0=int64#12 +# asm 2: cmovc <addt1=%r15,<addt0=%r14 +cmovc %r15,%r14 + +# qhasm: b0 += addt0 +# asm 1: add <addt0=int64#12,<b0=int64#8 +# asm 2: add <addt0=%r14,<b0=%r10 +add %r14,%r10 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#3,>a0_stack=stack64#8 +# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#5,>a1_stack=stack64#9 +# asm 2: movq <a1=%r8,>a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#6,>a2_stack=stack64#10 +# asm 2: movq <a2=%r9,>a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#7,>a3_stack=stack64#11 +# asm 2: movq <a3=%rax,>a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq <b0=int64#8,>b0_stack=stack64#12 +# asm 2: movq <b0=%r10,>b0_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq <b1=int64#9,>b1_stack=stack64#13 +# asm 2: movq <b1=%r11,>b1_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq <b2=int64#10,>b2_stack=stack64#14 +# asm 2: movq <b2=%r12,>b2_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq <b3=int64#11,>b3_stack=stack64#15 +# asm 2: movq <b3=%r13,>b3_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 
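+# note (added annotation, a reading of the generated code below, not upstream qhasm output):
+# what follows is the 4x64-bit schoolbook field multiplication used throughout these files.
+# It multiplies a = (Y1 - X1), computed above, by the first field element of q, accumulating
+# an 8-limb product in a0..a3 and mulr4..mulr8, then reduces modulo 2^255 - 19 by folding the
+# high limbs back in with the constant crypto_sign_ed25519_amd64_64_38, since
+# 2^256 = 38 (mod 2^255 - 19).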
+mov $0,%r11 + +# qhasm: mulx0 = a0_stack +# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10 +# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: a0 = mulrax +# asm 1: mov <mulrax=int64#7,>a0=int64#11 +# asm 2: mov <mulrax=%rax,>a0=%r13 +mov %rax,%r13 + +# qhasm: a1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>a1=int64#12 +# asm 2: mov <mulrdx=%rdx,>a1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#12 +# asm 2: add <mulrax=%rax,<a1=%r14 +add %rax,%r14 + +# qhasm: a2 = 0 +# asm 1: mov $0,>a2=int64#13 +# asm 2: mov $0,>a2=%r15 +mov $0,%r15 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a2=int64#13 +# asm 2: adc <mulrdx=%rdx,<a2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: a3 = 0 +# asm 1: mov $0,>a3=int64#14 +# asm 2: mov $0,>a3=%rbx +mov $0,%rbx + +# qhasm: a3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a3=int64#14 +# asm 2: adc <mulrdx=%rdx,<a3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = a1_stack +# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10 +# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#12 +# asm 2: add <mulrax=%rax,<a1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? 
a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a2 += mulc +# asm 1: add <mulc=int64#15,<a2=int64#13 +# asm 2: add <mulc=%rbp,<a2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a3 += mulc +# asm 1: add <mulc=int64#15,<a3=int64#14 +# asm 2: add <mulc=%rbp,<a3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = a2_stack +# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10 +# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
a3 += mulc +# asm 1: add <mulc=int64#15,<a3=int64#14 +# asm 2: add <mulc=%rbp,<a3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = a3_stack +# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10 +# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? a0 += mulr4 +# asm 1: add <mulr4=int64#5,<a0=int64#11 +# asm 2: add <mulr4=%r8,<a0=%r13 +add %r8,%r13 + +# qhasm: carry? a1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<a1=int64#12 +# asm 2: adc <mulr5=%r9,<a1=%r14 +adc %r9,%r14 + +# qhasm: carry? a2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<a2=int64#13 +# asm 2: adc <mulr6=%r10,<a2=%r15 +adc %r10,%r15 + +# qhasm: carry? a3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<a3=int64#14 +# asm 2: adc <mulr7=%r11,<a3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? a0 += mulr8 +# asm 1: add <mulr8=int64#5,<a0=int64#11 +# asm 2: add <mulr8=%r8,<a0=%r13 +add %r8,%r13 + +# qhasm: carry? a1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a1=int64#12 +# asm 2: adc <mulzero=%rdx,<a1=%r14 +adc %rdx,%r14 + +# qhasm: carry? a2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a2=int64#13 +# asm 2: adc <mulzero=%rdx,<a2=%r15 +adc %rdx,%r15 + +# qhasm: carry? 
a3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a3=int64#14 +# asm 2: adc <mulzero=%rdx,<a3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: a0 += mulzero +# asm 1: add <mulzero=int64#3,<a0=int64#11 +# asm 2: add <mulzero=%rdx,<a0=%r13 +add %rdx,%r13 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#11,>a0_stack=stack64#8 +# asm 2: movq <a0=%r13,>a0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#12,>a1_stack=stack64#9 +# asm 2: movq <a1=%r14,>a1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#13,>a2_stack=stack64#10 +# asm 2: movq <a2=%r15,>a2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#14,>a3_stack=stack64#11 +# asm 2: movq <a3=%rbx,>a3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = b0_stack +# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10 +# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12 +movq 88(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: e0 = mulrax +# asm 1: mov <mulrax=int64#7,>e0=int64#11 +# asm 2: mov <mulrax=%rax,>e0=%r13 +mov %rax,%r13 + +# qhasm: e1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>e1=int64#12 +# asm 2: mov <mulrdx=%rdx,>e1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? e1 += mulrax +# asm 1: add <mulrax=int64#7,<e1=int64#12 +# asm 2: add <mulrax=%rax,<e1=%r14 +add %rax,%r14 + +# qhasm: e2 = 0 +# asm 1: mov $0,>e2=int64#13 +# asm 2: mov $0,>e2=%r15 +mov $0,%r15 + +# qhasm: e2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<e2=int64#13 +# asm 2: adc <mulrdx=%rdx,<e2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
e2 += mulrax +# asm 1: add <mulrax=int64#7,<e2=int64#13 +# asm 2: add <mulrax=%rax,<e2=%r15 +add %rax,%r15 + +# qhasm: e3 = 0 +# asm 1: mov $0,>e3=int64#14 +# asm 2: mov $0,>e3=%rbx +mov $0,%rbx + +# qhasm: e3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<e3=int64#14 +# asm 2: adc <mulrdx=%rdx,<e3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#14 +# asm 2: add <mulrax=%rax,<e3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = b1_stack +# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10 +# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12 +movq 96(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? e1 += mulrax +# asm 1: add <mulrax=int64#7,<e1=int64#12 +# asm 2: add <mulrax=%rax,<e1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? e2 += mulrax +# asm 1: add <mulrax=int64#7,<e2=int64#13 +# asm 2: add <mulrax=%rax,<e2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? e2 += mulc +# asm 1: add <mulc=int64#15,<e2=int64#13 +# asm 2: add <mulc=%rbp,<e2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#14 +# asm 2: add <mulrax=%rax,<e3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
e3 += mulc +# asm 1: add <mulc=int64#15,<e3=int64#14 +# asm 2: add <mulc=%rbp,<e3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = b2_stack +# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10 +# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12 +movq 104(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? e2 += mulrax +# asm 1: add <mulrax=int64#7,<e2=int64#13 +# asm 2: add <mulrax=%rax,<e2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#14 +# asm 2: add <mulrax=%rax,<e3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? e3 += mulc +# asm 1: add <mulc=int64#15,<e3=int64#14 +# asm 2: add <mulc=%rbp,<e3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = b3_stack +# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10 +# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12 +movq 112(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? e3 += mulrax +# asm 1: add <mulrax=int64#7,<e3=int64#14 +# asm 2: add <mulrax=%rax,<e3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
e0 += mulr4 +# asm 1: add <mulr4=int64#5,<e0=int64#11 +# asm 2: add <mulr4=%r8,<e0=%r13 +add %r8,%r13 + +# qhasm: carry? e1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<e1=int64#12 +# asm 2: adc <mulr5=%r9,<e1=%r14 +adc %r9,%r14 + +# qhasm: carry? e2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<e2=int64#13 +# asm 2: adc <mulr6=%r10,<e2=%r15 +adc %r10,%r15 + +# qhasm: carry? e3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<e3=int64#14 +# asm 2: adc <mulr7=%r11,<e3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? e0 += mulr8 +# asm 1: add <mulr8=int64#5,<e0=int64#11 +# asm 2: add <mulr8=%r8,<e0=%r13 +add %r8,%r13 + +# qhasm: carry? e1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<e1=int64#12 +# asm 2: adc <mulzero=%rdx,<e1=%r14 +adc %rdx,%r14 + +# qhasm: carry? e2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<e2=int64#13 +# asm 2: adc <mulzero=%rdx,<e2=%r15 +adc %rdx,%r15 + +# qhasm: carry? e3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<e3=int64#14 +# asm 2: adc <mulzero=%rdx,<e3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: e0 += mulzero +# asm 1: add <mulzero=int64#3,<e0=int64#11 +# asm 2: add <mulzero=%rdx,<e0=%r13 +add %rdx,%r13 + +# qhasm: h0 = e0 +# asm 1: mov <e0=int64#11,>h0=int64#3 +# asm 2: mov <e0=%r13,>h0=%rdx +mov %r13,%rdx + +# qhasm: h1 = e1 +# asm 1: mov <e1=int64#12,>h1=int64#5 +# asm 2: mov <e1=%r14,>h1=%r8 +mov %r14,%r8 + +# qhasm: h2 = e2 +# asm 1: mov <e2=int64#13,>h2=int64#6 +# asm 2: mov <e2=%r15,>h2=%r9 +mov %r15,%r9 + +# qhasm: h3 = e3 +# asm 1: mov <e3=int64#14,>h3=int64#7 +# asm 2: mov <e3=%rbx,>h3=%rax +mov %rbx,%rax + +# qhasm: carry? e0 -= a0_stack +# asm 1: subq <a0_stack=stack64#8,<e0=int64#11 +# asm 2: subq <a0_stack=56(%rsp),<e0=%r13 +subq 56(%rsp),%r13 + +# qhasm: carry? e1 -= a1_stack - carry +# asm 1: sbbq <a1_stack=stack64#9,<e1=int64#12 +# asm 2: sbbq <a1_stack=64(%rsp),<e1=%r14 +sbbq 64(%rsp),%r14 + +# qhasm: carry? e2 -= a2_stack - carry +# asm 1: sbbq <a2_stack=stack64#10,<e2=int64#13 +# asm 2: sbbq <a2_stack=72(%rsp),<e2=%r15 +sbbq 72(%rsp),%r15 + +# qhasm: carry? e3 -= a3_stack - carry +# asm 1: sbbq <a3_stack=stack64#11,<e3=int64#14 +# asm 2: sbbq <a3_stack=80(%rsp),<e3=%rbx +sbbq 80(%rsp),%rbx + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#8 +# asm 2: mov $0,>subt0=%r10 +mov $0,%r10 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#9 +# asm 2: mov $38,>subt1=%r11 +mov $38,%r11 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#8,<subt1=int64#9 +# asm 2: cmovae <subt0=%r10,<subt1=%r11 +cmovae %r10,%r11 + +# qhasm: carry? e0 -= subt1 +# asm 1: sub <subt1=int64#9,<e0=int64#11 +# asm 2: sub <subt1=%r11,<e0=%r13 +sub %r11,%r13 + +# qhasm: carry? e1 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<e1=int64#12 +# asm 2: sbb <subt0=%r10,<e1=%r14 +sbb %r10,%r14 + +# qhasm: carry? 
e2 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<e2=int64#13 +# asm 2: sbb <subt0=%r10,<e2=%r15 +sbb %r10,%r15 + +# qhasm: carry? e3 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<e3=int64#14 +# asm 2: sbb <subt0=%r10,<e3=%rbx +sbb %r10,%rbx + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#9,<subt0=int64#8 +# asm 2: cmovc <subt1=%r11,<subt0=%r10 +cmovc %r11,%r10 + +# qhasm: e0 -= subt0 +# asm 1: sub <subt0=int64#8,<e0=int64#11 +# asm 2: sub <subt0=%r10,<e0=%r13 +sub %r10,%r13 + +# qhasm: carry? h0 += a0_stack +# asm 1: addq <a0_stack=stack64#8,<h0=int64#3 +# asm 2: addq <a0_stack=56(%rsp),<h0=%rdx +addq 56(%rsp),%rdx + +# qhasm: carry? h1 += a1_stack + carry +# asm 1: adcq <a1_stack=stack64#9,<h1=int64#5 +# asm 2: adcq <a1_stack=64(%rsp),<h1=%r8 +adcq 64(%rsp),%r8 + +# qhasm: carry? h2 += a2_stack + carry +# asm 1: adcq <a2_stack=stack64#10,<h2=int64#6 +# asm 2: adcq <a2_stack=72(%rsp),<h2=%r9 +adcq 72(%rsp),%r9 + +# qhasm: carry? h3 += a3_stack + carry +# asm 1: adcq <a3_stack=stack64#11,<h3=int64#7 +# asm 2: adcq <a3_stack=80(%rsp),<h3=%rax +adcq 80(%rsp),%rax + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#8 +# asm 2: mov $0,>addt0=%r10 +mov $0,%r10 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#9 +# asm 2: mov $38,>addt1=%r11 +mov $38,%r11 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#8,<addt1=int64#9 +# asm 2: cmovae <addt0=%r10,<addt1=%r11 +cmovae %r10,%r11 + +# qhasm: carry? h0 += addt1 +# asm 1: add <addt1=int64#9,<h0=int64#3 +# asm 2: add <addt1=%r11,<h0=%rdx +add %r11,%rdx + +# qhasm: carry? h1 += addt0 + carry +# asm 1: adc <addt0=int64#8,<h1=int64#5 +# asm 2: adc <addt0=%r10,<h1=%r8 +adc %r10,%r8 + +# qhasm: carry? h2 += addt0 + carry +# asm 1: adc <addt0=int64#8,<h2=int64#6 +# asm 2: adc <addt0=%r10,<h2=%r9 +adc %r10,%r9 + +# qhasm: carry? 
h3 += addt0 + carry +# asm 1: adc <addt0=int64#8,<h3=int64#7 +# asm 2: adc <addt0=%r10,<h3=%rax +adc %r10,%rax + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#9,<addt0=int64#8 +# asm 2: cmovc <addt1=%r11,<addt0=%r10 +cmovc %r11,%r10 + +# qhasm: h0 += addt0 +# asm 1: add <addt0=int64#8,<h0=int64#3 +# asm 2: add <addt0=%r10,<h0=%rdx +add %r10,%rdx + +# qhasm: *(uint64 *)(rp + 64) = h0 +# asm 1: movq <h0=int64#3,64(<rp=int64#1) +# asm 2: movq <h0=%rdx,64(<rp=%rdi) +movq %rdx,64(%rdi) + +# qhasm: *(uint64 *)(rp + 72) = h1 +# asm 1: movq <h1=int64#5,72(<rp=int64#1) +# asm 2: movq <h1=%r8,72(<rp=%rdi) +movq %r8,72(%rdi) + +# qhasm: *(uint64 *)(rp + 80) = h2 +# asm 1: movq <h2=int64#6,80(<rp=int64#1) +# asm 2: movq <h2=%r9,80(<rp=%rdi) +movq %r9,80(%rdi) + +# qhasm: *(uint64 *)(rp + 88) = h3 +# asm 1: movq <h3=int64#7,88(<rp=int64#1) +# asm 2: movq <h3=%rax,88(<rp=%rdi) +movq %rax,88(%rdi) + +# qhasm: *(uint64 *)(rp + 0) = e0 +# asm 1: movq <e0=int64#11,0(<rp=int64#1) +# asm 2: movq <e0=%r13,0(<rp=%rdi) +movq %r13,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = e1 +# asm 1: movq <e1=int64#12,8(<rp=int64#1) +# asm 2: movq <e1=%r14,8(<rp=%rdi) +movq %r14,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = e2 +# asm 1: movq <e2=int64#13,16(<rp=int64#1) +# asm 2: movq <e2=%r15,16(<rp=%rdi) +movq %r15,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = e3 +# asm 1: movq <e3=int64#14,24(<rp=int64#1) +# asm 2: movq <e3=%rbx,24(<rp=%rdi) +movq %rbx,24(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulx0=int64#10 +# asm 2: movq 96(<pp=%rsi),>mulx0=%r12 +movq 96(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: c0 = mulrax +# asm 1: mov <mulrax=int64#7,>c0=int64#11 +# asm 2: mov <mulrax=%rax,>c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>c1=int64#12 +# asm 2: mov <mulrdx=%rdx,>c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: c2 = 0 +# asm 1: mov $0,>c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c2=int64#13 +# asm 2: adc <mulrdx=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: c3 = 0 +# asm 1: mov $0,>c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c3=int64#14 +# asm 2: adc <mulrdx=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulx1=int64#10 +# asm 2: movq 104(<pp=%rsi),>mulx1=%r12 +movq 104(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c2 += mulc +# asm 1: add <mulc=int64#15,<c2=int64#13 +# asm 2: add <mulc=%rbp,<c2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulx2=int64#10 +# asm 2: movq 112(<pp=%rsi),>mulx2=%r12 +movq 112(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulx3=int64#10 +# asm 2: movq 120(<pp=%rsi),>mulx3=%r12 +movq 120(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
c0 += mulr4 +# asm 1: add <mulr4=int64#4,<c0=int64#11 +# asm 2: add <mulr4=%rcx,<c0=%r13 +add %rcx,%r13 + +# qhasm: carry? c1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<c1=int64#12 +# asm 2: adc <mulr5=%r8,<c1=%r14 +adc %r8,%r14 + +# qhasm: carry? c2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<c2=int64#13 +# asm 2: adc <mulr6=%r9,<c2=%r15 +adc %r9,%r15 + +# qhasm: carry? c3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<c3=int64#14 +# asm 2: adc <mulr7=%r10,<c3=%rbx +adc %r10,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? c0 += mulr8 +# asm 1: add <mulr8=int64#4,<c0=int64#11 +# asm 2: add <mulr8=%rcx,<c0=%r13 +add %rcx,%r13 + +# qhasm: carry? c1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c1=int64#12 +# asm 2: adc <mulzero=%rdx,<c1=%r14 +adc %rdx,%r14 + +# qhasm: carry? c2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c2=int64#13 +# asm 2: adc <mulzero=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: carry? c3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c3=int64#14 +# asm 2: adc <mulzero=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: c0 += mulzero +# asm 1: add <mulzero=int64#3,<c0=int64#11 +# asm 2: add <mulzero=%rdx,<c0=%r13 +add %rdx,%r13 + +# qhasm: f0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>f0=int64#3 +# asm 2: movq 64(<pp=%rsi),>f0=%rdx +movq 64(%rsi),%rdx + +# qhasm: f1 = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>f1=int64#4 +# asm 2: movq 72(<pp=%rsi),>f1=%rcx +movq 72(%rsi),%rcx + +# qhasm: f2 = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>f2=int64#5 +# asm 2: movq 80(<pp=%rsi),>f2=%r8 +movq 80(%rsi),%r8 + +# qhasm: f3 = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>f3=int64#2 +# asm 2: movq 88(<pp=%rsi),>f3=%rsi +movq 88(%rsi),%rsi + +# qhasm: carry? f0 += f0 +# asm 1: add <f0=int64#3,<f0=int64#3 +# asm 2: add <f0=%rdx,<f0=%rdx +add %rdx,%rdx + +# qhasm: carry? f1 += f1 + carry +# asm 1: adc <f1=int64#4,<f1=int64#4 +# asm 2: adc <f1=%rcx,<f1=%rcx +adc %rcx,%rcx + +# qhasm: carry? f2 += f2 + carry +# asm 1: adc <f2=int64#5,<f2=int64#5 +# asm 2: adc <f2=%r8,<f2=%r8 +adc %r8,%r8 + +# qhasm: carry? f3 += f3 + carry +# asm 1: adc <f3=int64#2,<f3=int64#2 +# asm 2: adc <f3=%rsi,<f3=%rsi +adc %rsi,%rsi + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#6,<addt1=int64#7 +# asm 2: cmovae <addt0=%r9,<addt1=%rax +cmovae %r9,%rax + +# qhasm: carry? f0 += addt1 +# asm 1: add <addt1=int64#7,<f0=int64#3 +# asm 2: add <addt1=%rax,<f0=%rdx +add %rax,%rdx + +# qhasm: carry? f1 += addt0 + carry +# asm 1: adc <addt0=int64#6,<f1=int64#4 +# asm 2: adc <addt0=%r9,<f1=%rcx +adc %r9,%rcx + +# qhasm: carry? 
f2 += addt0 + carry +# asm 1: adc <addt0=int64#6,<f2=int64#5 +# asm 2: adc <addt0=%r9,<f2=%r8 +adc %r9,%r8 + +# qhasm: carry? f3 += addt0 + carry +# asm 1: adc <addt0=int64#6,<f3=int64#2 +# asm 2: adc <addt0=%r9,<f3=%rsi +adc %r9,%rsi + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#7,<addt0=int64#6 +# asm 2: cmovc <addt1=%rax,<addt0=%r9 +cmovc %rax,%r9 + +# qhasm: f0 += addt0 +# asm 1: add <addt0=int64#6,<f0=int64#3 +# asm 2: add <addt0=%r9,<f0=%rdx +add %r9,%rdx + +# qhasm: g0 = f0 +# asm 1: mov <f0=int64#3,>g0=int64#6 +# asm 2: mov <f0=%rdx,>g0=%r9 +mov %rdx,%r9 + +# qhasm: g1 = f1 +# asm 1: mov <f1=int64#4,>g1=int64#7 +# asm 2: mov <f1=%rcx,>g1=%rax +mov %rcx,%rax + +# qhasm: g2 = f2 +# asm 1: mov <f2=int64#5,>g2=int64#8 +# asm 2: mov <f2=%r8,>g2=%r10 +mov %r8,%r10 + +# qhasm: g3 = f3 +# asm 1: mov <f3=int64#2,>g3=int64#9 +# asm 2: mov <f3=%rsi,>g3=%r11 +mov %rsi,%r11 + +# qhasm: carry? f0 -= c0 +# asm 1: sub <c0=int64#11,<f0=int64#3 +# asm 2: sub <c0=%r13,<f0=%rdx +sub %r13,%rdx + +# qhasm: carry? f1 -= c1 - carry +# asm 1: sbb <c1=int64#12,<f1=int64#4 +# asm 2: sbb <c1=%r14,<f1=%rcx +sbb %r14,%rcx + +# qhasm: carry? f2 -= c2 - carry +# asm 1: sbb <c2=int64#13,<f2=int64#5 +# asm 2: sbb <c2=%r15,<f2=%r8 +sbb %r15,%r8 + +# qhasm: carry? f3 -= c3 - carry +# asm 1: sbb <c3=int64#14,<f3=int64#2 +# asm 2: sbb <c3=%rbx,<f3=%rsi +sbb %rbx,%rsi + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#10 +# asm 2: mov $0,>subt0=%r12 +mov $0,%r12 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#15 +# asm 2: mov $38,>subt1=%rbp +mov $38,%rbp + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#10,<subt1=int64#15 +# asm 2: cmovae <subt0=%r12,<subt1=%rbp +cmovae %r12,%rbp + +# qhasm: carry? f0 -= subt1 +# asm 1: sub <subt1=int64#15,<f0=int64#3 +# asm 2: sub <subt1=%rbp,<f0=%rdx +sub %rbp,%rdx + +# qhasm: carry? f1 -= subt0 - carry +# asm 1: sbb <subt0=int64#10,<f1=int64#4 +# asm 2: sbb <subt0=%r12,<f1=%rcx +sbb %r12,%rcx + +# qhasm: carry? f2 -= subt0 - carry +# asm 1: sbb <subt0=int64#10,<f2=int64#5 +# asm 2: sbb <subt0=%r12,<f2=%r8 +sbb %r12,%r8 + +# qhasm: carry? f3 -= subt0 - carry +# asm 1: sbb <subt0=int64#10,<f3=int64#2 +# asm 2: sbb <subt0=%r12,<f3=%rsi +sbb %r12,%rsi + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#15,<subt0=int64#10 +# asm 2: cmovc <subt1=%rbp,<subt0=%r12 +cmovc %rbp,%r12 + +# qhasm: f0 -= subt0 +# asm 1: sub <subt0=int64#10,<f0=int64#3 +# asm 2: sub <subt0=%r12,<f0=%rdx +sub %r12,%rdx + +# qhasm: carry? g0 += c0 +# asm 1: add <c0=int64#11,<g0=int64#6 +# asm 2: add <c0=%r13,<g0=%r9 +add %r13,%r9 + +# qhasm: carry? g1 += c1 + carry +# asm 1: adc <c1=int64#12,<g1=int64#7 +# asm 2: adc <c1=%r14,<g1=%rax +adc %r14,%rax + +# qhasm: carry? g2 += c2 + carry +# asm 1: adc <c2=int64#13,<g2=int64#8 +# asm 2: adc <c2=%r15,<g2=%r10 +adc %r15,%r10 + +# qhasm: carry? g3 += c3 + carry +# asm 1: adc <c3=int64#14,<g3=int64#9 +# asm 2: adc <c3=%rbx,<g3=%r11 +adc %rbx,%r11 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#10 +# asm 2: mov $0,>addt0=%r12 +mov $0,%r12 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#11 +# asm 2: mov $38,>addt1=%r13 +mov $38,%r13 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#10,<addt1=int64#11 +# asm 2: cmovae <addt0=%r12,<addt1=%r13 +cmovae %r12,%r13 + +# qhasm: carry? g0 += addt1 +# asm 1: add <addt1=int64#11,<g0=int64#6 +# asm 2: add <addt1=%r13,<g0=%r9 +add %r13,%r9 + +# qhasm: carry? 
g1 += addt0 + carry +# asm 1: adc <addt0=int64#10,<g1=int64#7 +# asm 2: adc <addt0=%r12,<g1=%rax +adc %r12,%rax + +# qhasm: carry? g2 += addt0 + carry +# asm 1: adc <addt0=int64#10,<g2=int64#8 +# asm 2: adc <addt0=%r12,<g2=%r10 +adc %r12,%r10 + +# qhasm: carry? g3 += addt0 + carry +# asm 1: adc <addt0=int64#10,<g3=int64#9 +# asm 2: adc <addt0=%r12,<g3=%r11 +adc %r12,%r11 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#11,<addt0=int64#10 +# asm 2: cmovc <addt1=%r13,<addt0=%r12 +cmovc %r13,%r12 + +# qhasm: g0 += addt0 +# asm 1: add <addt0=int64#10,<g0=int64#6 +# asm 2: add <addt0=%r12,<g0=%r9 +add %r12,%r9 + +# qhasm: *(uint64 *)(rp + 32) = g0 +# asm 1: movq <g0=int64#6,32(<rp=int64#1) +# asm 2: movq <g0=%r9,32(<rp=%rdi) +movq %r9,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = g1 +# asm 1: movq <g1=int64#7,40(<rp=int64#1) +# asm 2: movq <g1=%rax,40(<rp=%rdi) +movq %rax,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = g2 +# asm 1: movq <g2=int64#8,48(<rp=int64#1) +# asm 2: movq <g2=%r10,48(<rp=%rdi) +movq %r10,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = g3 +# asm 1: movq <g3=int64#9,56(<rp=int64#1) +# asm 2: movq <g3=%r11,56(<rp=%rdi) +movq %r11,56(%rdi) + +# qhasm: *(uint64 *)(rp + 96) = f0 +# asm 1: movq <f0=int64#3,96(<rp=int64#1) +# asm 2: movq <f0=%rdx,96(<rp=%rdi) +movq %rdx,96(%rdi) + +# qhasm: *(uint64 *)(rp + 104) = f1 +# asm 1: movq <f1=int64#4,104(<rp=int64#1) +# asm 2: movq <f1=%rcx,104(<rp=%rdi) +movq %rcx,104(%rdi) + +# qhasm: *(uint64 *)(rp + 112) = f2 +# asm 1: movq <f2=int64#5,112(<rp=int64#1) +# asm 2: movq <f2=%r8,112(<rp=%rdi) +movq %r8,112(%rdi) + +# qhasm: *(uint64 *)(rp + 120) = f3 +# asm 1: movq <f3=int64#2,120(<rp=int64#1) +# asm 2: movq <f3=%rsi,120(<rp=%rdi) +movq %rsi,120(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_p1p1_to_p2.s b/ext/ed25519-amd64-asm/ge25519_p1p1_to_p2.s new file mode 100644 index 00000000..c3a1bdd7 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_p1p1_to_p2.s @@ -0,0 +1,2236 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# 
qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2 +.globl crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2 +_crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2: +crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 0(<pp=%rsi),>mulx0=%r11 +movq 0(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: rx0 = mulrax +# asm 1: mov <mulrax=int64#7,>rx0=int64#10 +# asm 2: mov <mulrax=%rax,>rx0=%r12 +mov %rax,%r12 + +# qhasm: rx1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rx1=int64#11 +# asm 2: mov <mulrdx=%rdx,>rx1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: 
movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#11 +# asm 2: add <mulrax=%rax,<rx1=%r13 +add %rax,%r13 + +# qhasm: rx2 = 0 +# asm 1: mov $0,>rx2=int64#12 +# asm 2: mov $0,>rx2=%r14 +mov $0,%r14 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx2=int64#12 +# asm 2: adc <mulrdx=%rdx,<rx2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#12 +# asm 2: add <mulrax=%rax,<rx2=%r14 +add %rax,%r14 + +# qhasm: rx3 = 0 +# asm 1: mov $0,>rx3=int64#13 +# asm 2: mov $0,>rx3=%r15 +mov $0,%r15 + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx3=int64#13 +# asm 2: adc <mulrdx=%rdx,<rx3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 8) +# asm 1: movq 8(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 8(<pp=%rsi),>mulx1=%r11 +movq 8(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#11 +# asm 2: add <mulrax=%rax,<rx1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#12 +# asm 2: add <mulrax=%rax,<rx2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rx2 += mulc +# asm 1: add <mulc=int64#14,<rx2=int64#12 +# asm 2: add <mulc=%rbx,<rx2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#14,<rx3=int64#13 +# asm 2: add <mulc=%rbx,<rx3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 16(<pp=%rsi),>mulx2=%r11 +movq 16(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#12 +# asm 2: add <mulrax=%rax,<rx2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rx3 += mulc +# asm 1: add <mulc=int64#14,<rx3=int64#13 +# asm 2: add <mulc=%rbx,<rx3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 24(<pp=%rsi),>mulx3=%r11 +movq 24(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? rx0 += mulr4 +# asm 1: add <mulr4=int64#4,<rx0=int64#10 +# asm 2: add <mulr4=%rcx,<rx0=%r12 +add %rcx,%r12 + +# qhasm: carry? rx1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<rx1=int64#11 +# asm 2: adc <mulr5=%r8,<rx1=%r13 +adc %r8,%r13 + +# qhasm: carry? rx2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<rx2=int64#12 +# asm 2: adc <mulr6=%r9,<rx2=%r14 +adc %r9,%r14 + +# qhasm: carry? rx3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<rx3=int64#13 +# asm 2: adc <mulr7=%r10,<rx3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? rx0 += mulr8 +# asm 1: add <mulr8=int64#4,<rx0=int64#10 +# asm 2: add <mulr8=%rcx,<rx0=%r12 +add %rcx,%r12 + +# qhasm: carry? rx1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx1=int64#11 +# asm 2: adc <mulzero=%rdx,<rx1=%r13 +adc %rdx,%r13 + +# qhasm: carry? rx2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx2=int64#12 +# asm 2: adc <mulzero=%rdx,<rx2=%r14 +adc %rdx,%r14 + +# qhasm: carry? 
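+# The multiplications by crypto_sign_ed25519_amd64_64_38 fold the upper
+# four limbs (mulr4..mulr7) back into the lower four, using
+# 2^256 = 38 (mod 2^255 - 19). The carry that spills into mulr8, and the
+# final carry caught in mulzero, are folded the same way (times 38), so
+# the result fits in four limbs again; it is congruent mod 2^255 - 19 but
+# not necessarily fully reduced below the prime.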
rx3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx3=int64#13 +# asm 2: adc <mulzero=%rdx,<rx3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: rx0 += mulzero +# asm 1: add <mulzero=int64#3,<rx0=int64#10 +# asm 2: add <mulzero=%rdx,<rx0=%r12 +add %rdx,%r12 + +# qhasm: *(uint64 *)(rp + 0) = rx0 +# asm 1: movq <rx0=int64#10,0(<rp=int64#1) +# asm 2: movq <rx0=%r12,0(<rp=%rdi) +movq %r12,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = rx1 +# asm 1: movq <rx1=int64#11,8(<rp=int64#1) +# asm 2: movq <rx1=%r13,8(<rp=%rdi) +movq %r13,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = rx2 +# asm 1: movq <rx2=int64#12,16(<rp=int64#1) +# asm 2: movq <rx2=%r14,16(<rp=%rdi) +movq %r14,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = rx3 +# asm 1: movq <rx3=int64#13,24(<rp=int64#1) +# asm 2: movq <rx3=%r15,24(<rp=%rdi) +movq %r15,24(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 64(<pp=%rsi),>mulx0=%r11 +movq 64(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: ry0 = mulrax +# asm 1: mov <mulrax=int64#7,>ry0=int64#10 +# asm 2: mov <mulrax=%rax,>ry0=%r12 +mov %rax,%r12 + +# qhasm: ry1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>ry1=int64#11 +# asm 2: mov <mulrdx=%rdx,>ry1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? ry1 += mulrax +# asm 1: add <mulrax=int64#7,<ry1=int64#11 +# asm 2: add <mulrax=%rax,<ry1=%r13 +add %rax,%r13 + +# qhasm: ry2 = 0 +# asm 1: mov $0,>ry2=int64#12 +# asm 2: mov $0,>ry2=%r14 +mov $0,%r14 + +# qhasm: ry2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<ry2=int64#12 +# asm 2: adc <mulrdx=%rdx,<ry2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? 
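+# The first product has just been written to rp+0..24. The same
+# multiply-and-reduce sequence now runs again for a second product, taken
+# from the limbs at pp+64..88 and pp+32..56, which will be written to
+# rp+32..56.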
ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#12 +# asm 2: add <mulrax=%rax,<ry2=%r14 +add %rax,%r14 + +# qhasm: ry3 = 0 +# asm 1: mov $0,>ry3=int64#13 +# asm 2: mov $0,>ry3=%r15 +mov $0,%r15 + +# qhasm: ry3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<ry3=int64#13 +# asm 2: adc <mulrdx=%rdx,<ry3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 72(<pp=%rsi),>mulx1=%r11 +movq 72(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? ry1 += mulrax +# asm 1: add <mulrax=int64#7,<ry1=int64#11 +# asm 2: add <mulrax=%rax,<ry1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#12 +# asm 2: add <mulrax=%rax,<ry2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? ry2 += mulc +# asm 1: add <mulc=int64#14,<ry2=int64#12 +# asm 2: add <mulc=%rbx,<ry2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
ry3 += mulc +# asm 1: add <mulc=int64#14,<ry3=int64#13 +# asm 2: add <mulc=%rbx,<ry3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 80(<pp=%rsi),>mulx2=%r11 +movq 80(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#12 +# asm 2: add <mulrax=%rax,<ry2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? ry3 += mulc +# asm 1: add <mulc=int64#14,<ry3=int64#13 +# asm 2: add <mulc=%rbx,<ry3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 88(<pp=%rsi),>mulx3=%r11 +movq 88(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
ry0 += mulr4 +# asm 1: add <mulr4=int64#4,<ry0=int64#10 +# asm 2: add <mulr4=%rcx,<ry0=%r12 +add %rcx,%r12 + +# qhasm: carry? ry1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<ry1=int64#11 +# asm 2: adc <mulr5=%r8,<ry1=%r13 +adc %r8,%r13 + +# qhasm: carry? ry2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<ry2=int64#12 +# asm 2: adc <mulr6=%r9,<ry2=%r14 +adc %r9,%r14 + +# qhasm: carry? ry3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<ry3=int64#13 +# asm 2: adc <mulr7=%r10,<ry3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? ry0 += mulr8 +# asm 1: add <mulr8=int64#4,<ry0=int64#10 +# asm 2: add <mulr8=%rcx,<ry0=%r12 +add %rcx,%r12 + +# qhasm: carry? ry1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<ry1=int64#11 +# asm 2: adc <mulzero=%rdx,<ry1=%r13 +adc %rdx,%r13 + +# qhasm: carry? ry2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<ry2=int64#12 +# asm 2: adc <mulzero=%rdx,<ry2=%r14 +adc %rdx,%r14 + +# qhasm: carry? ry3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<ry3=int64#13 +# asm 2: adc <mulzero=%rdx,<ry3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: ry0 += mulzero +# asm 1: add <mulzero=int64#3,<ry0=int64#10 +# asm 2: add <mulzero=%rdx,<ry0=%r12 +add %rdx,%r12 + +# qhasm: *(uint64 *)(rp + 32) = ry0 +# asm 1: movq <ry0=int64#10,32(<rp=int64#1) +# asm 2: movq <ry0=%r12,32(<rp=%rdi) +movq %r12,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = ry1 +# asm 1: movq <ry1=int64#11,40(<rp=int64#1) +# asm 2: movq <ry1=%r13,40(<rp=%rdi) +movq %r13,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = ry2 +# asm 1: movq <ry2=int64#12,48(<rp=int64#1) +# asm 2: movq <ry2=%r14,48(<rp=%rdi) +movq %r14,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = ry3 +# asm 1: movq <ry3=int64#13,56(<rp=int64#1) +# asm 2: movq <ry3=%r15,56(<rp=%rdi) +movq %r15,56(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 32(<pp=%rsi),>mulx0=%r11 +movq 32(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: rz0 = mulrax +# asm 1: mov <mulrax=int64#7,>rz0=int64#10 +# asm 2: mov <mulrax=%rax,>rz0=%r12 +mov %rax,%r12 + +# qhasm: rz1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rz1=int64#11 +# asm 2: mov <mulrdx=%rdx,>rz1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: 
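+# The second result is stored at rp+32..56 above. A third product, from
+# the limbs at pp+32..56 and pp+96..120, is accumulated next in exactly
+# the same way and lands at rp+64..88, after which the routine restores
+# the saved registers and returns.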
movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rz1 += mulrax +# asm 1: add <mulrax=int64#7,<rz1=int64#11 +# asm 2: add <mulrax=%rax,<rz1=%r13 +add %rax,%r13 + +# qhasm: rz2 = 0 +# asm 1: mov $0,>rz2=int64#12 +# asm 2: mov $0,>rz2=%r14 +mov $0,%r14 + +# qhasm: rz2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rz2=int64#12 +# asm 2: adc <mulrdx=%rdx,<rz2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#12 +# asm 2: add <mulrax=%rax,<rz2=%r14 +add %rax,%r14 + +# qhasm: rz3 = 0 +# asm 1: mov $0,>rz3=int64#13 +# asm 2: mov $0,>rz3=%r15 +mov $0,%r15 + +# qhasm: rz3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rz3=int64#13 +# asm 2: adc <mulrdx=%rdx,<rz3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 40(<pp=%rsi),>mulx1=%r11 +movq 40(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rz1 += mulrax +# asm 1: add <mulrax=int64#7,<rz1=int64#11 +# asm 2: add <mulrax=%rax,<rz1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#12 +# asm 2: add <mulrax=%rax,<rz2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rz2 += mulc +# asm 1: add <mulc=int64#14,<rz2=int64#12 +# asm 2: add <mulc=%rbx,<rz2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rz3 += mulc +# asm 1: add <mulc=int64#14,<rz3=int64#13 +# asm 2: add <mulc=%rbx,<rz3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 48(<pp=%rsi),>mulx2=%r11 +movq 48(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#12 +# asm 2: add <mulrax=%rax,<rz2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rz3 += mulc +# asm 1: add <mulc=int64#14,<rz3=int64#13 +# asm 2: add <mulc=%rbx,<rz3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 56(<pp=%rsi),>mulx3=%r11 +movq 56(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? rz0 += mulr4 +# asm 1: add <mulr4=int64#2,<rz0=int64#10 +# asm 2: add <mulr4=%rsi,<rz0=%r12 +add %rsi,%r12 + +# qhasm: carry? rz1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rz1=int64#11 +# asm 2: adc <mulr5=%rcx,<rz1=%r13 +adc %rcx,%r13 + +# qhasm: carry? rz2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rz2=int64#12 +# asm 2: adc <mulr6=%r8,<rz2=%r14 +adc %r8,%r14 + +# qhasm: carry? rz3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rz3=int64#13 +# asm 2: adc <mulr7=%r9,<rz3=%r15 +adc %r9,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rz0 += mulr8 +# asm 1: add <mulr8=int64#3,<rz0=int64#10 +# asm 2: add <mulr8=%rdx,<rz0=%r12 +add %rdx,%r12 + +# qhasm: carry? rz1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rz1=int64#11 +# asm 2: adc <mulzero=%rsi,<rz1=%r13 +adc %rsi,%r13 + +# qhasm: carry? rz2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rz2=int64#12 +# asm 2: adc <mulzero=%rsi,<rz2=%r14 +adc %rsi,%r14 + +# qhasm: carry? 
rz3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rz3=int64#13 +# asm 2: adc <mulzero=%rsi,<rz3=%r15 +adc %rsi,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rz0 += mulzero +# asm 1: add <mulzero=int64#2,<rz0=int64#10 +# asm 2: add <mulzero=%rsi,<rz0=%r12 +add %rsi,%r12 + +# qhasm: *(uint64 *)(rp + 64) = rz0 +# asm 1: movq <rz0=int64#10,64(<rp=int64#1) +# asm 2: movq <rz0=%r12,64(<rp=%rdi) +movq %r12,64(%rdi) + +# qhasm: *(uint64 *)(rp + 72) = rz1 +# asm 1: movq <rz1=int64#11,72(<rp=int64#1) +# asm 2: movq <rz1=%r13,72(<rp=%rdi) +movq %r13,72(%rdi) + +# qhasm: *(uint64 *)(rp + 80) = rz2 +# asm 1: movq <rz2=int64#12,80(<rp=int64#1) +# asm 2: movq <rz2=%r14,80(<rp=%rdi) +movq %r14,80(%rdi) + +# qhasm: *(uint64 *)(rp + 88) = rz3 +# asm 1: movq <rz3=int64#13,88(<rp=int64#1) +# asm 2: movq <rz3=%r15,88(<rp=%rdi) +movq %r15,88(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_p1p1_to_p3.s b/ext/ed25519-amd64-asm/ge25519_p1p1_to_p3.s new file mode 100644 index 00000000..607b9eaf --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_p1p1_to_p3.s @@ -0,0 +1,2926 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# 
qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3 +.globl crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3 +_crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3: +crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 0(<pp=%rsi),>mulx0=%r11 +movq 0(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: rx0 = mulrax +# asm 1: mov <mulrax=int64#7,>rx0=int64#10 +# asm 2: mov <mulrax=%rax,>rx0=%r12 +mov %rax,%r12 + +# qhasm: rx1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rx1=int64#11 +# asm 2: mov <mulrdx=%rdx,>rx1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? 
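+# crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3 follows the same pattern
+# as the preceding routine: the prologue reserves an aligned scratch area
+# of at least 64 bytes (the "and $31 / add $64 / sub" sequence) and spills
+# the caller's registers (caller1_stack..caller7_stack) into it, then a
+# series of four-limb multiplications with the 38-fold reduction produces
+# the output coordinates, starting with the product of the limbs at
+# pp+0..24 and pp+96..120, which is written to rp+0..24.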
rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#11 +# asm 2: add <mulrax=%rax,<rx1=%r13 +add %rax,%r13 + +# qhasm: rx2 = 0 +# asm 1: mov $0,>rx2=int64#12 +# asm 2: mov $0,>rx2=%r14 +mov $0,%r14 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx2=int64#12 +# asm 2: adc <mulrdx=%rdx,<rx2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#12 +# asm 2: add <mulrax=%rax,<rx2=%r14 +add %rax,%r14 + +# qhasm: rx3 = 0 +# asm 1: mov $0,>rx3=int64#13 +# asm 2: mov $0,>rx3=%r15 +mov $0,%r15 + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx3=int64#13 +# asm 2: adc <mulrdx=%rdx,<rx3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 8) +# asm 1: movq 8(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 8(<pp=%rsi),>mulx1=%r11 +movq 8(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#11 +# asm 2: add <mulrax=%rax,<rx1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#12 +# asm 2: add <mulrax=%rax,<rx2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx2 += mulc +# asm 1: add <mulc=int64#14,<rx2=int64#12 +# asm 2: add <mulc=%rbx,<rx2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? 
rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#14,<rx3=int64#13 +# asm 2: add <mulc=%rbx,<rx3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 16(<pp=%rsi),>mulx2=%r11 +movq 16(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#12 +# asm 2: add <mulrax=%rax,<rx2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#14,<rx3=int64#13 +# asm 2: add <mulc=%rbx,<rx3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 24(<pp=%rsi),>mulx3=%r11 +movq 24(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#13 +# asm 2: add <mulrax=%rax,<rx3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rx0 += mulr4 +# asm 1: add <mulr4=int64#4,<rx0=int64#10 +# asm 2: add <mulr4=%rcx,<rx0=%r12 +add %rcx,%r12 + +# qhasm: carry? rx1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<rx1=int64#11 +# asm 2: adc <mulr5=%r8,<rx1=%r13 +adc %r8,%r13 + +# qhasm: carry? rx2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<rx2=int64#12 +# asm 2: adc <mulr6=%r9,<rx2=%r14 +adc %r9,%r14 + +# qhasm: carry? rx3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<rx3=int64#13 +# asm 2: adc <mulr7=%r10,<rx3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? rx0 += mulr8 +# asm 1: add <mulr8=int64#4,<rx0=int64#10 +# asm 2: add <mulr8=%rcx,<rx0=%r12 +add %rcx,%r12 + +# qhasm: carry? rx1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx1=int64#11 +# asm 2: adc <mulzero=%rdx,<rx1=%r13 +adc %rdx,%r13 + +# qhasm: carry? rx2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx2=int64#12 +# asm 2: adc <mulzero=%rdx,<rx2=%r14 +adc %rdx,%r14 + +# qhasm: carry? rx3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx3=int64#13 +# asm 2: adc <mulzero=%rdx,<rx3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: rx0 += mulzero +# asm 1: add <mulzero=int64#3,<rx0=int64#10 +# asm 2: add <mulzero=%rdx,<rx0=%r12 +add %rdx,%r12 + +# qhasm: *(uint64 *)(rp + 0) = rx0 +# asm 1: movq <rx0=int64#10,0(<rp=int64#1) +# asm 2: movq <rx0=%r12,0(<rp=%rdi) +movq %r12,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = rx1 +# asm 1: movq <rx1=int64#11,8(<rp=int64#1) +# asm 2: movq <rx1=%r13,8(<rp=%rdi) +movq %r13,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = rx2 +# asm 1: movq <rx2=int64#12,16(<rp=int64#1) +# asm 2: movq <rx2=%r14,16(<rp=%rdi) +movq %r14,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = rx3 +# asm 1: movq <rx3=int64#13,24(<rp=int64#1) +# asm 2: movq <rx3=%r15,24(<rp=%rdi) +movq %r15,24(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 64(<pp=%rsi),>mulx0=%r11 +movq 64(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: ry0 = mulrax +# asm 1: mov <mulrax=int64#7,>ry0=int64#10 +# asm 2: mov <mulrax=%rax,>ry0=%r12 +mov %rax,%r12 + +# qhasm: ry1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>ry1=int64#11 +# asm 2: mov <mulrdx=%rdx,>ry1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 
40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? ry1 += mulrax +# asm 1: add <mulrax=int64#7,<ry1=int64#11 +# asm 2: add <mulrax=%rax,<ry1=%r13 +add %rax,%r13 + +# qhasm: ry2 = 0 +# asm 1: mov $0,>ry2=int64#12 +# asm 2: mov $0,>ry2=%r14 +mov $0,%r14 + +# qhasm: ry2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<ry2=int64#12 +# asm 2: adc <mulrdx=%rdx,<ry2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#12 +# asm 2: add <mulrax=%rax,<ry2=%r14 +add %rax,%r14 + +# qhasm: ry3 = 0 +# asm 1: mov $0,>ry3=int64#13 +# asm 2: mov $0,>ry3=%r15 +mov $0,%r15 + +# qhasm: ry3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<ry3=int64#13 +# asm 2: adc <mulrdx=%rdx,<ry3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 72(<pp=%rsi),>mulx1=%r11 +movq 72(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? ry1 += mulrax +# asm 1: add <mulrax=int64#7,<ry1=int64#11 +# asm 2: add <mulrax=%rax,<ry1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#12 +# asm 2: add <mulrax=%rax,<ry2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
ry2 += mulc +# asm 1: add <mulc=int64#14,<ry2=int64#12 +# asm 2: add <mulc=%rbx,<ry2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? ry3 += mulc +# asm 1: add <mulc=int64#14,<ry3=int64#13 +# asm 2: add <mulc=%rbx,<ry3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 80(<pp=%rsi),>mulx2=%r11 +movq 80(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? ry2 += mulrax +# asm 1: add <mulrax=int64#7,<ry2=int64#12 +# asm 2: add <mulrax=%rax,<ry2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
ry3 += mulc +# asm 1: add <mulc=int64#14,<ry3=int64#13 +# asm 2: add <mulc=%rbx,<ry3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 88(<pp=%rsi),>mulx3=%r11 +movq 88(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 32(<pp=%rsi),>mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? ry3 += mulrax +# asm 1: add <mulrax=int64#7,<ry3=int64#13 +# asm 2: add <mulrax=%rax,<ry3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 40(<pp=%rsi),>mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 48(<pp=%rsi),>mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 56(<pp=%rsi),>mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? ry0 += mulr4 +# asm 1: add <mulr4=int64#4,<ry0=int64#10 +# asm 2: add <mulr4=%rcx,<ry0=%r12 +add %rcx,%r12 + +# qhasm: carry? ry1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<ry1=int64#11 +# asm 2: adc <mulr5=%r8,<ry1=%r13 +adc %r8,%r13 + +# qhasm: carry? ry2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<ry2=int64#12 +# asm 2: adc <mulr6=%r9,<ry2=%r14 +adc %r9,%r14 + +# qhasm: carry? ry3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<ry3=int64#13 +# asm 2: adc <mulr7=%r10,<ry3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? ry0 += mulr8 +# asm 1: add <mulr8=int64#4,<ry0=int64#10 +# asm 2: add <mulr8=%rcx,<ry0=%r12 +add %rcx,%r12 + +# qhasm: carry? ry1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<ry1=int64#11 +# asm 2: adc <mulzero=%rdx,<ry1=%r13 +adc %rdx,%r13 + +# qhasm: carry? ry2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<ry2=int64#12 +# asm 2: adc <mulzero=%rdx,<ry2=%r14 +adc %rdx,%r14 + +# qhasm: carry? 
ry3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<ry3=int64#13 +# asm 2: adc <mulzero=%rdx,<ry3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: ry0 += mulzero +# asm 1: add <mulzero=int64#3,<ry0=int64#10 +# asm 2: add <mulzero=%rdx,<ry0=%r12 +add %rdx,%r12 + +# qhasm: *(uint64 *)(rp + 32) = ry0 +# asm 1: movq <ry0=int64#10,32(<rp=int64#1) +# asm 2: movq <ry0=%r12,32(<rp=%rdi) +movq %r12,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = ry1 +# asm 1: movq <ry1=int64#11,40(<rp=int64#1) +# asm 2: movq <ry1=%r13,40(<rp=%rdi) +movq %r13,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = ry2 +# asm 1: movq <ry2=int64#12,48(<rp=int64#1) +# asm 2: movq <ry2=%r14,48(<rp=%rdi) +movq %r14,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = ry3 +# asm 1: movq <ry3=int64#13,56(<rp=int64#1) +# asm 2: movq <ry3=%r15,56(<rp=%rdi) +movq %r15,56(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 32(<pp=%rsi),>mulx0=%r11 +movq 32(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: rz0 = mulrax +# asm 1: mov <mulrax=int64#7,>rz0=int64#10 +# asm 2: mov <mulrax=%rax,>rz0=%r12 +mov %rax,%r12 + +# qhasm: rz1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rz1=int64#11 +# asm 2: mov <mulrdx=%rdx,>rz1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rz1 += mulrax +# asm 1: add <mulrax=int64#7,<rz1=int64#11 +# asm 2: add <mulrax=%rax,<rz1=%r13 +add %rax,%r13 + +# qhasm: rz2 = 0 +# asm 1: mov $0,>rz2=int64#12 +# asm 2: mov $0,>rz2=%r14 +mov $0,%r14 + +# qhasm: rz2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rz2=int64#12 +# asm 2: adc <mulrdx=%rdx,<rz2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? 
rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#12 +# asm 2: add <mulrax=%rax,<rz2=%r14 +add %rax,%r14 + +# qhasm: rz3 = 0 +# asm 1: mov $0,>rz3=int64#13 +# asm 2: mov $0,>rz3=%r15 +mov $0,%r15 + +# qhasm: rz3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rz3=int64#13 +# asm 2: adc <mulrdx=%rdx,<rz3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 40(<pp=%rsi),>mulx1=%r11 +movq 40(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rz1 += mulrax +# asm 1: add <mulrax=int64#7,<rz1=int64#11 +# asm 2: add <mulrax=%rax,<rz1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#12 +# asm 2: add <mulrax=%rax,<rz2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rz2 += mulc +# asm 1: add <mulc=int64#14,<rz2=int64#12 +# asm 2: add <mulc=%rbx,<rz2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rz3 += mulc +# asm 1: add <mulc=int64#14,<rz3=int64#13 +# asm 2: add <mulc=%rbx,<rz3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 48(<pp=%rsi),>mulx2=%r11 +movq 48(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rz2 += mulrax +# asm 1: add <mulrax=int64#7,<rz2=int64#12 +# asm 2: add <mulrax=%rax,<rz2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rz3 += mulc +# asm 1: add <mulc=int64#14,<rz3=int64#13 +# asm 2: add <mulc=%rbx,<rz3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 56(<pp=%rsi),>mulx3=%r11 +movq 56(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 96(<pp=%rsi),>mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? rz3 += mulrax +# asm 1: add <mulrax=int64#7,<rz3=int64#13 +# asm 2: add <mulrax=%rax,<rz3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 104(<pp=%rsi),>mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 112(<pp=%rsi),>mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 120(<pp=%rsi),>mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#4 +# asm 2: mov <mulrax=%rax,>mulr4=%rcx +mov %rax,%rcx + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r8 +mov %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#8 +# asm 2: add <mulrax=%rax,<mulr7=%r10 +add %rax,%r10 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rz0 += mulr4 +# asm 1: add <mulr4=int64#4,<rz0=int64#10 +# asm 2: add <mulr4=%rcx,<rz0=%r12 +add %rcx,%r12 + +# qhasm: carry? rz1 += mulr5 + carry +# asm 1: adc <mulr5=int64#5,<rz1=int64#11 +# asm 2: adc <mulr5=%r8,<rz1=%r13 +adc %r8,%r13 + +# qhasm: carry? rz2 += mulr6 + carry +# asm 1: adc <mulr6=int64#6,<rz2=int64#12 +# asm 2: adc <mulr6=%r9,<rz2=%r14 +adc %r9,%r14 + +# qhasm: carry? rz3 += mulr7 + carry +# asm 1: adc <mulr7=int64#8,<rz3=int64#13 +# asm 2: adc <mulr7=%r10,<rz3=%r15 +adc %r10,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx +imulq $38,%rax,%rcx + +# qhasm: carry? rz0 += mulr8 +# asm 1: add <mulr8=int64#4,<rz0=int64#10 +# asm 2: add <mulr8=%rcx,<rz0=%r12 +add %rcx,%r12 + +# qhasm: carry? rz1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rz1=int64#11 +# asm 2: adc <mulzero=%rdx,<rz1=%r13 +adc %rdx,%r13 + +# qhasm: carry? rz2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rz2=int64#12 +# asm 2: adc <mulzero=%rdx,<rz2=%r14 +adc %rdx,%r14 + +# qhasm: carry? rz3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rz3=int64#13 +# asm 2: adc <mulzero=%rdx,<rz3=%r15 +adc %rdx,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: rz0 += mulzero +# asm 1: add <mulzero=int64#3,<rz0=int64#10 +# asm 2: add <mulzero=%rdx,<rz0=%r12 +add %rdx,%r12 + +# qhasm: *(uint64 *)(rp + 64) = rz0 +# asm 1: movq <rz0=int64#10,64(<rp=int64#1) +# asm 2: movq <rz0=%r12,64(<rp=%rdi) +movq %r12,64(%rdi) + +# qhasm: *(uint64 *)(rp + 72) = rz1 +# asm 1: movq <rz1=int64#11,72(<rp=int64#1) +# asm 2: movq <rz1=%r13,72(<rp=%rdi) +movq %r13,72(%rdi) + +# qhasm: *(uint64 *)(rp + 80) = rz2 +# asm 1: movq <rz2=int64#12,80(<rp=int64#1) +# asm 2: movq <rz2=%r14,80(<rp=%rdi) +movq %r14,80(%rdi) + +# qhasm: *(uint64 *)(rp + 88) = rz3 +# asm 1: movq <rz3=int64#13,88(<rp=int64#1) +# asm 2: movq <rz3=%r15,88(<rp=%rdi) +movq %r15,88(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(<pp=int64#2),>mulx0=int64#9 +# asm 2: movq 0(<pp=%rsi),>mulx0=%r11 +movq 0(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<pp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: rt0 = mulrax +# asm 1: mov <mulrax=int64#7,>rt0=int64#10 +# asm 2: mov <mulrax=%rax,>rt0=%r12 +mov %rax,%r12 + +# qhasm: rt1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rt1=int64#11 +# asm 2: mov <mulrdx=%rdx,>rt1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 
72(<pp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#11 +# asm 2: add <mulrax=%rax,<rt1=%r13 +add %rax,%r13 + +# qhasm: rt2 = 0 +# asm 1: mov $0,>rt2=int64#12 +# asm 2: mov $0,>rt2=%r14 +mov $0,%r14 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt2=int64#12 +# asm 2: adc <mulrdx=%rdx,<rt2=%r14 +adc %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<pp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#12 +# asm 2: add <mulrax=%rax,<rt2=%r14 +add %rax,%r14 + +# qhasm: rt3 = 0 +# asm 1: mov $0,>rt3=int64#13 +# asm 2: mov $0,>rt3=%r15 +mov $0,%r15 + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt3=int64#13 +# asm 2: adc <mulrdx=%rdx,<rt3=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<pp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#9 +# asm 2: mul <mulx0=%r11 +mul %r11 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#13 +# asm 2: add <mulrax=%rax,<rt3=%r15 +add %rax,%r15 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4 +# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx +adc %rdx,%rcx + +# qhasm: mulx1 = *(uint64 *)(pp + 8) +# asm 1: movq 8(<pp=int64#2),>mulx1=int64#9 +# asm 2: movq 8(<pp=%rsi),>mulx1=%r11 +movq 8(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<pp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#11 +# asm 2: add <mulrax=%rax,<rt1=%r13 +add %rax,%r13 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<pp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#12 +# asm 2: add <mulrax=%rax,<rt2=%r14 +add %rax,%r14 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rt2 += mulc +# asm 1: add <mulc=int64#14,<rt2=int64#12 +# asm 2: add <mulc=%rbx,<rt2=%r14 +add %rbx,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<pp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#13 +# asm 2: add <mulrax=%rax,<rt3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt3 += mulc +# asm 1: add <mulc=int64#14,<rt3=int64#13 +# asm 2: add <mulc=%rbx,<rt3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<pp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#9 +# asm 2: mul <mulx1=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r8 +adc %rdx,%r8 + +# qhasm: mulx2 = *(uint64 *)(pp + 16) +# asm 1: movq 16(<pp=int64#2),>mulx2=int64#9 +# asm 2: movq 16(<pp=%rsi),>mulx2=%r11 +movq 16(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<pp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#12 +# asm 2: add <mulrax=%rax,<rt2=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<pp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#13 +# asm 2: add <mulrax=%rax,<rt3=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rt3 += mulc +# asm 1: add <mulc=int64#14,<rt3=int64#13 +# asm 2: add <mulc=%rbx,<rt3=%r15 +add %rbx,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<pp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<pp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#9 +# asm 2: mul <mulx2=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r9 +adc %rdx,%r9 + +# qhasm: mulx3 = *(uint64 *)(pp + 24) +# asm 1: movq 24(<pp=int64#2),>mulx3=int64#9 +# asm 2: movq 24(<pp=%rsi),>mulx3=%r11 +movq 24(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 64(<pp=%rsi),>mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#13 +# asm 2: add <mulrax=%rax,<rt3=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 72(<pp=%rsi),>mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#4 +# asm 2: add <mulrax=%rax,<mulr4=%rcx +add %rax,%rcx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#14,<mulr4=int64#4 +# asm 2: add <mulc=%rbx,<mulr4=%rcx +add %rbx,%rcx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 80(<pp=%rsi),>mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#5 +# asm 2: add <mulrax=%rax,<mulr5=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#14,<mulr5=int64#5 +# asm 2: add <mulc=%rbx,<mulr5=%r8 +add %rbx,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#14 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7 +# asm 2: movq 88(<pp=%rsi),>mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#9 +# asm 2: mul <mulx3=%r11 +mul %r11 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#6 +# asm 2: add <mulrax=%rax,<mulr6=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#14,<mulr6=int64#6 +# asm 2: add <mulc=%rbx,<mulr6=%r9 +add %rbx,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r10 +adc %rdx,%r10 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#4,>mulrax=int64#7 +# asm 2: mov <mulr4=%rcx,>mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr5=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr6=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr7=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? rt0 += mulr4 +# asm 1: add <mulr4=int64#2,<rt0=int64#10 +# asm 2: add <mulr4=%rsi,<rt0=%r12 +add %rsi,%r12 + +# qhasm: carry? rt1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rt1=int64#11 +# asm 2: adc <mulr5=%rcx,<rt1=%r13 +adc %rcx,%r13 + +# qhasm: carry? rt2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rt2=int64#12 +# asm 2: adc <mulr6=%r8,<rt2=%r14 +adc %r8,%r14 + +# qhasm: carry? rt3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rt3=int64#13 +# asm 2: adc <mulr7=%r9,<rt3=%r15 +adc %r9,%r15 + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rt0 += mulr8 +# asm 1: add <mulr8=int64#3,<rt0=int64#10 +# asm 2: add <mulr8=%rdx,<rt0=%r12 +add %rdx,%r12 + +# qhasm: carry? rt1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt1=int64#11 +# asm 2: adc <mulzero=%rsi,<rt1=%r13 +adc %rsi,%r13 + +# qhasm: carry? rt2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt2=int64#12 +# asm 2: adc <mulzero=%rsi,<rt2=%r14 +adc %rsi,%r14 + +# qhasm: carry? 
rt3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt3=int64#13 +# asm 2: adc <mulzero=%rsi,<rt3=%r15 +adc %rsi,%r15 + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rt0 += mulzero +# asm 1: add <mulzero=int64#2,<rt0=int64#10 +# asm 2: add <mulzero=%rsi,<rt0=%r12 +add %rsi,%r12 + +# qhasm: *(uint64 *)(rp + 96) = rt0 +# asm 1: movq <rt0=int64#10,96(<rp=int64#1) +# asm 2: movq <rt0=%r12,96(<rp=%rdi) +movq %r12,96(%rdi) + +# qhasm: *(uint64 *)(rp + 104) = rt1 +# asm 1: movq <rt1=int64#11,104(<rp=int64#1) +# asm 2: movq <rt1=%r13,104(<rp=%rdi) +movq %r13,104(%rdi) + +# qhasm: *(uint64 *)(rp + 112) = rt2 +# asm 1: movq <rt2=int64#12,112(<rp=int64#1) +# asm 2: movq <rt2=%r14,112(<rp=%rdi) +movq %r14,112(%rdi) + +# qhasm: *(uint64 *)(rp + 120) = rt3 +# asm 1: movq <rt3=int64#13,120(<rp=int64#1) +# asm 2: movq <rt3=%r15,120(<rp=%rdi) +movq %r15,120(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_pack.c b/ext/ed25519-amd64-asm/ge25519_pack.c new file mode 100644 index 00000000..f289fe57 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_pack.c @@ -0,0 +1,13 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +void ge25519_pack(unsigned char r[32], const ge25519_p3 *p) +{ + fe25519 tx, ty, zi; + fe25519_invert(&zi, &p->z); + fe25519_mul(&tx, &p->x, &zi); + fe25519_mul(&ty, &p->y, &zi); + fe25519_pack(r, &ty); + r[31] ^= fe25519_getparity(&tx) << 7; +} diff --git a/ext/ed25519-amd64-asm/ge25519_pnielsadd_p1p1.s b/ext/ed25519-amd64-asm/ge25519_pnielsadd_p1p1.s new file mode 100644 index 00000000..93b7cc54 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_pnielsadd_p1p1.s @@ -0,0 +1,3662 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller 
caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 x0 + +# qhasm: int64 x1 + +# qhasm: int64 x2 + +# qhasm: int64 x3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulr8 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1 +.globl crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1 +_crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1: +crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $128,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq 
<caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov <qp=int64#3,>qp=int64#4 +# asm 2: mov <qp=%rdx,>qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(<pp=int64#2),>a0=int64#3 +# asm 2: movq 32(<pp=%rsi),>a0=%rdx +movq 32(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(<pp=int64#2),>a1=int64#5 +# asm 2: movq 40(<pp=%rsi),>a1=%r8 +movq 40(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(<pp=int64#2),>a2=int64#6 +# asm 2: movq 48(<pp=%rsi),>a2=%r9 +movq 48(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(<pp=int64#2),>a3=int64#7 +# asm 2: movq 56(<pp=%rsi),>a3=%rax +movq 56(%rsi),%rax + +# qhasm: b0 = a0 +# asm 1: mov <a0=int64#3,>b0=int64#8 +# asm 2: mov <a0=%rdx,>b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov <a1=int64#5,>b1=int64#9 +# asm 2: mov <a1=%r8,>b1=%r11 +mov %r8,%r11 + +# qhasm: b2 = a2 +# asm 1: mov <a2=int64#6,>b2=int64#10 +# asm 2: mov <a2=%r9,>b2=%r12 +mov %r9,%r12 + +# qhasm: b3 = a3 +# asm 1: mov <a3=int64#7,>b3=int64#11 +# asm 2: mov <a3=%rax,>b3=%r13 +mov %rax,%r13 + +# qhasm: carry? a0 -= *(uint64 *)(pp + 0) +# asm 1: subq 0(<pp=int64#2),<a0=int64#3 +# asm 2: subq 0(<pp=%rsi),<a0=%rdx +subq 0(%rsi),%rdx + +# qhasm: carry? a1 -= *(uint64 *)(pp + 8) - carry +# asm 1: sbbq 8(<pp=int64#2),<a1=int64#5 +# asm 2: sbbq 8(<pp=%rsi),<a1=%r8 +sbbq 8(%rsi),%r8 + +# qhasm: carry? a2 -= *(uint64 *)(pp + 16) - carry +# asm 1: sbbq 16(<pp=int64#2),<a2=int64#6 +# asm 2: sbbq 16(<pp=%rsi),<a2=%r9 +sbbq 16(%rsi),%r9 + +# qhasm: carry? a3 -= *(uint64 *)(pp + 24) - carry +# asm 1: sbbq 24(<pp=int64#2),<a3=int64#7 +# asm 2: sbbq 24(<pp=%rsi),<a3=%rax +sbbq 24(%rsi),%rax + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#12,<subt1=int64#13 +# asm 2: cmovae <subt0=%r14,<subt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? a0 -= subt1 +# asm 1: sub <subt1=int64#13,<a0=int64#3 +# asm 2: sub <subt1=%r15,<a0=%rdx +sub %r15,%rdx + +# qhasm: carry? a1 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a1=int64#5 +# asm 2: sbb <subt0=%r14,<a1=%r8 +sbb %r14,%r8 + +# qhasm: carry? a2 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a2=int64#6 +# asm 2: sbb <subt0=%r14,<a2=%r9 +sbb %r14,%r9 + +# qhasm: carry? a3 -= subt0 - carry +# asm 1: sbb <subt0=int64#12,<a3=int64#7 +# asm 2: sbb <subt0=%r14,<a3=%rax +sbb %r14,%rax + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#13,<subt0=int64#12 +# asm 2: cmovc <subt1=%r15,<subt0=%r14 +cmovc %r15,%r14 + +# qhasm: a0 -= subt0 +# asm 1: sub <subt0=int64#12,<a0=int64#3 +# asm 2: sub <subt0=%r14,<a0=%rdx +sub %r14,%rdx + +# qhasm: carry? b0 += *(uint64 *)(pp + 0) +# asm 1: addq 0(<pp=int64#2),<b0=int64#8 +# asm 2: addq 0(<pp=%rsi),<b0=%r10 +addq 0(%rsi),%r10 + +# qhasm: carry? b1 += *(uint64 *)(pp + 8) + carry +# asm 1: adcq 8(<pp=int64#2),<b1=int64#9 +# asm 2: adcq 8(<pp=%rsi),<b1=%r11 +adcq 8(%rsi),%r11 + +# qhasm: carry? 
b2 += *(uint64 *)(pp + 16) + carry +# asm 1: adcq 16(<pp=int64#2),<b2=int64#10 +# asm 2: adcq 16(<pp=%rsi),<b2=%r12 +adcq 16(%rsi),%r12 + +# qhasm: carry? b3 += *(uint64 *)(pp + 24) + carry +# asm 1: adcq 24(<pp=int64#2),<b3=int64#11 +# asm 2: adcq 24(<pp=%rsi),<b3=%r13 +adcq 24(%rsi),%r13 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#12,<addt1=int64#13 +# asm 2: cmovae <addt0=%r14,<addt1=%r15 +cmovae %r14,%r15 + +# qhasm: carry? b0 += addt1 +# asm 1: add <addt1=int64#13,<b0=int64#8 +# asm 2: add <addt1=%r15,<b0=%r10 +add %r15,%r10 + +# qhasm: carry? b1 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b1=int64#9 +# asm 2: adc <addt0=%r14,<b1=%r11 +adc %r14,%r11 + +# qhasm: carry? b2 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b2=int64#10 +# asm 2: adc <addt0=%r14,<b2=%r12 +adc %r14,%r12 + +# qhasm: carry? b3 += addt0 + carry +# asm 1: adc <addt0=int64#12,<b3=int64#11 +# asm 2: adc <addt0=%r14,<b3=%r13 +adc %r14,%r13 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#13,<addt0=int64#12 +# asm 2: cmovc <addt1=%r15,<addt0=%r14 +cmovc %r15,%r14 + +# qhasm: b0 += addt0 +# asm 1: add <addt0=int64#12,<b0=int64#8 +# asm 2: add <addt0=%r14,<b0=%r10 +add %r14,%r10 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#3,>a0_stack=stack64#8 +# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#5,>a1_stack=stack64#9 +# asm 2: movq <a1=%r8,>a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#6,>a2_stack=stack64#10 +# asm 2: movq <a2=%r9,>a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#7,>a3_stack=stack64#11 +# asm 2: movq <a3=%rax,>a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq <b0=int64#8,>b0_stack=stack64#12 +# asm 2: movq <b0=%r10,>b0_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq <b1=int64#9,>b1_stack=stack64#13 +# asm 2: movq <b1=%r11,>b1_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq <b2=int64#10,>b2_stack=stack64#14 +# asm 2: movq <b2=%r12,>b2_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq <b3=int64#11,>b3_stack=stack64#15 +# asm 2: movq <b3=%r13,>b3_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = a0_stack +# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10 +# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: a0 = mulrax +# asm 1: mov <mulrax=int64#7,>a0=int64#11 +# asm 2: mov <mulrax=%rax,>a0=%r13 +mov %rax,%r13 + +# qhasm: a1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>a1=int64#12 +# asm 2: mov <mulrdx=%rdx,>a1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = 
*(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#12 +# asm 2: add <mulrax=%rax,<a1=%r14 +add %rax,%r14 + +# qhasm: a2 = 0 +# asm 1: mov $0,>a2=int64#13 +# asm 2: mov $0,>a2=%r15 +mov $0,%r15 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a2=int64#13 +# asm 2: adc <mulrdx=%rdx,<a2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: a3 = 0 +# asm 1: mov $0,>a3=int64#14 +# asm 2: mov $0,>a3=%rbx +mov $0,%rbx + +# qhasm: a3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<a3=int64#14 +# asm 2: adc <mulrdx=%rdx,<a3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = a1_stack +# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10 +# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a1 += mulrax +# asm 1: add <mulrax=int64#7,<a1=int64#12 +# asm 2: add <mulrax=%rax,<a1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
a2 += mulc +# asm 1: add <mulc=int64#15,<a2=int64#13 +# asm 2: add <mulc=%rbp,<a2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? a3 += mulc +# asm 1: add <mulc=int64#15,<a3=int64#14 +# asm 2: add <mulc=%rbp,<a3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = a2_stack +# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10 +# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? a2 += mulrax +# asm 1: add <mulrax=int64#7,<a2=int64#13 +# asm 2: add <mulrax=%rax,<a2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
a3 += mulc +# asm 1: add <mulc=int64#15,<a3=int64#14 +# asm 2: add <mulc=%rbp,<a3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = a3_stack +# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10 +# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 0(<qp=%rcx),>mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? a3 += mulrax +# asm 1: add <mulrax=int64#7,<a3=int64#14 +# asm 2: add <mulrax=%rax,<a3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 8(<qp=%rcx),>mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 16) +# asm 1: movq 16(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 16(<qp=%rcx),>mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 24) +# asm 1: movq 24(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 24(<qp=%rcx),>mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? 
mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? a0 += mulr4 +# asm 1: add <mulr4=int64#5,<a0=int64#11 +# asm 2: add <mulr4=%r8,<a0=%r13 +add %r8,%r13 + +# qhasm: carry? a1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<a1=int64#12 +# asm 2: adc <mulr5=%r9,<a1=%r14 +adc %r9,%r14 + +# qhasm: carry? a2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<a2=int64#13 +# asm 2: adc <mulr6=%r10,<a2=%r15 +adc %r10,%r15 + +# qhasm: carry? a3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<a3=int64#14 +# asm 2: adc <mulr7=%r11,<a3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? a0 += mulr8 +# asm 1: add <mulr8=int64#5,<a0=int64#11 +# asm 2: add <mulr8=%r8,<a0=%r13 +add %r8,%r13 + +# qhasm: carry? a1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a1=int64#12 +# asm 2: adc <mulzero=%rdx,<a1=%r14 +adc %rdx,%r14 + +# qhasm: carry? a2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a2=int64#13 +# asm 2: adc <mulzero=%rdx,<a2=%r15 +adc %rdx,%r15 + +# qhasm: carry? 
a3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<a3=int64#14 +# asm 2: adc <mulzero=%rdx,<a3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: a0 += mulzero +# asm 1: add <mulzero=int64#3,<a0=int64#11 +# asm 2: add <mulzero=%rdx,<a0=%r13 +add %rdx,%r13 + +# qhasm: a0_stack = a0 +# asm 1: movq <a0=int64#11,>a0_stack=stack64#8 +# asm 2: movq <a0=%r13,>a0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq <a1=int64#12,>a1_stack=stack64#9 +# asm 2: movq <a1=%r14,>a1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq <a2=int64#13,>a2_stack=stack64#10 +# asm 2: movq <a2=%r15,>a2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq <a3=int64#14,>a3_stack=stack64#11 +# asm 2: movq <a3=%rbx,>a3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = b0_stack +# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10 +# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12 +movq 88(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: rx0 = mulrax +# asm 1: mov <mulrax=int64#7,>rx0=int64#11 +# asm 2: mov <mulrax=%rax,>rx0=%r13 +mov %rax,%r13 + +# qhasm: rx1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rx1=int64#12 +# asm 2: mov <mulrdx=%rdx,>rx1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#12 +# asm 2: add <mulrax=%rax,<rx1=%r14 +add %rax,%r14 + +# qhasm: rx2 = 0 +# asm 1: mov $0,>rx2=int64#13 +# asm 2: mov $0,>rx2=%r15 +mov $0,%r15 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx2=int64#13 +# asm 2: adc <mulrdx=%rdx,<rx2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#13 +# asm 2: add <mulrax=%rax,<rx2=%r15 +add %rax,%r15 + +# qhasm: rx3 = 0 +# asm 1: mov $0,>rx3=int64#14 +# asm 2: mov $0,>rx3=%rbx +mov $0,%rbx + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rx3=int64#14 +# asm 2: adc <mulrdx=%rdx,<rx3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = b1_stack +# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10 +# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12 +movq 96(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rx1 += mulrax +# asm 1: add <mulrax=int64#7,<rx1=int64#12 +# asm 2: add <mulrax=%rax,<rx1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#13 +# asm 2: add <mulrax=%rax,<rx2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx2 += mulc +# asm 1: add <mulc=int64#15,<rx2=int64#13 +# asm 2: add <mulc=%rbp,<rx2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
rx3 += mulc +# asm 1: add <mulc=int64#15,<rx3=int64#14 +# asm 2: add <mulc=%rbp,<rx3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = b2_stack +# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10 +# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12 +movq 104(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rx2 += mulrax +# asm 1: add <mulrax=int64#7,<rx2=int64#13 +# asm 2: add <mulrax=%rax,<rx2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rx3 += mulc +# asm 1: add <mulc=int64#15,<rx3=int64#14 +# asm 2: add <mulc=%rbp,<rx3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = b3_stack +# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10 +# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12 +movq 112(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 32(<qp=%rcx),>mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? rx3 += mulrax +# asm 1: add <mulrax=int64#7,<rx3=int64#14 +# asm 2: add <mulrax=%rax,<rx3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 40(<qp=%rcx),>mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 48) +# asm 1: movq 48(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 48(<qp=%rcx),>mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 56) +# asm 1: movq 56(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 56(<qp=%rcx),>mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rx0 += mulr4 +# asm 1: add <mulr4=int64#5,<rx0=int64#11 +# asm 2: add <mulr4=%r8,<rx0=%r13 +add %r8,%r13 + +# qhasm: carry? rx1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<rx1=int64#12 +# asm 2: adc <mulr5=%r9,<rx1=%r14 +adc %r9,%r14 + +# qhasm: carry? rx2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<rx2=int64#13 +# asm 2: adc <mulr6=%r10,<rx2=%r15 +adc %r10,%r15 + +# qhasm: carry? rx3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<rx3=int64#14 +# asm 2: adc <mulr7=%r11,<rx3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? rx0 += mulr8 +# asm 1: add <mulr8=int64#5,<rx0=int64#11 +# asm 2: add <mulr8=%r8,<rx0=%r13 +add %r8,%r13 + +# qhasm: carry? rx1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx1=int64#12 +# asm 2: adc <mulzero=%rdx,<rx1=%r14 +adc %rdx,%r14 + +# qhasm: carry? rx2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx2=int64#13 +# asm 2: adc <mulzero=%rdx,<rx2=%r15 +adc %rdx,%r15 + +# qhasm: carry? rx3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<rx3=int64#14 +# asm 2: adc <mulzero=%rdx,<rx3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: rx0 += mulzero +# asm 1: add <mulzero=int64#3,<rx0=int64#11 +# asm 2: add <mulzero=%rdx,<rx0=%r13 +add %rdx,%r13 + +# qhasm: ry0 = rx0 +# asm 1: mov <rx0=int64#11,>ry0=int64#3 +# asm 2: mov <rx0=%r13,>ry0=%rdx +mov %r13,%rdx + +# qhasm: ry1 = rx1 +# asm 1: mov <rx1=int64#12,>ry1=int64#5 +# asm 2: mov <rx1=%r14,>ry1=%r8 +mov %r14,%r8 + +# qhasm: ry2 = rx2 +# asm 1: mov <rx2=int64#13,>ry2=int64#6 +# asm 2: mov <rx2=%r15,>ry2=%r9 +mov %r15,%r9 + +# qhasm: ry3 = rx3 +# asm 1: mov <rx3=int64#14,>ry3=int64#7 +# asm 2: mov <rx3=%rbx,>ry3=%rax +mov %rbx,%rax + +# qhasm: carry? ry0 += a0_stack +# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3 +# asm 2: addq <a0_stack=56(%rsp),<ry0=%rdx +addq 56(%rsp),%rdx + +# qhasm: carry? ry1 += a1_stack + carry +# asm 1: adcq <a1_stack=stack64#9,<ry1=int64#5 +# asm 2: adcq <a1_stack=64(%rsp),<ry1=%r8 +adcq 64(%rsp),%r8 + +# qhasm: carry? ry2 += a2_stack + carry +# asm 1: adcq <a2_stack=stack64#10,<ry2=int64#6 +# asm 2: adcq <a2_stack=72(%rsp),<ry2=%r9 +adcq 72(%rsp),%r9 + +# qhasm: carry? ry3 += a3_stack + carry +# asm 1: adcq <a3_stack=stack64#11,<ry3=int64#7 +# asm 2: adcq <a3_stack=80(%rsp),<ry3=%rax +adcq 80(%rsp),%rax + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#8 +# asm 2: mov $0,>addt0=%r10 +mov $0,%r10 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#9 +# asm 2: mov $38,>addt1=%r11 +mov $38,%r11 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#8,<addt1=int64#9 +# asm 2: cmovae <addt0=%r10,<addt1=%r11 +cmovae %r10,%r11 + +# qhasm: carry? ry0 += addt1 +# asm 1: add <addt1=int64#9,<ry0=int64#3 +# asm 2: add <addt1=%r11,<ry0=%rdx +add %r11,%rdx + +# qhasm: carry? ry1 += addt0 + carry +# asm 1: adc <addt0=int64#8,<ry1=int64#5 +# asm 2: adc <addt0=%r10,<ry1=%r8 +adc %r10,%r8 + +# qhasm: carry? 
ry2 += addt0 + carry +# asm 1: adc <addt0=int64#8,<ry2=int64#6 +# asm 2: adc <addt0=%r10,<ry2=%r9 +adc %r10,%r9 + +# qhasm: carry? ry3 += addt0 + carry +# asm 1: adc <addt0=int64#8,<ry3=int64#7 +# asm 2: adc <addt0=%r10,<ry3=%rax +adc %r10,%rax + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#9,<addt0=int64#8 +# asm 2: cmovc <addt1=%r11,<addt0=%r10 +cmovc %r11,%r10 + +# qhasm: ry0 += addt0 +# asm 1: add <addt0=int64#8,<ry0=int64#3 +# asm 2: add <addt0=%r10,<ry0=%rdx +add %r10,%rdx + +# qhasm: carry? rx0 -= a0_stack +# asm 1: subq <a0_stack=stack64#8,<rx0=int64#11 +# asm 2: subq <a0_stack=56(%rsp),<rx0=%r13 +subq 56(%rsp),%r13 + +# qhasm: carry? rx1 -= a1_stack - carry +# asm 1: sbbq <a1_stack=stack64#9,<rx1=int64#12 +# asm 2: sbbq <a1_stack=64(%rsp),<rx1=%r14 +sbbq 64(%rsp),%r14 + +# qhasm: carry? rx2 -= a2_stack - carry +# asm 1: sbbq <a2_stack=stack64#10,<rx2=int64#13 +# asm 2: sbbq <a2_stack=72(%rsp),<rx2=%r15 +sbbq 72(%rsp),%r15 + +# qhasm: carry? rx3 -= a3_stack - carry +# asm 1: sbbq <a3_stack=stack64#11,<rx3=int64#14 +# asm 2: sbbq <a3_stack=80(%rsp),<rx3=%rbx +sbbq 80(%rsp),%rbx + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#8 +# asm 2: mov $0,>subt0=%r10 +mov $0,%r10 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#9 +# asm 2: mov $38,>subt1=%r11 +mov $38,%r11 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#8,<subt1=int64#9 +# asm 2: cmovae <subt0=%r10,<subt1=%r11 +cmovae %r10,%r11 + +# qhasm: carry? rx0 -= subt1 +# asm 1: sub <subt1=int64#9,<rx0=int64#11 +# asm 2: sub <subt1=%r11,<rx0=%r13 +sub %r11,%r13 + +# qhasm: carry? rx1 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<rx1=int64#12 +# asm 2: sbb <subt0=%r10,<rx1=%r14 +sbb %r10,%r14 + +# qhasm: carry? rx2 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<rx2=int64#13 +# asm 2: sbb <subt0=%r10,<rx2=%r15 +sbb %r10,%r15 + +# qhasm: carry? 
rx3 -= subt0 - carry +# asm 1: sbb <subt0=int64#8,<rx3=int64#14 +# asm 2: sbb <subt0=%r10,<rx3=%rbx +sbb %r10,%rbx + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#9,<subt0=int64#8 +# asm 2: cmovc <subt1=%r11,<subt0=%r10 +cmovc %r11,%r10 + +# qhasm: rx0 -= subt0 +# asm 1: sub <subt0=int64#8,<rx0=int64#11 +# asm 2: sub <subt0=%r10,<rx0=%r13 +sub %r10,%r13 + +# qhasm: *(uint64 *) (rp + 0) = rx0 +# asm 1: movq <rx0=int64#11,0(<rp=int64#1) +# asm 2: movq <rx0=%r13,0(<rp=%rdi) +movq %r13,0(%rdi) + +# qhasm: *(uint64 *) (rp + 8) = rx1 +# asm 1: movq <rx1=int64#12,8(<rp=int64#1) +# asm 2: movq <rx1=%r14,8(<rp=%rdi) +movq %r14,8(%rdi) + +# qhasm: *(uint64 *) (rp + 16) = rx2 +# asm 1: movq <rx2=int64#13,16(<rp=int64#1) +# asm 2: movq <rx2=%r15,16(<rp=%rdi) +movq %r15,16(%rdi) + +# qhasm: *(uint64 *) (rp + 24) = rx3 +# asm 1: movq <rx3=int64#14,24(<rp=int64#1) +# asm 2: movq <rx3=%rbx,24(<rp=%rdi) +movq %rbx,24(%rdi) + +# qhasm: *(uint64 *) (rp + 64) = ry0 +# asm 1: movq <ry0=int64#3,64(<rp=int64#1) +# asm 2: movq <ry0=%rdx,64(<rp=%rdi) +movq %rdx,64(%rdi) + +# qhasm: *(uint64 *) (rp + 72) = ry1 +# asm 1: movq <ry1=int64#5,72(<rp=int64#1) +# asm 2: movq <ry1=%r8,72(<rp=%rdi) +movq %r8,72(%rdi) + +# qhasm: *(uint64 *) (rp + 80) = ry2 +# asm 1: movq <ry2=int64#6,80(<rp=int64#1) +# asm 2: movq <ry2=%r9,80(<rp=%rdi) +movq %r9,80(%rdi) + +# qhasm: *(uint64 *) (rp + 88) = ry3 +# asm 1: movq <ry3=int64#7,88(<rp=int64#1) +# asm 2: movq <ry3=%rax,88(<rp=%rdi) +movq %rax,88(%rdi) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 96) +# asm 1: movq 96(<pp=int64#2),>mulx0=int64#10 +# asm 2: movq 96(<pp=%rsi),>mulx0=%r12 +movq 96(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: c0 = mulrax +# asm 1: mov <mulrax=int64#7,>c0=int64#11 +# asm 2: mov <mulrax=%rax,>c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>c1=int64#12 +# asm 2: mov <mulrdx=%rdx,>c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: c2 = 0 +# asm 1: mov $0,>c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c2=int64#13 +# asm 2: adc <mulrdx=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? 
c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: c3 = 0 +# asm 1: mov $0,>c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<c3=int64#14 +# asm 2: adc <mulrdx=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = *(uint64 *)(pp + 104) +# asm 1: movq 104(<pp=int64#2),>mulx1=int64#10 +# asm 2: movq 104(<pp=%rsi),>mulx1=%r12 +movq 104(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c1 += mulrax +# asm 1: add <mulrax=int64#7,<c1=int64#12 +# asm 2: add <mulrax=%rax,<c1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c2 += mulc +# asm 1: add <mulc=int64#15,<c2=int64#13 +# asm 2: add <mulc=%rbp,<c2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = *(uint64 *)(pp + 112) +# asm 1: movq 112(<pp=int64#2),>mulx2=int64#10 +# asm 2: movq 112(<pp=%rsi),>mulx2=%r12 +movq 112(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c2 += mulrax +# asm 1: add <mulrax=int64#7,<c2=int64#13 +# asm 2: add <mulrax=%rax,<c2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? c3 += mulc +# asm 1: add <mulc=int64#15,<c3=int64#14 +# asm 2: add <mulc=%rbp,<c3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = *(uint64 *)(pp + 120) +# asm 1: movq 120(<pp=int64#2),>mulx3=int64#10 +# asm 2: movq 120(<pp=%rsi),>mulx3=%r12 +movq 120(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 96(<qp=%rcx),>mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? c3 += mulrax +# asm 1: add <mulrax=int64#7,<c3=int64#14 +# asm 2: add <mulrax=%rax,<c3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 104(<qp=%rcx),>mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 112) +# asm 1: movq 112(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 112(<qp=%rcx),>mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 120) +# asm 1: movq 120(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 120(<qp=%rcx),>mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#10 +# asm 2: mul <mulx3=%r12 +mul %r12 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#15,<mulr6=int64#8 +# asm 2: add <mulc=%rbp,<mulr6=%r10 +add %rbp,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#5 +# asm 2: mov <mulrax=%rax,>mulr4=%r8 +mov %rax,%r8 + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#6 +# asm 2: mov <mulrdx=%rdx,>mulr5=%r9 +mov %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#9 +# asm 2: add <mulrax=%rax,<mulr7=%r11 +add %rax,%r11 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
c0 += mulr4 +# asm 1: add <mulr4=int64#5,<c0=int64#11 +# asm 2: add <mulr4=%r8,<c0=%r13 +add %r8,%r13 + +# qhasm: carry? c1 += mulr5 + carry +# asm 1: adc <mulr5=int64#6,<c1=int64#12 +# asm 2: adc <mulr5=%r9,<c1=%r14 +adc %r9,%r14 + +# qhasm: carry? c2 += mulr6 + carry +# asm 1: adc <mulr6=int64#8,<c2=int64#13 +# asm 2: adc <mulr6=%r10,<c2=%r15 +adc %r10,%r15 + +# qhasm: carry? c3 += mulr7 + carry +# asm 1: adc <mulr7=int64#9,<c3=int64#14 +# asm 2: adc <mulr7=%r11,<c3=%rbx +adc %r11,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulr8=int64#7 +# asm 2: adc <mulzero=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#5 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%r8 +imulq $38,%rax,%r8 + +# qhasm: carry? c0 += mulr8 +# asm 1: add <mulr8=int64#5,<c0=int64#11 +# asm 2: add <mulr8=%r8,<c0=%r13 +add %r8,%r13 + +# qhasm: carry? c1 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c1=int64#12 +# asm 2: adc <mulzero=%rdx,<c1=%r14 +adc %rdx,%r14 + +# qhasm: carry? c2 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c2=int64#13 +# asm 2: adc <mulzero=%rdx,<c2=%r15 +adc %rdx,%r15 + +# qhasm: carry? c3 += mulzero + carry +# asm 1: adc <mulzero=int64#3,<c3=int64#14 +# asm 2: adc <mulzero=%rdx,<c3=%rbx +adc %rdx,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#3,<mulzero=int64#3 +# asm 2: adc <mulzero=%rdx,<mulzero=%rdx +adc %rdx,%rdx + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3 +# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx +imulq $38,%rdx,%rdx + +# qhasm: c0 += mulzero +# asm 1: add <mulzero=int64#3,<c0=int64#11 +# asm 2: add <mulzero=%rdx,<c0=%r13 +add %rdx,%r13 + +# qhasm: c0_stack = c0 +# asm 1: movq <c0=int64#11,>c0_stack=stack64#8 +# asm 2: movq <c0=%r13,>c0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq <c1=int64#12,>c1_stack=stack64#9 +# asm 2: movq <c1=%r14,>c1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq <c2=int64#13,>c2_stack=stack64#10 +# asm 2: movq <c2=%r15,>c2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq <c3=int64#14,>c3_stack=stack64#11 +# asm 2: movq <c3=%rbx,>c3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(<pp=int64#2),>mulx0=int64#10 +# asm 2: movq 64(<pp=%rsi),>mulx0=%r12 +movq 64(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: rt0 = mulrax +# asm 1: mov <mulrax=int64#7,>rt0=int64#11 +# asm 2: mov <mulrax=%rax,>rt0=%r13 +mov %rax,%r13 + +# qhasm: rt1 = mulrdx +# asm 1: mov <mulrdx=int64#3,>rt1=int64#12 +# asm 2: mov <mulrdx=%rdx,>rt1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 
72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#12 +# asm 2: add <mulrax=%rax,<rt1=%r14 +add %rax,%r14 + +# qhasm: rt2 = 0 +# asm 1: mov $0,>rt2=int64#13 +# asm 2: mov $0,>rt2=%r15 +mov $0,%r15 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt2=int64#13 +# asm 2: adc <mulrdx=%rdx,<rt2=%r15 +adc %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#13 +# asm 2: add <mulrax=%rax,<rt2=%r15 +add %rax,%r15 + +# qhasm: rt3 = 0 +# asm 1: mov $0,>rt3=int64#14 +# asm 2: mov $0,>rt3=%rbx +mov $0,%rbx + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<rt3=int64#14 +# asm 2: adc <mulrdx=%rdx,<rt3=%rbx +adc %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul <mulx0=int64#10 +# asm 2: mul <mulx0=%r12 +mul %r12 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr4=%r8 +adc %rdx,%r8 + +# qhasm: mulx1 = *(uint64 *)(pp + 72) +# asm 1: movq 72(<pp=int64#2),>mulx1=int64#10 +# asm 2: movq 72(<pp=%rsi),>mulx1=%r12 +movq 72(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rt1 += mulrax +# asm 1: add <mulrax=int64#7,<rt1=int64#12 +# asm 2: add <mulrax=%rax,<rt1=%r14 +add %rax,%r14 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#13 +# asm 2: add <mulrax=%rax,<rt2=%r15 +add %rax,%r15 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt2 += mulc +# asm 1: add <mulc=int64#15,<rt2=int64#13 +# asm 2: add <mulc=%rbp,<rt2=%r15 +add %rbp,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? 
rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt3 += mulc +# asm 1: add <mulc=int64#15,<rt3=int64#14 +# asm 2: add <mulc=%rbp,<rt3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul <mulx1=int64#10 +# asm 2: mul <mulx1=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulr5 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr5=%r9 +adc %rdx,%r9 + +# qhasm: mulx2 = *(uint64 *)(pp + 80) +# asm 1: movq 80(<pp=int64#2),>mulx2=int64#10 +# asm 2: movq 80(<pp=%rsi),>mulx2=%r12 +movq 80(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rt2 += mulrax +# asm 1: add <mulrax=int64#7,<rt2=int64#13 +# asm 2: add <mulrax=%rax,<rt2=%r15 +add %rax,%r15 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? rt3 += mulc +# asm 1: add <mulc=int64#15,<rt3=int64#14 +# asm 2: add <mulc=%rbp,<rt3=%rbx +add %rbp,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr4 += mulc +# asm 1: add <mulc=int64#15,<mulr4=int64#5 +# asm 2: add <mulc=%rbp,<mulr4=%r8 +add %rbp,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#15 +# asm 2: adc <mulrdx=%rdx,<mulc=%rbp +adc %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul <mulx2=int64#10 +# asm 2: mul <mulx2=%r12 +mul %r12 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr5 += mulc +# asm 1: add <mulc=int64#15,<mulr5=int64#6 +# asm 2: add <mulc=%rbp,<mulr5=%r9 +add %rbp,%r9 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r10 +adc %rdx,%r10 + +# qhasm: mulx3 = *(uint64 *)(pp + 88) +# asm 1: movq 88(<pp=int64#2),>mulx3=int64#2 +# asm 2: movq 88(<pp=%rsi),>mulx3=%rsi +movq 88(%rsi),%rsi + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 64(<qp=%rcx),>mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? rt3 += mulrax +# asm 1: add <mulrax=int64#7,<rt3=int64#14 +# asm 2: add <mulrax=%rax,<rt3=%rbx +add %rax,%rbx + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 72(<qp=%rcx),>mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr4 += mulrax +# asm 1: add <mulrax=int64#7,<mulr4=int64#5 +# asm 2: add <mulrax=%rax,<mulr4=%r8 +add %rax,%r8 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr4 += mulc +# asm 1: add <mulc=int64#10,<mulr4=int64#5 +# asm 2: add <mulc=%r12,<mulr4=%r8 +add %r12,%r8 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 80) +# asm 1: movq 80(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 80(<qp=%rcx),>mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#6 +# asm 2: add <mulrax=%rax,<mulr5=%r9 +add %rax,%r9 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? 
mulr5 += mulc +# asm 1: add <mulc=int64#10,<mulr5=int64#6 +# asm 2: add <mulc=%r12,<mulr5=%r9 +add %r12,%r9 + +# qhasm: mulc = 0 +# asm 1: mov $0,>mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulc=int64#10 +# asm 2: adc <mulrdx=%rdx,<mulc=%r12 +adc %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 88) +# asm 1: movq 88(<qp=int64#4),>mulrax=int64#7 +# asm 2: movq 88(<qp=%rcx),>mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul <mulx3=int64#2 +# asm 2: mul <mulx3=%rsi +mul %rsi + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#8 +# asm 2: add <mulrax=%rax,<mulr6=%r10 +add %rax,%r10 + +# qhasm: mulrdx += 0 + carry +# asm 1: adc $0,<mulrdx=int64#3 +# asm 2: adc $0,<mulrdx=%rdx +adc $0,%rdx + +# qhasm: carry? mulr6 += mulc +# asm 1: add <mulc=int64#10,<mulr6=int64#8 +# asm 2: add <mulc=%r12,<mulr6=%r10 +add %r12,%r10 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r11 +adc %rdx,%r11 + +# qhasm: mulrax = mulr4 +# asm 1: mov <mulr4=int64#5,>mulrax=int64#7 +# asm 2: mov <mulr4=%r8,>mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: mulr4 = mulrax +# asm 1: mov <mulrax=int64#7,>mulr4=int64#2 +# asm 2: mov <mulrax=%rax,>mulr4=%rsi +mov %rax,%rsi + +# qhasm: mulrax = mulr5 +# asm 1: mov <mulr5=int64#6,>mulrax=int64#7 +# asm 2: mov <mulr5=%r9,>mulrax=%rax +mov %r9,%rax + +# qhasm: mulr5 = mulrdx +# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4 +# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx +mov %rdx,%rcx + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr5 += mulrax +# asm 1: add <mulrax=int64#7,<mulr5=int64#4 +# asm 2: add <mulrax=%rax,<mulr5=%rcx +add %rax,%rcx + +# qhasm: mulrax = mulr6 +# asm 1: mov <mulr6=int64#8,>mulrax=int64#7 +# asm 2: mov <mulr6=%r10,>mulrax=%rax +mov %r10,%rax + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr6 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5 +# asm 2: adc <mulrdx=%rdx,<mulr6=%r8 +adc %rdx,%r8 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr6 += mulrax +# asm 1: add <mulrax=int64#7,<mulr6=int64#5 +# asm 2: add <mulrax=%rax,<mulr6=%r8 +add %rax,%r8 + +# qhasm: mulrax = mulr7 +# asm 1: mov <mulr7=int64#9,>mulrax=int64#7 +# asm 2: mov <mulr7=%r11,>mulrax=%rax +mov %r11,%rax + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulr7 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6 +# asm 2: adc <mulrdx=%rdx,<mulr7=%r9 +adc %rdx,%r9 + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 +mulq crypto_sign_ed25519_amd64_64_38 + +# qhasm: carry? mulr7 += mulrax +# asm 1: add <mulrax=int64#7,<mulr7=int64#6 +# asm 2: add <mulrax=%rax,<mulr7=%r9 +add %rax,%r9 + +# qhasm: mulr8 = 0 +# asm 1: mov $0,>mulr8=int64#7 +# asm 2: mov $0,>mulr8=%rax +mov $0,%rax + +# qhasm: mulr8 += mulrdx + carry +# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7 +# asm 2: adc <mulrdx=%rdx,<mulr8=%rax +adc %rdx,%rax + +# qhasm: carry? 
rt0 += mulr4 +# asm 1: add <mulr4=int64#2,<rt0=int64#11 +# asm 2: add <mulr4=%rsi,<rt0=%r13 +add %rsi,%r13 + +# qhasm: carry? rt1 += mulr5 + carry +# asm 1: adc <mulr5=int64#4,<rt1=int64#12 +# asm 2: adc <mulr5=%rcx,<rt1=%r14 +adc %rcx,%r14 + +# qhasm: carry? rt2 += mulr6 + carry +# asm 1: adc <mulr6=int64#5,<rt2=int64#13 +# asm 2: adc <mulr6=%r8,<rt2=%r15 +adc %r8,%r15 + +# qhasm: carry? rt3 += mulr7 + carry +# asm 1: adc <mulr7=int64#6,<rt3=int64#14 +# asm 2: adc <mulr7=%r9,<rt3=%rbx +adc %r9,%rbx + +# qhasm: mulzero = 0 +# asm 1: mov $0,>mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: mulr8 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulr8=int64#7 +# asm 2: adc <mulzero=%rsi,<mulr8=%rax +adc %rsi,%rax + +# qhasm: mulr8 *= 38 +# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3 +# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx +imulq $38,%rax,%rdx + +# qhasm: carry? rt0 += mulr8 +# asm 1: add <mulr8=int64#3,<rt0=int64#11 +# asm 2: add <mulr8=%rdx,<rt0=%r13 +add %rdx,%r13 + +# qhasm: carry? rt1 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt1=int64#12 +# asm 2: adc <mulzero=%rsi,<rt1=%r14 +adc %rsi,%r14 + +# qhasm: carry? rt2 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt2=int64#13 +# asm 2: adc <mulzero=%rsi,<rt2=%r15 +adc %rsi,%r15 + +# qhasm: carry? rt3 += mulzero + carry +# asm 1: adc <mulzero=int64#2,<rt3=int64#14 +# asm 2: adc <mulzero=%rsi,<rt3=%rbx +adc %rsi,%rbx + +# qhasm: mulzero += mulzero + carry +# asm 1: adc <mulzero=int64#2,<mulzero=int64#2 +# asm 2: adc <mulzero=%rsi,<mulzero=%rsi +adc %rsi,%rsi + +# qhasm: mulzero *= 38 +# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2 +# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi +imulq $38,%rsi,%rsi + +# qhasm: rt0 += mulzero +# asm 1: add <mulzero=int64#2,<rt0=int64#11 +# asm 2: add <mulzero=%rsi,<rt0=%r13 +add %rsi,%r13 + +# qhasm: carry? rt0 += rt0 +# asm 1: add <rt0=int64#11,<rt0=int64#11 +# asm 2: add <rt0=%r13,<rt0=%r13 +add %r13,%r13 + +# qhasm: carry? rt1 += rt1 + carry +# asm 1: adc <rt1=int64#12,<rt1=int64#12 +# asm 2: adc <rt1=%r14,<rt1=%r14 +adc %r14,%r14 + +# qhasm: carry? rt2 += rt2 + carry +# asm 1: adc <rt2=int64#13,<rt2=int64#13 +# asm 2: adc <rt2=%r15,<rt2=%r15 +adc %r15,%r15 + +# qhasm: carry? rt3 += rt3 + carry +# asm 1: adc <rt3=int64#14,<rt3=int64#14 +# asm 2: adc <rt3=%rbx,<rt3=%rbx +adc %rbx,%rbx + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#2,<addt1=int64#3 +# asm 2: cmovae <addt0=%rsi,<addt1=%rdx +cmovae %rsi,%rdx + +# qhasm: carry? rt0 += addt1 +# asm 1: add <addt1=int64#3,<rt0=int64#11 +# asm 2: add <addt1=%rdx,<rt0=%r13 +add %rdx,%r13 + +# qhasm: carry? rt1 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rt1=int64#12 +# asm 2: adc <addt0=%rsi,<rt1=%r14 +adc %rsi,%r14 + +# qhasm: carry? rt2 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rt2=int64#13 +# asm 2: adc <addt0=%rsi,<rt2=%r15 +adc %rsi,%r15 + +# qhasm: carry? 
rt3 += addt0 + carry +# asm 1: adc <addt0=int64#2,<rt3=int64#14 +# asm 2: adc <addt0=%rsi,<rt3=%rbx +adc %rsi,%rbx + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#3,<addt0=int64#2 +# asm 2: cmovc <addt1=%rdx,<addt0=%rsi +cmovc %rdx,%rsi + +# qhasm: rt0 += addt0 +# asm 1: add <addt0=int64#2,<rt0=int64#11 +# asm 2: add <addt0=%rsi,<rt0=%r13 +add %rsi,%r13 + +# qhasm: rz0 = rt0 +# asm 1: mov <rt0=int64#11,>rz0=int64#2 +# asm 2: mov <rt0=%r13,>rz0=%rsi +mov %r13,%rsi + +# qhasm: rz1 = rt1 +# asm 1: mov <rt1=int64#12,>rz1=int64#3 +# asm 2: mov <rt1=%r14,>rz1=%rdx +mov %r14,%rdx + +# qhasm: rz2 = rt2 +# asm 1: mov <rt2=int64#13,>rz2=int64#4 +# asm 2: mov <rt2=%r15,>rz2=%rcx +mov %r15,%rcx + +# qhasm: rz3 = rt3 +# asm 1: mov <rt3=int64#14,>rz3=int64#5 +# asm 2: mov <rt3=%rbx,>rz3=%r8 +mov %rbx,%r8 + +# qhasm: carry? rz0 += c0_stack +# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2 +# asm 2: addq <c0_stack=56(%rsp),<rz0=%rsi +addq 56(%rsp),%rsi + +# qhasm: carry? rz1 += c1_stack + carry +# asm 1: adcq <c1_stack=stack64#9,<rz1=int64#3 +# asm 2: adcq <c1_stack=64(%rsp),<rz1=%rdx +adcq 64(%rsp),%rdx + +# qhasm: carry? rz2 += c2_stack + carry +# asm 1: adcq <c2_stack=stack64#10,<rz2=int64#4 +# asm 2: adcq <c2_stack=72(%rsp),<rz2=%rcx +adcq 72(%rsp),%rcx + +# qhasm: carry? rz3 += c3_stack + carry +# asm 1: adcq <c3_stack=stack64#11,<rz3=int64#5 +# asm 2: adcq <c3_stack=80(%rsp),<rz3=%r8 +adcq 80(%rsp),%r8 + +# qhasm: addt0 = 0 +# asm 1: mov $0,>addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae <addt0=int64#6,<addt1=int64#7 +# asm 2: cmovae <addt0=%r9,<addt1=%rax +cmovae %r9,%rax + +# qhasm: carry? rz0 += addt1 +# asm 1: add <addt1=int64#7,<rz0=int64#2 +# asm 2: add <addt1=%rax,<rz0=%rsi +add %rax,%rsi + +# qhasm: carry? rz1 += addt0 + carry +# asm 1: adc <addt0=int64#6,<rz1=int64#3 +# asm 2: adc <addt0=%r9,<rz1=%rdx +adc %r9,%rdx + +# qhasm: carry? rz2 += addt0 + carry +# asm 1: adc <addt0=int64#6,<rz2=int64#4 +# asm 2: adc <addt0=%r9,<rz2=%rcx +adc %r9,%rcx + +# qhasm: carry? rz3 += addt0 + carry +# asm 1: adc <addt0=int64#6,<rz3=int64#5 +# asm 2: adc <addt0=%r9,<rz3=%r8 +adc %r9,%r8 + +# qhasm: addt0 = addt1 if carry +# asm 1: cmovc <addt1=int64#7,<addt0=int64#6 +# asm 2: cmovc <addt1=%rax,<addt0=%r9 +cmovc %rax,%r9 + +# qhasm: rz0 += addt0 +# asm 1: add <addt0=int64#6,<rz0=int64#2 +# asm 2: add <addt0=%r9,<rz0=%rsi +add %r9,%rsi + +# qhasm: carry? rt0 -= c0_stack +# asm 1: subq <c0_stack=stack64#8,<rt0=int64#11 +# asm 2: subq <c0_stack=56(%rsp),<rt0=%r13 +subq 56(%rsp),%r13 + +# qhasm: carry? rt1 -= c1_stack - carry +# asm 1: sbbq <c1_stack=stack64#9,<rt1=int64#12 +# asm 2: sbbq <c1_stack=64(%rsp),<rt1=%r14 +sbbq 64(%rsp),%r14 + +# qhasm: carry? rt2 -= c2_stack - carry +# asm 1: sbbq <c2_stack=stack64#10,<rt2=int64#13 +# asm 2: sbbq <c2_stack=72(%rsp),<rt2=%r15 +sbbq 72(%rsp),%r15 + +# qhasm: carry? rt3 -= c3_stack - carry +# asm 1: sbbq <c3_stack=stack64#11,<rt3=int64#14 +# asm 2: sbbq <c3_stack=80(%rsp),<rt3=%rbx +sbbq 80(%rsp),%rbx + +# qhasm: subt0 = 0 +# asm 1: mov $0,>subt0=int64#6 +# asm 2: mov $0,>subt0=%r9 +mov $0,%r9 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#7 +# asm 2: mov $38,>subt1=%rax +mov $38,%rax + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae <subt0=int64#6,<subt1=int64#7 +# asm 2: cmovae <subt0=%r9,<subt1=%rax +cmovae %r9,%rax + +# qhasm: carry? 
rt0 -= subt1 +# asm 1: sub <subt1=int64#7,<rt0=int64#11 +# asm 2: sub <subt1=%rax,<rt0=%r13 +sub %rax,%r13 + +# qhasm: carry? rt1 -= subt0 - carry +# asm 1: sbb <subt0=int64#6,<rt1=int64#12 +# asm 2: sbb <subt0=%r9,<rt1=%r14 +sbb %r9,%r14 + +# qhasm: carry? rt2 -= subt0 - carry +# asm 1: sbb <subt0=int64#6,<rt2=int64#13 +# asm 2: sbb <subt0=%r9,<rt2=%r15 +sbb %r9,%r15 + +# qhasm: carry? rt3 -= subt0 - carry +# asm 1: sbb <subt0=int64#6,<rt3=int64#14 +# asm 2: sbb <subt0=%r9,<rt3=%rbx +sbb %r9,%rbx + +# qhasm: subt0 = subt1 if carry +# asm 1: cmovc <subt1=int64#7,<subt0=int64#6 +# asm 2: cmovc <subt1=%rax,<subt0=%r9 +cmovc %rax,%r9 + +# qhasm: rt0 -= subt0 +# asm 1: sub <subt0=int64#6,<rt0=int64#11 +# asm 2: sub <subt0=%r9,<rt0=%r13 +sub %r9,%r13 + +# qhasm: *(uint64 *)(rp + 32) = rz0 +# asm 1: movq <rz0=int64#2,32(<rp=int64#1) +# asm 2: movq <rz0=%rsi,32(<rp=%rdi) +movq %rsi,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = rz1 +# asm 1: movq <rz1=int64#3,40(<rp=int64#1) +# asm 2: movq <rz1=%rdx,40(<rp=%rdi) +movq %rdx,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = rz2 +# asm 1: movq <rz2=int64#4,48(<rp=int64#1) +# asm 2: movq <rz2=%rcx,48(<rp=%rdi) +movq %rcx,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = rz3 +# asm 1: movq <rz3=int64#5,56(<rp=int64#1) +# asm 2: movq <rz3=%r8,56(<rp=%rdi) +movq %r8,56(%rdi) + +# qhasm: *(uint64 *)(rp + 96) = rt0 +# asm 1: movq <rt0=int64#11,96(<rp=int64#1) +# asm 2: movq <rt0=%r13,96(<rp=%rdi) +movq %r13,96(%rdi) + +# qhasm: *(uint64 *)(rp + 104) = rt1 +# asm 1: movq <rt1=int64#12,104(<rp=int64#1) +# asm 2: movq <rt1=%r14,104(<rp=%rdi) +movq %r14,104(%rdi) + +# qhasm: *(uint64 *)(rp + 112) = rt2 +# asm 1: movq <rt2=int64#13,112(<rp=int64#1) +# asm 2: movq <rt2=%r15,112(<rp=%rdi) +movq %r15,112(%rdi) + +# qhasm: *(uint64 *)(rp + 120) = rt3 +# asm 1: movq <rt3=int64#14,120(<rp=int64#1) +# asm 2: movq <rt3=%rbx,120(<rp=%rdi) +movq %rbx,120(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/ge25519_scalarmult_base.c b/ext/ed25519-amd64-asm/ge25519_scalarmult_base.c new file mode 100644 index 00000000..986abaf6 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_scalarmult_base.c @@ -0,0 +1,68 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +/* Multiples of the base point in Niels' representation */ +static const ge25519_niels 
ge25519_base_multiples_niels[] = { +#ifdef SMALLTABLES +#include "ge25519_base_niels_smalltables.data" +#else +#include "ge25519_base_niels.data" +#endif +}; + +/* d */ +static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}}; + +void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s) +{ + signed char b[64]; + int i; + ge25519_niels t; + fe25519 d; + + sc25519_window4(b,s); + +#ifdef SMALLTABLES + ge25519_p1p1 tp1p1; + choose_t((ge25519_niels *)r, 0, (signed long long) b[1], ge25519_base_multiples_niels); + fe25519_sub(&d, &r->y, &r->x); + fe25519_add(&r->y, &r->y, &r->x); + r->x = d; + r->t = r->z; + fe25519_setint(&r->z,2); + for(i=3;i<64;i+=2) + { + choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p3(r, &tp1p1); + choose_t(&t, (unsigned long long) 0, (signed long long) b[0], ge25519_base_multiples_niels); + fe25519_mul(&t.t2d, &t.t2d, &ecd); + ge25519_nielsadd2(r, &t); + for(i=2;i<64;i+=2) + { + choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } +#else + choose_t((ge25519_niels *)r, 0, (signed long long) b[0], ge25519_base_multiples_niels); + fe25519_sub(&d, &r->y, &r->x); + fe25519_add(&r->y, &r->y, &r->x); + r->x = d; + r->t = r->z; + fe25519_setint(&r->z,2); + for(i=1;i<64;i++) + { + choose_t(&t, (unsigned long long) i, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } +#endif +} diff --git a/ext/ed25519-amd64-asm/ge25519_unpackneg.c b/ext/ed25519-amd64-asm/ge25519_unpackneg.c new file mode 100644 index 00000000..ff16fd20 --- /dev/null +++ b/ext/ed25519-amd64-asm/ge25519_unpackneg.c @@ -0,0 +1,60 @@ +#include "fe25519.h" +#include "ge25519.h" + +/* d */ +static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}}; +/* sqrt(-1) */ +static const fe25519 sqrtm1 = {{0xC4EE1B274A0EA0B0, 0x2F431806AD2FE478, 0x2B4D00993DFBD7A7, 0x2B8324804FC1DF0B}}; + +/* return 0 on success, -1 otherwise */ +int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32]) +{ + fe25519 t, chk, num, den, den2, den4, den6; + unsigned char par = p[31] >> 7; + + fe25519_setint(&r->z,1); + fe25519_unpack(&r->y, p); + fe25519_square(&num, &r->y); /* x = y^2 */ + fe25519_mul(&den, &num, &ecd); /* den = dy^2 */ + fe25519_sub(&num, &num, &r->z); /* x = y^2-1 */ + fe25519_add(&den, &r->z, &den); /* den = dy^2+1 */ + + /* Computation of sqrt(num/den) + 1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8) + */ + fe25519_square(&den2, &den); + fe25519_square(&den4, &den2); + fe25519_mul(&den6, &den4, &den2); + fe25519_mul(&t, &den6, &num); + fe25519_mul(&t, &t, &den); + + fe25519_pow2523(&t, &t); + /* 2. computation of r->x = t * num * den^3 + */ + fe25519_mul(&t, &t, &num); + fe25519_mul(&t, &t, &den); + fe25519_mul(&t, &t, &den); + fe25519_mul(&r->x, &t, &den); + + /* 3. 
Check whether sqrt computation gave correct result, multiply by sqrt(-1) if not: + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + fe25519_mul(&r->x, &r->x, &sqrtm1); + + /* 4. Now we have one of the two square roots, except if input was not a square + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + return -1; + + /* 5. Choose the desired square root according to parity: + */ + if(fe25519_getparity(&r->x) != (1-par)) + fe25519_neg(&r->x, &r->x); + + fe25519_mul(&r->t, &r->x, &r->y); + return 0; +} diff --git a/ext/ed25519-amd64-asm/heap_rootreplaced.s b/ext/ed25519-amd64-asm/heap_rootreplaced.s new file mode 100644 index 00000000..8fe385b4 --- /dev/null +++ b/ext/ed25519-amd64-asm/heap_rootreplaced.s @@ -0,0 +1,476 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced +.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced +_crypto_sign_ed25519_amd64_64_heap_rootreplaced: +crypto_sign_ed25519_amd64_64_heap_rootreplaced: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# 
asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov <pp=int64#4,>prc=int64#5 +# asm 2: mov <pp=%rcx,>prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,<prc=int64#5,>prc=int64#5 +# asm 2: imulq $2,<prc=%r8,>prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov <prc=int64#5,>pc=int64#6 +# asm 2: mov <prc=%r8,>pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,<prc=int64#5 +# asm 2: add $2,<prc=%r8 +add $2,%r8 + +# qhasm: pc += 1 +# asm 1: add $1,<pc=int64#6 +# asm 2: add $1,<pc=%r9 +add $1,%r9 + +# qhasm: unsigned>? hlen - prc +# asm 1: cmp <prc=int64#5,<hlen=int64#2 +# asm 2: cmp <prc=%r8,<hlen=%rsi +cmp %r8,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop if !unsigned> +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7 +# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,<sprc=int64#7 +# asm 2: shl $5,<sprc=%rax +shl $5,%rax + +# qhasm: sprc += sp +# asm 1: add <sp=int64#3,<sprc=int64#7 +# asm 2: add <sp=%rdx,<sprc=%rax +add %rdx,%rax + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8 +# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#8 +# asm 2: shl $5,<spc=%r10 +shl $5,%r10 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#8 +# asm 2: add <sp=%rdx,<spc=%r10 +add %rdx,%r10 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#8),>c0=int64#9 +# asm 2: movq 0(<spc=%r10),>c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(<spc=int64#8),>c1=int64#10 +# asm 2: movq 8(<spc=%r10),>c1=%r12 +movq 8(%r10),%r12 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(<spc=int64#8),>c2=int64#11 +# asm 2: movq 16(<spc=%r10),>c2=%r13 +movq 16(%r10),%r13 + +# qhasm: c3 = *(uint64 *)(spc + 24) +# asm 1: movq 24(<spc=int64#8),>c3=int64#12 +# asm 2: movq 24(<spc=%r10),>c3=%r14 +movq 24(%r10),%r14 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(<sprc=int64#7),<c0=int64#9 +# asm 2: subq 0(<sprc=%rax),<c0=%r11 +subq 0(%rax),%r11 + +# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry +# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10 +# asm 2: sbbq 8(<sprc=%rax),<c1=%r12 +sbbq 8(%rax),%r12 + +# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry +# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11 +# asm 2: sbbq 16(<sprc=%rax),<c2=%r13 +sbbq 16(%rax),%r13 + +# qhasm: carry? 
c3 -= *(uint64 *)(sprc + 24) - carry +# asm 1: sbbq 24(<sprc=int64#7),<c3=int64#12 +# asm 2: sbbq 24(<sprc=%rax),<c3=%r14 +sbbq 24(%rax),%r14 + +# qhasm: pc = prc if carry +# asm 1: cmovc <prc=int64#5,<pc=int64#6 +# asm 2: cmovc <prc=%r8,<pc=%r9 +cmovc %r8,%r9 + +# qhasm: spc = sprc if carry +# asm 1: cmovc <sprc=int64#7,<spc=int64#8 +# asm 2: cmovc <sprc=%rax,<spc=%r10 +cmovc %rax,%r10 + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#8 +# asm 2: sub <sp=%rdx,<spc=%r10 +sub %rdx,%r10 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#8 +# asm 2: shr $5,<spc=%r10 +shr $5,%r10 + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8) +movq %r10,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8) +movq %r8,(%rdi,%r9,8) + +# qhasm: pp = pc +# asm 1: mov <pc=int64#6,>pp=int64#4 +# asm 2: mov <pc=%r9,>pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov <pp=int64#4,>pc=int64#2 +# asm 2: mov <pp=%rcx,>pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,<pp=int64#4 +# asm 2: sub $1,<pp=%rcx +sub $1,%rcx + +# qhasm: (uint64) pp >>= 1 +# asm 1: shr $1,<pp=int64#4 +# asm 2: shr $1,<pp=%rcx +shr $1,%rcx + +# qhasm: unsigned>? pc - 0 +# asm 1: cmp $0,<pc=int64#2 +# asm 2: cmp $0,<pc=%rsi +cmp $0,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto end if !unsigned> +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6 +# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,<spp=int64#5 +# asm 2: shl $5,<spp=%r8 +shl $5,%r8 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#6 +# asm 2: shl $5,<spc=%r9 +shl $5,%r9 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#6 +# asm 2: add <sp=%rdx,<spc=%r9 +add %rdx,%r9 + +# qhasm: spp += sp +# asm 1: add <sp=int64#3,<spp=int64#5 +# asm 2: add <sp=%rdx,<spp=%r8 +add %rdx,%r8 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#6),>c0=int64#7 +# asm 2: movq 0(<spc=%r9),>c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(<spc=int64#6),>c1=int64#8 +# asm 2: movq 8(<spc=%r9),>c1=%r10 +movq 8(%r9),%r10 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(<spc=int64#6),>c2=int64#9 +# asm 2: movq 16(<spc=%r9),>c2=%r11 +movq 16(%r9),%r11 + +# qhasm: c3 = *(uint64 *)(spc + 24) +# asm 1: movq 24(<spc=int64#6),>c3=int64#10 +# asm 2: movq 24(<spc=%r9),>c3=%r12 +movq 24(%r9),%r12 + +# qhasm: carry? c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(<spp=int64#5),<c0=int64#7 +# asm 2: subq 0(<spp=%r8),<c0=%rax +subq 0(%r8),%rax + +# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry +# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8 +# asm 2: sbbq 8(<spp=%r8),<c1=%r10 +sbbq 8(%r8),%r10 + +# qhasm: carry? 
c2 -= *(uint64 *)(spp + 16) - carry +# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9 +# asm 2: sbbq 16(<spp=%r8),<c2=%r11 +sbbq 16(%r8),%r11 + +# qhasm: carry? c3 -= *(uint64 *)(spp + 24) - carry +# asm 1: sbbq 24(<spp=int64#5),<c3=int64#10 +# asm 2: sbbq 24(<spp=%r8),<c3=%r12 +sbbq 24(%r8),%r12 +# comment:fp stack unchanged by jump + +# qhasm: goto end if carry +jc ._end + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#6 +# asm 2: sub <sp=%rdx,<spc=%r9 +sub %rdx,%r9 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#6 +# asm 2: shr $5,<spc=%r9 +shr $5,%r9 + +# qhasm: spp -= sp +# asm 1: sub <sp=int64#3,<spp=int64#5 +# asm 2: sub <sp=%rdx,<spp=%r8 +sub %rdx,%r8 + +# qhasm: (uint64) spp >>= 5 +# asm 1: shr $5,<spp=int64#5 +# asm 2: shr $5,<spp=%r8 +shr $5,%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8) +movq %r9,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8) +movq %r8,(%rdi,%rsi,8) +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop +jmp ._siftuploop + +# qhasm: end: +._end: + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/heap_rootreplaced_1limb.s b/ext/ed25519-amd64-asm/heap_rootreplaced_1limb.s new file mode 100644 index 00000000..488e9c52 --- /dev/null +++ b/ext/ed25519-amd64-asm/heap_rootreplaced_1limb.s @@ -0,0 +1,416 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# 
qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb +.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb +_crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb: +crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov <pp=int64#4,>prc=int64#5 +# asm 2: mov <pp=%rcx,>prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,<prc=int64#5,>prc=int64#5 +# asm 2: imulq $2,<prc=%r8,>prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov <prc=int64#5,>pc=int64#6 +# asm 2: mov <prc=%r8,>pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,<prc=int64#5 +# asm 2: add $2,<prc=%r8 +add $2,%r8 + +# qhasm: pc += 1 +# asm 1: add $1,<pc=int64#6 +# asm 2: add $1,<pc=%r9 +add $1,%r9 + +# qhasm: unsigned>? 
hlen - prc +# asm 1: cmp <prc=int64#5,<hlen=int64#2 +# asm 2: cmp <prc=%r8,<hlen=%rsi +cmp %r8,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop if !unsigned> +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7 +# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,<sprc=int64#7 +# asm 2: shl $5,<sprc=%rax +shl $5,%rax + +# qhasm: sprc += sp +# asm 1: add <sp=int64#3,<sprc=int64#7 +# asm 2: add <sp=%rdx,<sprc=%rax +add %rdx,%rax + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8 +# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#8 +# asm 2: shl $5,<spc=%r10 +shl $5,%r10 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#8 +# asm 2: add <sp=%rdx,<spc=%r10 +add %rdx,%r10 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#8),>c0=int64#9 +# asm 2: movq 0(<spc=%r10),>c0=%r11 +movq 0(%r10),%r11 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(<sprc=int64#7),<c0=int64#9 +# asm 2: subq 0(<sprc=%rax),<c0=%r11 +subq 0(%rax),%r11 + +# qhasm: pc = prc if carry +# asm 1: cmovc <prc=int64#5,<pc=int64#6 +# asm 2: cmovc <prc=%r8,<pc=%r9 +cmovc %r8,%r9 + +# qhasm: spc = sprc if carry +# asm 1: cmovc <sprc=int64#7,<spc=int64#8 +# asm 2: cmovc <sprc=%rax,<spc=%r10 +cmovc %rax,%r10 + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#8 +# asm 2: sub <sp=%rdx,<spc=%r10 +sub %rdx,%r10 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#8 +# asm 2: shr $5,<spc=%r10 +shr $5,%r10 + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8) +movq %r10,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8) +movq %r8,(%rdi,%r9,8) + +# qhasm: pp = pc +# asm 1: mov <pc=int64#6,>pp=int64#4 +# asm 2: mov <pc=%r9,>pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov <pp=int64#4,>pc=int64#2 +# asm 2: mov <pp=%rcx,>pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,<pp=int64#4 +# asm 2: sub $1,<pp=%rcx +sub $1,%rcx + +# qhasm: (uint64) pp >>= 1 +# asm 1: shr $1,<pp=int64#4 +# asm 2: shr $1,<pp=%rcx +shr $1,%rcx + +# qhasm: unsigned>? 
pc - 0 +# asm 1: cmp $0,<pc=int64#2 +# asm 2: cmp $0,<pc=%rsi +cmp $0,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto end if !unsigned> +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6 +# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,<spp=int64#5 +# asm 2: shl $5,<spp=%r8 +shl $5,%r8 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#6 +# asm 2: shl $5,<spc=%r9 +shl $5,%r9 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#6 +# asm 2: add <sp=%rdx,<spc=%r9 +add %rdx,%r9 + +# qhasm: spp += sp +# asm 1: add <sp=int64#3,<spp=int64#5 +# asm 2: add <sp=%rdx,<spp=%r8 +add %rdx,%r8 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#6),>c0=int64#7 +# asm 2: movq 0(<spc=%r9),>c0=%rax +movq 0(%r9),%rax + +# qhasm: carry? c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(<spp=int64#5),<c0=int64#7 +# asm 2: subq 0(<spp=%r8),<c0=%rax +subq 0(%r8),%rax +# comment:fp stack unchanged by jump + +# qhasm: goto end if carry +jc ._end + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#6 +# asm 2: sub <sp=%rdx,<spc=%r9 +sub %rdx,%r9 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#6 +# asm 2: shr $5,<spc=%r9 +shr $5,%r9 + +# qhasm: spp -= sp +# asm 1: sub <sp=int64#3,<spp=int64#5 +# asm 2: sub <sp=%rdx,<spp=%r8 +sub %rdx,%r8 + +# qhasm: (uint64) spp >>= 5 +# asm 1: shr $5,<spp=int64#5 +# asm 2: shr $5,<spp=%r8 +shr $5,%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8) +movq %r9,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8) +movq %r8,(%rdi,%rsi,8) +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop +jmp ._siftuploop + +# qhasm: end: +._end: + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/heap_rootreplaced_2limbs.s b/ext/ed25519-amd64-asm/heap_rootreplaced_2limbs.s new file mode 100644 index 00000000..f9259184 --- /dev/null +++ 
b/ext/ed25519-amd64-asm/heap_rootreplaced_2limbs.s @@ -0,0 +1,436 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs +.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs +_crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs: +crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov <pp=int64#4,>prc=int64#5 +# asm 2: mov <pp=%rcx,>prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,<prc=int64#5,>prc=int64#5 +# asm 2: imulq $2,<prc=%r8,>prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov <prc=int64#5,>pc=int64#6 +# asm 2: mov <prc=%r8,>pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,<prc=int64#5 +# asm 2: add $2,<prc=%r8 +add $2,%r8 + +# qhasm: pc += 1 +# asm 1: add $1,<pc=int64#6 +# asm 2: add $1,<pc=%r9 +add $1,%r9 + +# qhasm: unsigned>? 
hlen - prc +# asm 1: cmp <prc=int64#5,<hlen=int64#2 +# asm 2: cmp <prc=%r8,<hlen=%rsi +cmp %r8,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop if !unsigned> +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7 +# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,<sprc=int64#7 +# asm 2: shl $5,<sprc=%rax +shl $5,%rax + +# qhasm: sprc += sp +# asm 1: add <sp=int64#3,<sprc=int64#7 +# asm 2: add <sp=%rdx,<sprc=%rax +add %rdx,%rax + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8 +# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#8 +# asm 2: shl $5,<spc=%r10 +shl $5,%r10 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#8 +# asm 2: add <sp=%rdx,<spc=%r10 +add %rdx,%r10 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#8),>c0=int64#9 +# asm 2: movq 0(<spc=%r10),>c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(<spc=int64#8),>c1=int64#10 +# asm 2: movq 8(<spc=%r10),>c1=%r12 +movq 8(%r10),%r12 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(<sprc=int64#7),<c0=int64#9 +# asm 2: subq 0(<sprc=%rax),<c0=%r11 +subq 0(%rax),%r11 + +# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry +# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10 +# asm 2: sbbq 8(<sprc=%rax),<c1=%r12 +sbbq 8(%rax),%r12 + +# qhasm: pc = prc if carry +# asm 1: cmovc <prc=int64#5,<pc=int64#6 +# asm 2: cmovc <prc=%r8,<pc=%r9 +cmovc %r8,%r9 + +# qhasm: spc = sprc if carry +# asm 1: cmovc <sprc=int64#7,<spc=int64#8 +# asm 2: cmovc <sprc=%rax,<spc=%r10 +cmovc %rax,%r10 + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#8 +# asm 2: sub <sp=%rdx,<spc=%r10 +sub %rdx,%r10 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#8 +# asm 2: shr $5,<spc=%r10 +shr $5,%r10 + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8) +movq %r10,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8) +movq %r8,(%rdi,%r9,8) + +# qhasm: pp = pc +# asm 1: mov <pc=int64#6,>pp=int64#4 +# asm 2: mov <pc=%r9,>pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov <pp=int64#4,>pc=int64#2 +# asm 2: mov <pp=%rcx,>pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,<pp=int64#4 +# asm 2: sub $1,<pp=%rcx +sub $1,%rcx + +# qhasm: (uint64) pp >>= 1 +# asm 1: shr $1,<pp=int64#4 +# asm 2: shr $1,<pp=%rcx +shr $1,%rcx + +# qhasm: unsigned>? 
pc - 0 +# asm 1: cmp $0,<pc=int64#2 +# asm 2: cmp $0,<pc=%rsi +cmp $0,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto end if !unsigned> +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6 +# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,<spp=int64#5 +# asm 2: shl $5,<spp=%r8 +shl $5,%r8 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#6 +# asm 2: shl $5,<spc=%r9 +shl $5,%r9 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#6 +# asm 2: add <sp=%rdx,<spc=%r9 +add %rdx,%r9 + +# qhasm: spp += sp +# asm 1: add <sp=int64#3,<spp=int64#5 +# asm 2: add <sp=%rdx,<spp=%r8 +add %rdx,%r8 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#6),>c0=int64#7 +# asm 2: movq 0(<spc=%r9),>c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(<spc=int64#6),>c1=int64#8 +# asm 2: movq 8(<spc=%r9),>c1=%r10 +movq 8(%r9),%r10 + +# qhasm: carry? c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(<spp=int64#5),<c0=int64#7 +# asm 2: subq 0(<spp=%r8),<c0=%rax +subq 0(%r8),%rax + +# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry +# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8 +# asm 2: sbbq 8(<spp=%r8),<c1=%r10 +sbbq 8(%r8),%r10 +# comment:fp stack unchanged by jump + +# qhasm: goto end if carry +jc ._end + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#6 +# asm 2: sub <sp=%rdx,<spc=%r9 +sub %rdx,%r9 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#6 +# asm 2: shr $5,<spc=%r9 +shr $5,%r9 + +# qhasm: spp -= sp +# asm 1: sub <sp=int64#3,<spp=int64#5 +# asm 2: sub <sp=%rdx,<spp=%r8 +sub %rdx,%r8 + +# qhasm: (uint64) spp >>= 5 +# asm 1: shr $5,<spp=int64#5 +# asm 2: shr $5,<spp=%r8 +shr $5,%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8) +movq %r9,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8) +movq %r8,(%rdi,%rsi,8) +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop +jmp ._siftuploop + +# qhasm: end: +._end: + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 
48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/heap_rootreplaced_3limbs.s b/ext/ed25519-amd64-asm/heap_rootreplaced_3limbs.s new file mode 100644 index 00000000..dcf890ea --- /dev/null +++ b/ext/ed25519-amd64-asm/heap_rootreplaced_3limbs.s @@ -0,0 +1,456 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs +.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs +_crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs: +crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov <pp=int64#4,>prc=int64#5 +# asm 2: mov <pp=%rcx,>prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,<prc=int64#5,>prc=int64#5 +# asm 2: imulq $2,<prc=%r8,>prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov <prc=int64#5,>pc=int64#6 +# asm 2: mov <prc=%r8,>pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,<prc=int64#5 
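Note on the heap_rootreplaced / heap_rootreplaced_{1,2,3}limbs routines: they all restore the heap property after the root of the index heap has been replaced during the batch-verification multi-scalar multiplication, and differ only in how many 64-bit limbs the sub/sbb chain compares (presumably because the higher limbs are already known to be zero when the shorter variants are used). The control flow is: walk the replaced root down, always swapping with the larger child, until fewer than two children remain, then bubble it back up while it is not smaller than its parent. A rough C sketch of that flow follows; scalar, scalar_lt and heap_rootreplaced_sketch are illustrative names of mine, not the library's API.

typedef struct { unsigned long long v[4]; } scalar;     /* shaped like sc25519 */

/* stand-in for the borrow-chain comparison: returns 1 if x < y as a
   little-endian multi-limb integer (v[0] is the least significant limb) */
int scalar_lt(const scalar *x, const scalar *y)
{
    int i;
    for (i = 3; i >= 0; i--) {
        if (x->v[i] < y->v[i]) return 1;
        if (x->v[i] > y->v[i]) return 0;
    }
    return 0;
}

/* h[] is an index heap over scalars[]; h[0] has just been replaced */
void heap_rootreplaced_sketch(unsigned long long *h, unsigned long long hlen,
                              const scalar *scalars)
{
    unsigned long long pp = 0, pc, prc, par, t;

    /* sift down: swap with the larger child until fewer than 2 children exist */
    while (2 * pp + 2 < hlen) {
        pc  = 2 * pp + 1;
        prc = 2 * pp + 2;
        if (scalar_lt(&scalars[h[pc]], &scalars[h[prc]])) pc = prc;
        t = h[pp]; h[pp] = h[pc]; h[pc] = t;
        pp = pc;
    }
    /* sift back up while not smaller than the parent */
    while (pp > 0) {
        par = (pp - 1) / 2;
        if (scalar_lt(&scalars[h[pp]], &scalars[h[par]])) break;
        t = h[pp]; h[pp] = h[par]; h[par] = t;
        pp = par;
    }
}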
+# asm 2: add $2,<prc=%r8 +add $2,%r8 + +# qhasm: pc += 1 +# asm 1: add $1,<pc=int64#6 +# asm 2: add $1,<pc=%r9 +add $1,%r9 + +# qhasm: unsigned>? hlen - prc +# asm 1: cmp <prc=int64#5,<hlen=int64#2 +# asm 2: cmp <prc=%r8,<hlen=%rsi +cmp %r8,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop if !unsigned> +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7 +# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,<sprc=int64#7 +# asm 2: shl $5,<sprc=%rax +shl $5,%rax + +# qhasm: sprc += sp +# asm 1: add <sp=int64#3,<sprc=int64#7 +# asm 2: add <sp=%rdx,<sprc=%rax +add %rdx,%rax + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8 +# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#8 +# asm 2: shl $5,<spc=%r10 +shl $5,%r10 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#8 +# asm 2: add <sp=%rdx,<spc=%r10 +add %rdx,%r10 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#8),>c0=int64#9 +# asm 2: movq 0(<spc=%r10),>c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(<spc=int64#8),>c1=int64#10 +# asm 2: movq 8(<spc=%r10),>c1=%r12 +movq 8(%r10),%r12 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(<spc=int64#8),>c2=int64#11 +# asm 2: movq 16(<spc=%r10),>c2=%r13 +movq 16(%r10),%r13 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(<sprc=int64#7),<c0=int64#9 +# asm 2: subq 0(<sprc=%rax),<c0=%r11 +subq 0(%rax),%r11 + +# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry +# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10 +# asm 2: sbbq 8(<sprc=%rax),<c1=%r12 +sbbq 8(%rax),%r12 + +# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry +# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11 +# asm 2: sbbq 16(<sprc=%rax),<c2=%r13 +sbbq 16(%rax),%r13 + +# qhasm: pc = prc if carry +# asm 1: cmovc <prc=int64#5,<pc=int64#6 +# asm 2: cmovc <prc=%r8,<pc=%r9 +cmovc %r8,%r9 + +# qhasm: spc = sprc if carry +# asm 1: cmovc <sprc=int64#7,<spc=int64#8 +# asm 2: cmovc <sprc=%rax,<spc=%r10 +cmovc %rax,%r10 + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#8 +# asm 2: sub <sp=%rdx,<spc=%r10 +sub %rdx,%r10 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#8 +# asm 2: shr $5,<spc=%r10 +shr $5,%r10 + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8) +movq %r10,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8) +movq %r8,(%rdi,%r9,8) + +# qhasm: pp = pc +# asm 1: mov <pc=int64#6,>pp=int64#4 +# asm 2: mov <pc=%r9,>pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov <pp=int64#4,>pc=int64#2 +# asm 2: mov <pp=%rcx,>pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,<pp=int64#4 +# asm 2: sub $1,<pp=%rcx +sub $1,%rcx + +# qhasm: (uint64) pp >>= 1 +# asm 1: shr $1,<pp=int64#4 +# asm 2: shr $1,<pp=%rcx +shr $1,%rcx + +# qhasm: unsigned>? 
pc - 0 +# asm 1: cmp $0,<pc=int64#2 +# asm 2: cmp $0,<pc=%rsi +cmp $0,%rsi +# comment:fp stack unchanged by jump + +# qhasm: goto end if !unsigned> +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5 +# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6 +# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,<spp=int64#5 +# asm 2: shl $5,<spp=%r8 +shl $5,%r8 + +# qhasm: spc <<= 5 +# asm 1: shl $5,<spc=int64#6 +# asm 2: shl $5,<spc=%r9 +shl $5,%r9 + +# qhasm: spc += sp +# asm 1: add <sp=int64#3,<spc=int64#6 +# asm 2: add <sp=%rdx,<spc=%r9 +add %rdx,%r9 + +# qhasm: spp += sp +# asm 1: add <sp=int64#3,<spp=int64#5 +# asm 2: add <sp=%rdx,<spp=%r8 +add %rdx,%r8 + +# qhasm: c0 = *(uint64 *)(spc + 0) +# asm 1: movq 0(<spc=int64#6),>c0=int64#7 +# asm 2: movq 0(<spc=%r9),>c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(<spc=int64#6),>c1=int64#8 +# asm 2: movq 8(<spc=%r9),>c1=%r10 +movq 8(%r9),%r10 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(<spc=int64#6),>c2=int64#9 +# asm 2: movq 16(<spc=%r9),>c2=%r11 +movq 16(%r9),%r11 + +# qhasm: carry? c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(<spp=int64#5),<c0=int64#7 +# asm 2: subq 0(<spp=%r8),<c0=%rax +subq 0(%r8),%rax + +# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry +# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8 +# asm 2: sbbq 8(<spp=%r8),<c1=%r10 +sbbq 8(%r8),%r10 + +# qhasm: carry? c2 -= *(uint64 *)(spp + 16) - carry +# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9 +# asm 2: sbbq 16(<spp=%r8),<c2=%r11 +sbbq 16(%r8),%r11 +# comment:fp stack unchanged by jump + +# qhasm: goto end if carry +jc ._end + +# qhasm: spc -= sp +# asm 1: sub <sp=int64#3,<spc=int64#6 +# asm 2: sub <sp=%rdx,<spc=%r9 +sub %rdx,%r9 + +# qhasm: (uint64) spc >>= 5 +# asm 1: shr $5,<spc=int64#6 +# asm 2: shr $5,<spc=%r9 +shr $5,%r9 + +# qhasm: spp -= sp +# asm 1: sub <sp=int64#3,<spp=int64#5 +# asm 2: sub <sp=%rdx,<spp=%r8 +sub %rdx,%r8 + +# qhasm: (uint64) spp >>= 5 +# asm 1: shr $5,<spp=int64#5 +# asm 2: shr $5,<spp=%r8 +shr $5,%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8) +# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8) +movq %r9,(%rdi,%rcx,8) + +# qhasm: *(uint64 *)(hp + pc * 8) = spp +# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8) +# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8) +movq %r8,(%rdi,%rsi,8) +# comment:fp stack unchanged by jump + +# qhasm: goto siftuploop +jmp ._siftuploop + +# qhasm: end: +._end: + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# 
asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/hram.c b/ext/ed25519-amd64-asm/hram.c new file mode 100644 index 00000000..6f99fc62 --- /dev/null +++ b/ext/ed25519-amd64-asm/hram.c @@ -0,0 +1,16 @@ +/*#include "crypto_hash_sha512.h"*/ +#include "hram.h" + +extern void ZT_sha512internal(void *digest,const void *data,unsigned int len); + +void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen) +{ + unsigned long long i; + + for (i = 0;i < 32;++i) playground[i] = sm[i]; + for (i = 32;i < 64;++i) playground[i] = pk[i-32]; + for (i = 64;i < smlen;++i) playground[i] = sm[i]; + + /*crypto_hash_sha512(hram,playground,smlen);*/ + ZT_sha512internal(hram,playground,smlen); +} diff --git a/ext/ed25519-amd64-asm/hram.h b/ext/ed25519-amd64-asm/hram.h new file mode 100644 index 00000000..1740c78a --- /dev/null +++ b/ext/ed25519-amd64-asm/hram.h @@ -0,0 +1,8 @@ +#ifndef HRAM_H +#define HRAM_H + +#define get_hram crypto_sign_ed25519_amd64_64_get_hram + +extern void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen); + +#endif diff --git a/ext/ed25519-amd64-asm/implementors b/ext/ed25519-amd64-asm/implementors new file mode 100644 index 00000000..9b5399a3 --- /dev/null +++ b/ext/ed25519-amd64-asm/implementors @@ -0,0 +1,5 @@ +Daniel J. Bernstein +Niels Duif +Tanja Lange +lead: Peter Schwabe +Bo-Yin Yang diff --git a/ext/ed25519-amd64-asm/index_heap.c b/ext/ed25519-amd64-asm/index_heap.c new file mode 100644 index 00000000..f29f7a28 --- /dev/null +++ b/ext/ed25519-amd64-asm/index_heap.c @@ -0,0 +1,58 @@ +#include "sc25519.h" +#include "index_heap.h" + +/* caller's responsibility to ensure hlen>=3 */ +void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars) +{ + h[0] = 0; + unsigned long long i=1; + while(i<hlen) + heap_push(h, &i, i, scalars); +} + +void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars) +{ + unsigned long long i=oldlen; + while(i<newlen) + heap_push(h, &i, i, scalars); +} + + +void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars) +{ + /* Move up towards the root */ + /* XXX: Check size of hlen, whether cast to signed value is ok */ + signed long long pos = *hlen; + signed long long ppos = (pos-1)/2; + unsigned long long t; + h[*hlen] = elem; + while(pos > 0) + { + /* if(sc25519_lt_vartime(&scalars[h[ppos]], &scalars[h[pos]])) */ + if(sc25519_lt(&scalars[h[ppos]], &scalars[h[pos]])) + { + t = h[ppos]; + h[ppos] = h[pos]; + h[pos] = t; + pos = ppos; + ppos = (pos-1)/2; + } + else break; + } + (*hlen)++; +} + +/* Put the largest value in the heap in max1, the second largest in max2 */ +void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars) +{ + *max1 = h[0]; + *max2 = h[1]; + if(sc25519_lt(&scalars[h[1]],&scalars[h[2]])) + *max2 = h[2]; +} + +/* After the root has been replaced, restore heap property */ +/* extern void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); 
+*/ +/* extern void heap_rootreplaced_shortscalars(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +*/ diff --git a/ext/ed25519-amd64-asm/index_heap.h b/ext/ed25519-amd64-asm/index_heap.h new file mode 100644 index 00000000..7dee9161 --- /dev/null +++ b/ext/ed25519-amd64-asm/index_heap.h @@ -0,0 +1,31 @@ +#ifndef INDEX_HEAP_H +#define INDEX_HEAP_H + +#include "sc25519.h" + +#define heap_init crypto_sign_ed25519_amd64_64_heap_init +#define heap_extend crypto_sign_ed25519_amd64_64_heap_extend +#define heap_pop crypto_sign_ed25519_amd64_64_heap_pop +#define heap_push crypto_sign_ed25519_amd64_64_heap_push +#define heap_get2max crypto_sign_ed25519_amd64_64_heap_get2max +#define heap_rootreplaced crypto_sign_ed25519_amd64_64_heap_rootreplaced +#define heap_rootreplaced_3limbs crypto_sign_ed25519_amd64_64_heap_rootreplaced_3limbs +#define heap_rootreplaced_2limbs crypto_sign_ed25519_amd64_64_heap_rootreplaced_2limbs +#define heap_rootreplaced_1limb crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb + +void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); + +void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars); + +unsigned long long heap_pop(unsigned long long *h, unsigned long long *hlen, sc25519 *scalars); + +void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars); + +void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars); + +void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_3limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_2limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_1limb(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); + +#endif diff --git a/ext/ed25519-amd64-asm/keypair.c b/ext/ed25519-amd64-asm/keypair.c new file mode 100644 index 00000000..7e094710 --- /dev/null +++ b/ext/ed25519-amd64-asm/keypair.c @@ -0,0 +1,25 @@ +#include <string.h> +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "randombytes.h" +#include "ge25519.h" + +int crypto_sign_keypair(unsigned char *pk,unsigned char *sk) +{ + unsigned char az[64]; + sc25519 scsk; + ge25519 gepk; + + randombytes(sk,32); + crypto_hash_sha512(az,sk,32); + az[0] &= 248; + az[31] &= 127; + az[31] |= 64; + + sc25519_from32bytes(&scsk,az); + + ge25519_scalarmult_base(&gepk, &scsk); + ge25519_pack(pk, &gepk); + memmove(sk + 32,pk,32); + return 0; +} diff --git a/ext/ed25519-amd64-asm/open.c b/ext/ed25519-amd64-asm/open.c new file mode 100644 index 00000000..104d48dc --- /dev/null +++ b/ext/ed25519-amd64-asm/open.c @@ -0,0 +1,49 @@ +#include <string.h> +#include "crypto_sign.h" +#include "crypto_verify_32.h" +#include "crypto_hash_sha512.h" +#include "ge25519.h" + +int crypto_sign_open( + unsigned char *m,unsigned long long *mlen, + const unsigned char *sm,unsigned long long smlen, + const unsigned char *pk + ) +{ + unsigned char pkcopy[32]; + unsigned char rcopy[32]; + unsigned char hram[64]; + unsigned char rcheck[32]; + ge25519 get1, get2; + sc25519 schram, scs; + + if (smlen < 64) goto badsig; + if (sm[63] & 224) goto badsig; + if (ge25519_unpackneg_vartime(&get1,pk)) goto badsig; + + memmove(pkcopy,pk,32); + memmove(rcopy,sm,32); + + sc25519_from32bytes(&scs, sm+32); + + memmove(m,sm,smlen); + memmove(m + 32,pkcopy,32); + 
crypto_hash_sha512(hram,m,smlen); + + sc25519_from64bytes(&schram, hram); + + ge25519_double_scalarmult_vartime(&get2, &get1, &schram, &scs); + ge25519_pack(rcheck, &get2); + + if (crypto_verify_32(rcopy,rcheck) == 0) { + memmove(m,m + 64,smlen - 64); + memset(m + smlen - 64,0,64); + *mlen = smlen - 64; + return 0; + } + +badsig: + *mlen = (unsigned long long) -1; + memset(m,0,smlen); + return -1; +} diff --git a/ext/ed25519-amd64-asm/sc25519.h b/ext/ed25519-amd64-asm/sc25519.h new file mode 100644 index 00000000..8ff1b1ca --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519.h @@ -0,0 +1,66 @@ +#ifndef SC25519_H +#define SC25519_H + +#define sc25519 crypto_sign_ed25519_amd64_64_sc25519 +#define shortsc25519 crypto_sign_ed25519_amd64_64_shortsc25519 +#define sc25519_from32bytes crypto_sign_ed25519_amd64_64_sc25519_from32bytes +#define shortsc25519_from16bytes crypto_sign_ed25519_amd64_64_shortsc25519_from16bytes +#define sc25519_from64bytes crypto_sign_ed25519_amd64_64_sc25519_from64bytes +#define sc25519_from_shortsc crypto_sign_ed25519_amd64_64_sc25519_from_shortsc +#define sc25519_to32bytes crypto_sign_ed25519_amd64_64_sc25519_to32bytes +#define sc25519_iszero_vartime crypto_sign_ed25519_amd64_64_sc25519_iszero_vartime +#define sc25519_isshort_vartime crypto_sign_ed25519_amd64_64_sc25519_isshort_vartime +#define sc25519_lt crypto_sign_ed25519_amd64_64_sc25519_lt +#define sc25519_add crypto_sign_ed25519_amd64_64_sc25519_add +#define sc25519_sub_nored crypto_sign_ed25519_amd64_64_sc25519_sub_nored +#define sc25519_mul crypto_sign_ed25519_amd64_64_sc25519_mul +#define sc25519_mul_shortsc crypto_sign_ed25519_amd64_64_sc25519_mul_shortsc +#define sc25519_window4 crypto_sign_ed25519_amd64_64_sc25519_window4 +#define sc25519_slide crypto_sign_ed25519_amd64_64_sc25519_slide +#define sc25519_2interleave2 crypto_sign_ed25519_amd64_64_sc25519_2interleave2 +#define sc25519_barrett crypto_sign_ed25519_amd64_64_sc25519_barrett + +typedef struct +{ + unsigned long long v[4]; +} +sc25519; + +typedef struct +{ + unsigned long long v[2]; +} +shortsc25519; + +void sc25519_from32bytes(sc25519 *r, const unsigned char x[32]); + +void sc25519_from64bytes(sc25519 *r, const unsigned char x[64]); + +void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x); + +void sc25519_to32bytes(unsigned char r[32], const sc25519 *x); + +int sc25519_iszero_vartime(const sc25519 *x); + +int sc25519_lt(const sc25519 *x, const sc25519 *y); + +void sc25519_add(sc25519 *r, const sc25519 *x, const sc25519 *y); + +void sc25519_sub_nored(sc25519 *r, const sc25519 *x, const sc25519 *y); + +void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y); + +void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y); + +/* Convert s into a representation of the form \sum_{i=0}^{63}r[i]2^(4*i) + * with r[i] in {-8,...,7} + */ +void sc25519_window4(signed char r[85], const sc25519 *s); + +void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize); + +void sc25519_2interleave2(unsigned char r[127], const sc25519 *s1, const sc25519 *s2); + +void sc25519_barrett(sc25519 *r, unsigned long long x[8]); + +#endif diff --git a/ext/ed25519-amd64-asm/sc25519_add.s b/ext/ed25519-amd64-asm/sc25519_add.s new file mode 100644 index 00000000..71de0024 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_add.s @@ -0,0 +1,232 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# 
qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_add +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_sc25519_add +.globl crypto_sign_ed25519_amd64_64_sc25519_add +_crypto_sign_ed25519_amd64_64_sc25519_add: +crypto_sign_ed25519_amd64_64_sc25519_add: +mov %rsp,%r11 +and $31,%r11 +add $32,%r11 +sub %r11,%rsp + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#1 +# asm 2: movq <caller4=%r14,>caller4_stack=0(%rsp) +movq %r14,0(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#2 +# asm 2: movq <caller5=%r15,>caller5_stack=8(%rsp) +movq %r15,8(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#3 +# asm 2: movq <caller6=%rbx,>caller6_stack=16(%rsp) +movq %rbx,16(%rsp) + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>r0=int64#4 +# asm 2: movq 0(<xp=%rsi),>r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>r1=int64#5 +# asm 2: movq 8(<xp=%rsi),>r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>r2=int64#6 +# asm 2: movq 16(<xp=%rsi),>r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>r3=int64#2 +# asm 2: movq 24(<xp=%rsi),>r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 += *(uint64 *)(yp + 0) +# asm 1: addq 0(<yp=int64#3),<r0=int64#4 +# asm 2: addq 0(<yp=%rdx),<r0=%rcx +addq 0(%rdx),%rcx + +# qhasm: carry? r1 += *(uint64 *)(yp + 8) + carry +# asm 1: adcq 8(<yp=int64#3),<r1=int64#5 +# asm 2: adcq 8(<yp=%rdx),<r1=%r8 +adcq 8(%rdx),%r8 + +# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry +# asm 1: adcq 16(<yp=int64#3),<r2=int64#6 +# asm 2: adcq 16(<yp=%rdx),<r2=%r9 +adcq 16(%rdx),%r9 + +# qhasm: r3 += *(uint64 *)(yp + 24) + carry +# asm 1: adcq 24(<yp=int64#3),<r3=int64#2 +# asm 2: adcq 24(<yp=%rdx),<r3=%rsi +adcq 24(%rdx),%rsi + +# qhasm: t0 = r0 +# asm 1: mov <r0=int64#4,>t0=int64#3 +# asm 2: mov <r0=%rcx,>t0=%rdx +mov %rcx,%rdx + +# qhasm: t1 = r1 +# asm 1: mov <r1=int64#5,>t1=int64#7 +# asm 2: mov <r1=%r8,>t1=%rax +mov %r8,%rax + +# qhasm: t2 = r2 +# asm 1: mov <r2=int64#6,>t2=int64#8 +# asm 2: mov <r2=%r9,>t2=%r10 +mov %r9,%r10 + +# qhasm: t3 = r3 +# asm 1: mov <r3=int64#2,>t3=int64#12 +# asm 2: mov <r3=%rsi,>t3=%r14 +mov %rsi,%r14 + +# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#3 +# asm 2: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rdx +sub crypto_sign_ed25519_amd64_64_ORDER0,%rdx + +# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#7 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=%rax +sbb crypto_sign_ed25519_amd64_64_ORDER1,%rax + +# qhasm: carry? 
t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10 +sbb crypto_sign_ed25519_amd64_64_ORDER2,%r10 + +# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#12 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r14 +sbb crypto_sign_ed25519_amd64_64_ORDER3,%r14 + +# qhasm: r0 = t0 if !unsigned< +# asm 1: cmovae <t0=int64#3,<r0=int64#4 +# asm 2: cmovae <t0=%rdx,<r0=%rcx +cmovae %rdx,%rcx + +# qhasm: r1 = t1 if !unsigned< +# asm 1: cmovae <t1=int64#7,<r1=int64#5 +# asm 2: cmovae <t1=%rax,<r1=%r8 +cmovae %rax,%r8 + +# qhasm: r2 = t2 if !unsigned< +# asm 1: cmovae <t2=int64#8,<r2=int64#6 +# asm 2: cmovae <t2=%r10,<r2=%r9 +cmovae %r10,%r9 + +# qhasm: r3 = t3 if !unsigned< +# asm 1: cmovae <t3=int64#12,<r3=int64#2 +# asm 2: cmovae <t3=%r14,<r3=%rsi +cmovae %r14,%rsi + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#4,0(<rp=int64#1) +# asm 2: movq <r0=%rcx,0(<rp=%rdi) +movq %rcx,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#5,8(<rp=int64#1) +# asm 2: movq <r1=%r8,8(<rp=%rdi) +movq %r8,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#6,16(<rp=int64#1) +# asm 2: movq <r2=%r9,16(<rp=%rdi) +movq %r9,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#2,24(<rp=int64#1) +# asm 2: movq <r3=%rsi,24(<rp=%rdi) +movq %rsi,24(%rdi) + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#1,>caller4=int64#12 +# asm 2: movq <caller4_stack=0(%rsp),>caller4=%r14 +movq 0(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#2,>caller5=int64#13 +# asm 2: movq <caller5_stack=8(%rsp),>caller5=%r15 +movq 8(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#3,>caller6=int64#14 +# asm 2: movq <caller6_stack=16(%rsp),>caller6=%rbx +movq 16(%rsp),%rbx + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/sc25519_barrett.s b/ext/ed25519-amd64-asm/sc25519_barrett.s new file mode 100644 index 00000000..c59f4563 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_barrett.s @@ -0,0 +1,1188 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 q23 + +# qhasm: int64 q24 + +# qhasm: int64 q30 + +# qhasm: int64 q31 + +# qhasm: int64 q32 + +# qhasm: int64 q33 + +# qhasm: int64 r20 + +# qhasm: int64 r21 + +# qhasm: int64 r22 + +# qhasm: int64 r23 + +# qhasm: int64 r24 + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 rax + +# qhasm: int64 rdx + +# qhasm: int64 c + +# qhasm: int64 zero + +# qhasm: int64 mask + +# qhasm: int64 nmask 
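Note on sc25519_barrett: the routine below is a Barrett reduction of a 512-bit input modulo the group order. The high words of the input are multiplied by the precomputed constant MU0..MU4 (which plays the role of 2^512 divided by the order) to obtain an approximate quotient, the quotient times ORDER0..ORDER3 is subtracted back, and a couple of conditional subtractions remove the small overshoot. A scaled-down C illustration of the same technique, for a single 64-bit word and a toy modulus; barrett_reduce and the constants are mine, and the __uint128_t shortcut assumes GCC or Clang on x86-64.

#include <stdint.h>
#include <stdio.h>

/* toy Barrett reduction: with mu = floor(2^64 / m) precomputed, x mod m costs
   one widening multiply, one multiply and at most one conditional subtraction,
   the same shape as the 512->256-bit reduction below, with one limb instead of
   eight */
uint64_t barrett_reduce(uint64_t x, uint64_t m, uint64_t mu)
{
    uint64_t q = (uint64_t)(((__uint128_t)x * mu) >> 64);  /* floor(x/m) or one less */
    uint64_t r = x - q * m;                                /* 0 <= r < 2m */
    if (r >= m) r -= m;                                    /* final fixup */
    return r;
}

int main(void)
{
    uint64_t m  = 1152921504606846979ULL;                  /* 2^60 + 3, arbitrary */
    uint64_t mu = (uint64_t)((((__uint128_t)1) << 64) / m);
    uint64_t x  = 18364758544493064720ULL;
    printf("%d\n", barrett_reduce(x, m, mu) == x % m);     /* prints 1 */
    return 0;
}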
+ +# qhasm: stack64 q30_stack + +# qhasm: stack64 q31_stack + +# qhasm: stack64 q32_stack + +# qhasm: stack64 q33_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_barrett +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_sc25519_barrett +.globl crypto_sign_ed25519_amd64_64_sc25519_barrett +_crypto_sign_ed25519_amd64_64_sc25519_barrett: +crypto_sign_ed25519_amd64_64_sc25519_barrett: +mov %rsp,%r11 +and $31,%r11 +add $96,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: zero ^= zero +# asm 1: xor <zero=int64#4,<zero=int64#4 +# asm 2: xor <zero=%rcx,<zero=%rcx +xor %rcx,%rcx + +# qhasm: q30 ^= q30 +# asm 1: xor <q30=int64#5,<q30=int64#5 +# asm 2: xor <q30=%r8,<q30=%r8 +xor %r8,%r8 + +# qhasm: q31 ^= q31 +# asm 1: xor <q31=int64#6,<q31=int64#6 +# asm 2: xor <q31=%r9,<q31=%r9 +xor %r9,%r9 + +# qhasm: q32 ^= q32 +# asm 1: xor <q32=int64#8,<q32=int64#8 +# asm 2: xor <q32=%r10,<q32=%r10 +xor %r10,%r10 + +# qhasm: q33 ^= q33 +# asm 1: xor <q33=int64#9,<q33=int64#9 +# asm 2: xor <q33=%r11,<q33=%r11 +xor %r11,%r11 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>rax=int64#7 +# asm 2: movq 24(<xp=%rsi),>rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 +mulq crypto_sign_ed25519_amd64_64_MU3 + +# qhasm: q23 = rax +# asm 1: mov <rax=int64#7,>q23=int64#10 +# asm 2: mov <rax=%rax,>q23=%r12 +mov %rax,%r12 + +# qhasm: c = rdx +# asm 1: mov <rdx=int64#3,>c=int64#11 +# asm 2: mov <rdx=%rdx,>c=%r13 +mov %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>rax=int64#7 +# asm 2: movq 24(<xp=%rsi),>rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 +mulq crypto_sign_ed25519_amd64_64_MU4 + +# qhasm: q24 = rax +# asm 1: mov <rax=int64#7,>q24=int64#12 +# asm 2: mov <rax=%rax,>q24=%r14 +mov %rax,%r14 + +# qhasm: carry? 
q24 += c +# asm 1: add <c=int64#11,<q24=int64#12 +# asm 2: add <c=%r13,<q24=%r14 +add %r13,%r14 + +# qhasm: q30 += rdx + carry +# asm 1: adc <rdx=int64#3,<q30=int64#5 +# asm 2: adc <rdx=%rdx,<q30=%r8 +adc %rdx,%r8 + +# qhasm: rax = *(uint64 *)(xp + 32) +# asm 1: movq 32(<xp=int64#2),>rax=int64#7 +# asm 2: movq 32(<xp=%rsi),>rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 +mulq crypto_sign_ed25519_amd64_64_MU2 + +# qhasm: carry? q23 += rax +# asm 1: add <rax=int64#7,<q23=int64#10 +# asm 2: add <rax=%rax,<q23=%r12 +add %rax,%r12 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#11 +# asm 2: adc <rdx=%rdx,<c=%r13 +adc %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 32) +# asm 1: movq 32(<xp=int64#2),>rax=int64#7 +# asm 2: movq 32(<xp=%rsi),>rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 +mulq crypto_sign_ed25519_amd64_64_MU3 + +# qhasm: carry? q24 += rax +# asm 1: add <rax=int64#7,<q24=int64#12 +# asm 2: add <rax=%rax,<q24=%r14 +add %rax,%r14 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q24 += c +# asm 1: add <c=int64#11,<q24=int64#12 +# asm 2: add <c=%r13,<q24=%r14 +add %r13,%r14 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#11 +# asm 2: adc <rdx=%rdx,<c=%r13 +adc %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 32) +# asm 1: movq 32(<xp=int64#2),>rax=int64#7 +# asm 2: movq 32(<xp=%rsi),>rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 +mulq crypto_sign_ed25519_amd64_64_MU4 + +# qhasm: carry? q30 += rax +# asm 1: add <rax=int64#7,<q30=int64#5 +# asm 2: add <rax=%rax,<q30=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q30 += c +# asm 1: add <c=int64#11,<q30=int64#5 +# asm 2: add <c=%r13,<q30=%r8 +add %r13,%r8 + +# qhasm: q31 += rdx + carry +# asm 1: adc <rdx=int64#3,<q31=int64#6 +# asm 2: adc <rdx=%rdx,<q31=%r9 +adc %rdx,%r9 + +# qhasm: rax = *(uint64 *)(xp + 40) +# asm 1: movq 40(<xp=int64#2),>rax=int64#7 +# asm 2: movq 40(<xp=%rsi),>rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1 +mulq crypto_sign_ed25519_amd64_64_MU1 + +# qhasm: carry? q23 += rax +# asm 1: add <rax=int64#7,<q23=int64#10 +# asm 2: add <rax=%rax,<q23=%r12 +add %rax,%r12 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#11 +# asm 2: adc <rdx=%rdx,<c=%r13 +adc %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 40) +# asm 1: movq 40(<xp=int64#2),>rax=int64#7 +# asm 2: movq 40(<xp=%rsi),>rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 +mulq crypto_sign_ed25519_amd64_64_MU2 + +# qhasm: carry? q24 += rax +# asm 1: add <rax=int64#7,<q24=int64#12 +# asm 2: add <rax=%rax,<q24=%r14 +add %rax,%r14 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? 
q24 += c +# asm 1: add <c=int64#11,<q24=int64#12 +# asm 2: add <c=%r13,<q24=%r14 +add %r13,%r14 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#11 +# asm 2: adc <rdx=%rdx,<c=%r13 +adc %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 40) +# asm 1: movq 40(<xp=int64#2),>rax=int64#7 +# asm 2: movq 40(<xp=%rsi),>rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 +mulq crypto_sign_ed25519_amd64_64_MU3 + +# qhasm: carry? q30 += rax +# asm 1: add <rax=int64#7,<q30=int64#5 +# asm 2: add <rax=%rax,<q30=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q30 += c +# asm 1: add <c=int64#11,<q30=int64#5 +# asm 2: add <c=%r13,<q30=%r8 +add %r13,%r8 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#11 +# asm 2: adc <rdx=%rdx,<c=%r13 +adc %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 40) +# asm 1: movq 40(<xp=int64#2),>rax=int64#7 +# asm 2: movq 40(<xp=%rsi),>rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 +mulq crypto_sign_ed25519_amd64_64_MU4 + +# qhasm: carry? q31 += rax +# asm 1: add <rax=int64#7,<q31=int64#6 +# asm 2: add <rax=%rax,<q31=%r9 +add %rax,%r9 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q31 += c +# asm 1: add <c=int64#11,<q31=int64#6 +# asm 2: add <c=%r13,<q31=%r9 +add %r13,%r9 + +# qhasm: q32 += rdx + carry +# asm 1: adc <rdx=int64#3,<q32=int64#8 +# asm 2: adc <rdx=%rdx,<q32=%r10 +adc %rdx,%r10 + +# qhasm: rax = *(uint64 *)(xp + 48) +# asm 1: movq 48(<xp=int64#2),>rax=int64#7 +# asm 2: movq 48(<xp=%rsi),>rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU0 +mulq crypto_sign_ed25519_amd64_64_MU0 + +# qhasm: carry? q23 += rax +# asm 1: add <rax=int64#7,<q23=int64#10 +# asm 2: add <rax=%rax,<q23=%r12 +add %rax,%r12 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#10 +# asm 2: adc <rdx=%rdx,<c=%r12 +adc %rdx,%r12 + +# qhasm: rax = *(uint64 *)(xp + 48) +# asm 1: movq 48(<xp=int64#2),>rax=int64#7 +# asm 2: movq 48(<xp=%rsi),>rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1 +mulq crypto_sign_ed25519_amd64_64_MU1 + +# qhasm: carry? q24 += rax +# asm 1: add <rax=int64#7,<q24=int64#12 +# asm 2: add <rax=%rax,<q24=%r14 +add %rax,%r14 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q24 += c +# asm 1: add <c=int64#10,<q24=int64#12 +# asm 2: add <c=%r12,<q24=%r14 +add %r12,%r14 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#10 +# asm 2: adc <rdx=%rdx,<c=%r12 +adc %rdx,%r12 + +# qhasm: rax = *(uint64 *)(xp + 48) +# asm 1: movq 48(<xp=int64#2),>rax=int64#7 +# asm 2: movq 48(<xp=%rsi),>rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 +mulq crypto_sign_ed25519_amd64_64_MU2 + +# qhasm: carry? 
q30 += rax +# asm 1: add <rax=int64#7,<q30=int64#5 +# asm 2: add <rax=%rax,<q30=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q30 += c +# asm 1: add <c=int64#10,<q30=int64#5 +# asm 2: add <c=%r12,<q30=%r8 +add %r12,%r8 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#10 +# asm 2: adc <rdx=%rdx,<c=%r12 +adc %rdx,%r12 + +# qhasm: rax = *(uint64 *)(xp + 48) +# asm 1: movq 48(<xp=int64#2),>rax=int64#7 +# asm 2: movq 48(<xp=%rsi),>rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 +mulq crypto_sign_ed25519_amd64_64_MU3 + +# qhasm: carry? q31 += rax +# asm 1: add <rax=int64#7,<q31=int64#6 +# asm 2: add <rax=%rax,<q31=%r9 +add %rax,%r9 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q31 += c +# asm 1: add <c=int64#10,<q31=int64#6 +# asm 2: add <c=%r12,<q31=%r9 +add %r12,%r9 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#10 +# asm 2: adc <rdx=%rdx,<c=%r12 +adc %rdx,%r12 + +# qhasm: rax = *(uint64 *)(xp + 48) +# asm 1: movq 48(<xp=int64#2),>rax=int64#7 +# asm 2: movq 48(<xp=%rsi),>rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 +mulq crypto_sign_ed25519_amd64_64_MU4 + +# qhasm: carry? q32 += rax +# asm 1: add <rax=int64#7,<q32=int64#8 +# asm 2: add <rax=%rax,<q32=%r10 +add %rax,%r10 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q32 += c +# asm 1: add <c=int64#10,<q32=int64#8 +# asm 2: add <c=%r12,<q32=%r10 +add %r12,%r10 + +# qhasm: q33 += rdx + carry +# asm 1: adc <rdx=int64#3,<q33=int64#9 +# asm 2: adc <rdx=%rdx,<q33=%r11 +adc %rdx,%r11 + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(<xp=int64#2),>rax=int64#7 +# asm 2: movq 56(<xp=%rsi),>rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU0 +mulq crypto_sign_ed25519_amd64_64_MU0 + +# qhasm: carry? q24 += rax +# asm 1: add <rax=int64#7,<q24=int64#12 +# asm 2: add <rax=%rax,<q24=%r14 +add %rax,%r14 + +# qhasm: free q24 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#10 +# asm 2: adc <rdx=%rdx,<c=%r12 +adc %rdx,%r12 + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(<xp=int64#2),>rax=int64#7 +# asm 2: movq 56(<xp=%rsi),>rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1 +mulq crypto_sign_ed25519_amd64_64_MU1 + +# qhasm: carry? q30 += rax +# asm 1: add <rax=int64#7,<q30=int64#5 +# asm 2: add <rax=%rax,<q30=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? 
q30 += c +# asm 1: add <c=int64#10,<q30=int64#5 +# asm 2: add <c=%r12,<q30=%r8 +add %r12,%r8 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#10 +# asm 2: adc <rdx=%rdx,<c=%r12 +adc %rdx,%r12 + +# qhasm: q30_stack = q30 +# asm 1: movq <q30=int64#5,>q30_stack=stack64#8 +# asm 2: movq <q30=%r8,>q30_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(<xp=int64#2),>rax=int64#7 +# asm 2: movq 56(<xp=%rsi),>rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 +mulq crypto_sign_ed25519_amd64_64_MU2 + +# qhasm: carry? q31 += rax +# asm 1: add <rax=int64#7,<q31=int64#6 +# asm 2: add <rax=%rax,<q31=%r9 +add %rax,%r9 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q31 += c +# asm 1: add <c=int64#10,<q31=int64#6 +# asm 2: add <c=%r12,<q31=%r9 +add %r12,%r9 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#5 +# asm 2: mov $0,>c=%r8 +mov $0,%r8 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#5 +# asm 2: adc <rdx=%rdx,<c=%r8 +adc %rdx,%r8 + +# qhasm: q31_stack = q31 +# asm 1: movq <q31=int64#6,>q31_stack=stack64#9 +# asm 2: movq <q31=%r9,>q31_stack=64(%rsp) +movq %r9,64(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(<xp=int64#2),>rax=int64#7 +# asm 2: movq 56(<xp=%rsi),>rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 +mulq crypto_sign_ed25519_amd64_64_MU3 + +# qhasm: carry? q32 += rax +# asm 1: add <rax=int64#7,<q32=int64#8 +# asm 2: add <rax=%rax,<q32=%r10 +add %rax,%r10 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? q32 += c +# asm 1: add <c=int64#5,<q32=int64#8 +# asm 2: add <c=%r8,<q32=%r10 +add %r8,%r10 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#5 +# asm 2: mov $0,>c=%r8 +mov $0,%r8 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#5 +# asm 2: adc <rdx=%rdx,<c=%r8 +adc %rdx,%r8 + +# qhasm: q32_stack = q32 +# asm 1: movq <q32=int64#8,>q32_stack=stack64#10 +# asm 2: movq <q32=%r10,>q32_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(<xp=int64#2),>rax=int64#7 +# asm 2: movq 56(<xp=%rsi),>rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 +mulq crypto_sign_ed25519_amd64_64_MU4 + +# qhasm: carry? 
q33 += rax +# asm 1: add <rax=int64#7,<q33=int64#9 +# asm 2: add <rax=%rax,<q33=%r11 +add %rax,%r11 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: q33 += c +# asm 1: add <c=int64#5,<q33=int64#9 +# asm 2: add <c=%r8,<q33=%r11 +add %r8,%r11 + +# qhasm: q33_stack = q33 +# asm 1: movq <q33=int64#9,>q33_stack=stack64#11 +# asm 2: movq <q33=%r11,>q33_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: rax = q30_stack +# asm 1: movq <q30_stack=stack64#8,>rax=int64#7 +# asm 2: movq <q30_stack=56(%rsp),>rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +mulq crypto_sign_ed25519_amd64_64_ORDER0 + +# qhasm: r20 = rax +# asm 1: mov <rax=int64#7,>r20=int64#5 +# asm 2: mov <rax=%rax,>r20=%r8 +mov %rax,%r8 + +# qhasm: c = rdx +# asm 1: mov <rdx=int64#3,>c=int64#6 +# asm 2: mov <rdx=%rdx,>c=%r9 +mov %rdx,%r9 + +# qhasm: rax = q30_stack +# asm 1: movq <q30_stack=stack64#8,>rax=int64#7 +# asm 2: movq <q30_stack=56(%rsp),>rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 +mulq crypto_sign_ed25519_amd64_64_ORDER1 + +# qhasm: r21 = rax +# asm 1: mov <rax=int64#7,>r21=int64#8 +# asm 2: mov <rax=%rax,>r21=%r10 +mov %rax,%r10 + +# qhasm: carry? r21 += c +# asm 1: add <c=int64#6,<r21=int64#8 +# asm 2: add <c=%r9,<r21=%r10 +add %r9,%r10 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#6 +# asm 2: adc <rdx=%rdx,<c=%r9 +adc %rdx,%r9 + +# qhasm: rax = q30_stack +# asm 1: movq <q30_stack=stack64#8,>rax=int64#7 +# asm 2: movq <q30_stack=56(%rsp),>rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 +mulq crypto_sign_ed25519_amd64_64_ORDER2 + +# qhasm: r22 = rax +# asm 1: mov <rax=int64#7,>r22=int64#9 +# asm 2: mov <rax=%rax,>r22=%r11 +mov %rax,%r11 + +# qhasm: carry? r22 += c +# asm 1: add <c=int64#6,<r22=int64#9 +# asm 2: add <c=%r9,<r22=%r11 +add %r9,%r11 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#6 +# asm 2: adc <rdx=%rdx,<c=%r9 +adc %rdx,%r9 + +# qhasm: rax = q30_stack +# asm 1: movq <q30_stack=stack64#8,>rax=int64#7 +# asm 2: movq <q30_stack=56(%rsp),>rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 +mulq crypto_sign_ed25519_amd64_64_ORDER3 + +# qhasm: free rdx + +# qhasm: r23 = rax +# asm 1: mov <rax=int64#7,>r23=int64#10 +# asm 2: mov <rax=%rax,>r23=%r12 +mov %rax,%r12 + +# qhasm: r23 += c +# asm 1: add <c=int64#6,<r23=int64#10 +# asm 2: add <c=%r9,<r23=%r12 +add %r9,%r12 + +# qhasm: rax = q31_stack +# asm 1: movq <q31_stack=stack64#9,>rax=int64#7 +# asm 2: movq <q31_stack=64(%rsp),>rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +mulq crypto_sign_ed25519_amd64_64_ORDER0 + +# qhasm: carry? 
r21 += rax +# asm 1: add <rax=int64#7,<r21=int64#8 +# asm 2: add <rax=%rax,<r21=%r10 +add %rax,%r10 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#6 +# asm 2: adc <rdx=%rdx,<c=%r9 +adc %rdx,%r9 + +# qhasm: rax = q31_stack +# asm 1: movq <q31_stack=stack64#9,>rax=int64#7 +# asm 2: movq <q31_stack=64(%rsp),>rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 +mulq crypto_sign_ed25519_amd64_64_ORDER1 + +# qhasm: carry? r22 += rax +# asm 1: add <rax=int64#7,<r22=int64#9 +# asm 2: add <rax=%rax,<r22=%r11 +add %rax,%r11 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#4,<rdx=int64#3 +# asm 2: adc <zero=%rcx,<rdx=%rdx +adc %rcx,%rdx + +# qhasm: carry? r22 += c +# asm 1: add <c=int64#6,<r22=int64#9 +# asm 2: add <c=%r9,<r22=%r11 +add %r9,%r11 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#4 +# asm 2: mov $0,>c=%rcx +mov $0,%rcx + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#4 +# asm 2: adc <rdx=%rdx,<c=%rcx +adc %rdx,%rcx + +# qhasm: rax = q31_stack +# asm 1: movq <q31_stack=stack64#9,>rax=int64#7 +# asm 2: movq <q31_stack=64(%rsp),>rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 +mulq crypto_sign_ed25519_amd64_64_ORDER2 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add <rax=int64#7,<r23=int64#10 +# asm 2: add <rax=%rax,<r23=%r12 +add %rax,%r12 + +# qhasm: r23 += c +# asm 1: add <c=int64#4,<r23=int64#10 +# asm 2: add <c=%rcx,<r23=%r12 +add %rcx,%r12 + +# qhasm: rax = q32_stack +# asm 1: movq <q32_stack=stack64#10,>rax=int64#7 +# asm 2: movq <q32_stack=72(%rsp),>rax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +mulq crypto_sign_ed25519_amd64_64_ORDER0 + +# qhasm: carry? r22 += rax +# asm 1: add <rax=int64#7,<r22=int64#9 +# asm 2: add <rax=%rax,<r22=%r11 +add %rax,%r11 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#4 +# asm 2: mov $0,>c=%rcx +mov $0,%rcx + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#4 +# asm 2: adc <rdx=%rdx,<c=%rcx +adc %rdx,%rcx + +# qhasm: rax = q32_stack +# asm 1: movq <q32_stack=stack64#10,>rax=int64#7 +# asm 2: movq <q32_stack=72(%rsp),>rax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 +mulq crypto_sign_ed25519_amd64_64_ORDER1 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add <rax=int64#7,<r23=int64#10 +# asm 2: add <rax=%rax,<r23=%r12 +add %rax,%r12 + +# qhasm: r23 += c +# asm 1: add <c=int64#4,<r23=int64#10 +# asm 2: add <c=%rcx,<r23=%r12 +add %rcx,%r12 + +# qhasm: rax = q33_stack +# asm 1: movq <q33_stack=stack64#11,>rax=int64#7 +# asm 2: movq <q33_stack=80(%rsp),>rax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +mulq crypto_sign_ed25519_amd64_64_ORDER0 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add <rax=int64#7,<r23=int64#10 +# asm 2: add <rax=%rax,<r23=%r12 +add %rax,%r12 + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>r0=int64#3 +# asm 2: movq 0(<xp=%rsi),>r0=%rdx +movq 0(%rsi),%rdx + +# qhasm: carry? 
r0 -= r20 +# asm 1: sub <r20=int64#5,<r0=int64#3 +# asm 2: sub <r20=%r8,<r0=%rdx +sub %r8,%rdx + +# qhasm: t0 = r0 +# asm 1: mov <r0=int64#3,>t0=int64#4 +# asm 2: mov <r0=%rdx,>t0=%rcx +mov %rdx,%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>r1=int64#5 +# asm 2: movq 8(<xp=%rsi),>r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: carry? r1 -= r21 - carry +# asm 1: sbb <r21=int64#8,<r1=int64#5 +# asm 2: sbb <r21=%r10,<r1=%r8 +sbb %r10,%r8 + +# qhasm: t1 = r1 +# asm 1: mov <r1=int64#5,>t1=int64#6 +# asm 2: mov <r1=%r8,>t1=%r9 +mov %r8,%r9 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>r2=int64#7 +# asm 2: movq 16(<xp=%rsi),>r2=%rax +movq 16(%rsi),%rax + +# qhasm: carry? r2 -= r22 - carry +# asm 1: sbb <r22=int64#9,<r2=int64#7 +# asm 2: sbb <r22=%r11,<r2=%rax +sbb %r11,%rax + +# qhasm: t2 = r2 +# asm 1: mov <r2=int64#7,>t2=int64#8 +# asm 2: mov <r2=%rax,>t2=%r10 +mov %rax,%r10 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>r3=int64#2 +# asm 2: movq 24(<xp=%rsi),>r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: r3 -= r23 - carry +# asm 1: sbb <r23=int64#10,<r3=int64#2 +# asm 2: sbb <r23=%r12,<r3=%rsi +sbb %r12,%rsi + +# qhasm: t3 = r3 +# asm 1: mov <r3=int64#2,>t3=int64#9 +# asm 2: mov <r3=%rsi,>t3=%r11 +mov %rsi,%r11 + +# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#4 +# asm 2: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rcx +sub crypto_sign_ed25519_amd64_64_ORDER0,%rcx + +# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#6 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=%r9 +sbb crypto_sign_ed25519_amd64_64_ORDER1,%r9 + +# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10 +sbb crypto_sign_ed25519_amd64_64_ORDER2,%r10 + +# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#9 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r11 +sbb crypto_sign_ed25519_amd64_64_ORDER3,%r11 + +# qhasm: r0 = t0 if !unsigned< +# asm 1: cmovae <t0=int64#4,<r0=int64#3 +# asm 2: cmovae <t0=%rcx,<r0=%rdx +cmovae %rcx,%rdx + +# qhasm: t0 = r0 +# asm 1: mov <r0=int64#3,>t0=int64#4 +# asm 2: mov <r0=%rdx,>t0=%rcx +mov %rdx,%rcx + +# qhasm: r1 = t1 if !unsigned< +# asm 1: cmovae <t1=int64#6,<r1=int64#5 +# asm 2: cmovae <t1=%r9,<r1=%r8 +cmovae %r9,%r8 + +# qhasm: t1 = r1 +# asm 1: mov <r1=int64#5,>t1=int64#6 +# asm 2: mov <r1=%r8,>t1=%r9 +mov %r8,%r9 + +# qhasm: r2 = t2 if !unsigned< +# asm 1: cmovae <t2=int64#8,<r2=int64#7 +# asm 2: cmovae <t2=%r10,<r2=%rax +cmovae %r10,%rax + +# qhasm: t2 = r2 +# asm 1: mov <r2=int64#7,>t2=int64#8 +# asm 2: mov <r2=%rax,>t2=%r10 +mov %rax,%r10 + +# qhasm: r3 = t3 if !unsigned< +# asm 1: cmovae <t3=int64#9,<r3=int64#2 +# asm 2: cmovae <t3=%r11,<r3=%rsi +cmovae %r11,%rsi + +# qhasm: t3 = r3 +# asm 1: mov <r3=int64#2,>t3=int64#9 +# asm 2: mov <r3=%rsi,>t3=%r11 +mov %rsi,%r11 + +# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#4 +# asm 2: sub crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rcx +sub crypto_sign_ed25519_amd64_64_ORDER0,%rcx + +# qhasm: carry? 
t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#6 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER1,<t1=%r9 +sbb crypto_sign_ed25519_amd64_64_ORDER1,%r9 + +# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10 +sbb crypto_sign_ed25519_amd64_64_ORDER2,%r10 + +# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry +# asm 1: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#9 +# asm 2: sbb crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r11 +sbb crypto_sign_ed25519_amd64_64_ORDER3,%r11 + +# qhasm: r0 = t0 if !unsigned< +# asm 1: cmovae <t0=int64#4,<r0=int64#3 +# asm 2: cmovae <t0=%rcx,<r0=%rdx +cmovae %rcx,%rdx + +# qhasm: r1 = t1 if !unsigned< +# asm 1: cmovae <t1=int64#6,<r1=int64#5 +# asm 2: cmovae <t1=%r9,<r1=%r8 +cmovae %r9,%r8 + +# qhasm: r2 = t2 if !unsigned< +# asm 1: cmovae <t2=int64#8,<r2=int64#7 +# asm 2: cmovae <t2=%r10,<r2=%rax +cmovae %r10,%rax + +# qhasm: r3 = t3 if !unsigned< +# asm 1: cmovae <t3=int64#9,<r3=int64#2 +# asm 2: cmovae <t3=%r11,<r3=%rsi +cmovae %r11,%rsi + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#3,0(<rp=int64#1) +# asm 2: movq <r0=%rdx,0(<rp=%rdi) +movq %rdx,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#5,8(<rp=int64#1) +# asm 2: movq <r1=%r8,8(<rp=%rdi) +movq %r8,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#7,16(<rp=int64#1) +# asm 2: movq <r2=%rax,16(<rp=%rdi) +movq %rax,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#2,24(<rp=int64#1) +# asm 2: movq <r3=%rsi,24(<rp=%rdi) +movq %rsi,24(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/sc25519_from32bytes.c b/ext/ed25519-amd64-asm/sc25519_from32bytes.c new file mode 100644 index 00000000..7f21e686 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_from32bytes.c @@ -0,0 +1,55 @@ +#include "sc25519.h" + +/*Arithmetic modulo the group order n = 2^252 + 27742317777372353535851937790883648493 + * = 7237005577332262213973186563042994240857116359379907606001950938285454250989 + */ + +/* Contains order, 2*order, 4*order, 8*order, each represented in 4 consecutive unsigned long long */ +static 
const unsigned long long order[16] = {0x5812631A5CF5D3EDULL, 0x14DEF9DEA2F79CD6ULL, + 0x0000000000000000ULL, 0x1000000000000000ULL, + 0xB024C634B9EBA7DAULL, 0x29BDF3BD45EF39ACULL, + 0x0000000000000000ULL, 0x2000000000000000ULL, + 0x60498C6973D74FB4ULL, 0x537BE77A8BDE7359ULL, + 0x0000000000000000ULL, 0x4000000000000000ULL, + 0xC09318D2E7AE9F68ULL, 0xA6F7CEF517BCE6B2ULL, + 0x0000000000000000ULL, 0x8000000000000000ULL}; + +static unsigned long long smaller(unsigned long long a,unsigned long long b) +{ + unsigned long long atop = a >> 32; + unsigned long long abot = a & 4294967295; + unsigned long long btop = b >> 32; + unsigned long long bbot = b & 4294967295; + unsigned long long atopbelowbtop = (atop - btop) >> 63; + unsigned long long atopeqbtop = ((atop ^ btop) - 1) >> 63; + unsigned long long abotbelowbbot = (abot - bbot) >> 63; + return atopbelowbtop | (atopeqbtop & abotbelowbbot); +} + +void sc25519_from32bytes(sc25519 *r, const unsigned char x[32]) +{ + unsigned long long t[4]; + unsigned long long b; + unsigned long long mask; + int i, j; + + /* assuming little-endian */ + r->v[0] = *(unsigned long long *)x; + r->v[1] = *(((unsigned long long *)x)+1); + r->v[2] = *(((unsigned long long *)x)+2); + r->v[3] = *(((unsigned long long *)x)+3); + + for(j=3;j>=0;j--) + { + b=0; + for(i=0;i<4;i++) + { + b += order[4*j+i]; /* no overflow for this particular order */ + t[i] = r->v[i] - b; + b = smaller(r->v[i],b); + } + mask = b - 1; + for(i=0;i<4;i++) + r->v[i] ^= mask & (r->v[i] ^ t[i]); + } +} diff --git a/ext/ed25519-amd64-asm/sc25519_from64bytes.c b/ext/ed25519-amd64-asm/sc25519_from64bytes.c new file mode 100644 index 00000000..8e76a1b3 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_from64bytes.c @@ -0,0 +1,7 @@ +#include "sc25519.h" + +void sc25519_from64bytes(sc25519 *r, const unsigned char x[64]) +{ + /* assuming little-endian representation of unsigned long long */ + sc25519_barrett(r, (unsigned long long *)x); +} diff --git a/ext/ed25519-amd64-asm/sc25519_from_shortsc.c b/ext/ed25519-amd64-asm/sc25519_from_shortsc.c new file mode 100644 index 00000000..3b8ff2fb --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_from_shortsc.c @@ -0,0 +1,9 @@ +#include "sc25519.h" + +void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x) +{ + r->v[0] = x->v[0]; + r->v[1] = x->v[1]; + r->v[2] = 0; + r->v[3] = 0; +} diff --git a/ext/ed25519-amd64-asm/sc25519_iszero.c b/ext/ed25519-amd64-asm/sc25519_iszero.c new file mode 100644 index 00000000..21f593d7 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_iszero.c @@ -0,0 +1,10 @@ +#include "sc25519.h" + +int sc25519_iszero_vartime(const sc25519 *x) +{ + if(x->v[0] != 0) return 0; + if(x->v[1] != 0) return 0; + if(x->v[2] != 0) return 0; + if(x->v[3] != 0) return 0; + return 1; +} diff --git a/ext/ed25519-amd64-asm/sc25519_lt.s b/ext/ed25519-amd64-asm/sc25519_lt.s new file mode 100644 index 00000000..3ba43178 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_lt.s @@ -0,0 +1,131 @@ + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: int64 ret + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: output ret + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 doof + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 
+ +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_lt +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_sc25519_lt +.globl crypto_sign_ed25519_amd64_64_sc25519_lt +_crypto_sign_ed25519_amd64_64_sc25519_lt: +crypto_sign_ed25519_amd64_64_sc25519_lt: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: t0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#1),>t0=int64#3 +# asm 2: movq 0(<xp=%rdi),>t0=%rdx +movq 0(%rdi),%rdx + +# qhasm: t1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#1),>t1=int64#4 +# asm 2: movq 8(<xp=%rdi),>t1=%rcx +movq 8(%rdi),%rcx + +# qhasm: t2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#1),>t2=int64#5 +# asm 2: movq 16(<xp=%rdi),>t2=%r8 +movq 16(%rdi),%r8 + +# qhasm: t3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#1),>t3=int64#1 +# asm 2: movq 24(<xp=%rdi),>t3=%rdi +movq 24(%rdi),%rdi + +# qhasm: carry? t0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(<yp=int64#2),<t0=int64#3 +# asm 2: subq 0(<yp=%rsi),<t0=%rdx +subq 0(%rsi),%rdx + +# qhasm: carry? t1 -= *(uint64 *)(yp + 8) - carry +# asm 1: sbbq 8(<yp=int64#2),<t1=int64#4 +# asm 2: sbbq 8(<yp=%rsi),<t1=%rcx +sbbq 8(%rsi),%rcx + +# qhasm: carry? t2 -= *(uint64 *)(yp + 16) - carry +# asm 1: sbbq 16(<yp=int64#2),<t2=int64#5 +# asm 2: sbbq 16(<yp=%rsi),<t2=%r8 +sbbq 16(%rsi),%r8 + +# qhasm: carry? t3 -= *(uint64 *)(yp + 24) - carry +# asm 1: sbbq 24(<yp=int64#2),<t3=int64#1 +# asm 2: sbbq 24(<yp=%rsi),<t3=%rdi +sbbq 24(%rsi),%rdi + +# qhasm: ret = 0 +# asm 1: mov $0,>ret=int64#1 +# asm 2: mov $0,>ret=%rdi +mov $0,%rdi + +# qhasm: doof = 1 +# asm 1: mov $1,>doof=int64#2 +# asm 2: mov $1,>doof=%rsi +mov $1,%rsi + +# qhasm: ret = doof if carry +# asm 1: cmovc <doof=int64#2,<ret=int64#1 +# asm 2: cmovc <doof=%rsi,<ret=%rdi +cmovc %rsi,%rdi + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/sc25519_mul.c b/ext/ed25519-amd64-asm/sc25519_mul.c new file mode 100644 index 00000000..ca4d5baa --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_mul.c @@ -0,0 +1,12 @@ +#include "sc25519.h" + +#define ull4_mul crypto_sign_ed25519_amd64_64_ull4_mul + +extern void ull4_mul(unsigned long long r[8], const unsigned long long x[4], const unsigned long long y[4]); + +void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y) +{ + unsigned long long t[8]; + ull4_mul(t, x->v, y->v); + sc25519_barrett(r, t); +} diff --git a/ext/ed25519-amd64-asm/sc25519_mul_shortsc.c b/ext/ed25519-amd64-asm/sc25519_mul_shortsc.c new file mode 100644 index 00000000..0c67250d --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_mul_shortsc.c @@ -0,0 +1,9 @@ +#include "sc25519.h" + +void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y) +{ + /* XXX: This wants to be faster */ + sc25519 t; + sc25519_from_shortsc(&t, y); + sc25519_mul(r, x, &t); +} diff --git a/ext/ed25519-amd64-asm/sc25519_slide.c b/ext/ed25519-amd64-asm/sc25519_slide.c new file mode 100644 index 00000000..4e52010d --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_slide.c @@ -0,0 +1,49 @@ +#include "sc25519.h" + +void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize) +{ + int i,j,k,b,m=(1<<(swindowsize-1))-1, soplen=256; + unsigned long long sv0 = s->v[0]; + unsigned long long sv1 = s->v[1]; + unsigned long long sv2 = s->v[2]; + unsigned long long sv3 = s->v[3]; + + /* first put the binary expansion into r */ + 
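+ /* (bits of sv0..sv3 land in r[0..63], r[64..127], r[128..191] and
+ r[192..255] respectively, least significant bit first) */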
for(i=0;i<64;i++) { + r[i] = sv0 & 1; + r[i+64] = sv1 & 1; + r[i+128] = sv2 & 1; + r[i+192] = sv3 & 1; + sv0 >>= 1; + sv1 >>= 1; + sv2 >>= 1; + sv3 >>= 1; + } + + /* Making it sliding window */ + for (j = 0;j < soplen;++j) + { + if (r[j]) { + for (b = 1;b < soplen - j && b <= 6;++b) { + if (r[j] + (r[j + b] << b) <= m) + { + r[j] += r[j + b] << b; r[j + b] = 0; + } + else if (r[j] - (r[j + b] << b) >= -m) + { + r[j] -= r[j + b] << b; + for (k = j + b;k < soplen;++k) + { + if (!r[k]) { + r[k] = 1; + break; + } + r[k] = 0; + } + } + else if (r[j + b]) + break; + } + } + } +} diff --git a/ext/ed25519-amd64-asm/sc25519_sub_nored.s b/ext/ed25519-amd64-asm/sc25519_sub_nored.s new file mode 100644 index 00000000..a347e7d4 --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_sub_nored.s @@ -0,0 +1,142 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_sub_nored +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_sc25519_sub_nored +.globl crypto_sign_ed25519_amd64_64_sc25519_sub_nored +_crypto_sign_ed25519_amd64_64_sc25519_sub_nored: +crypto_sign_ed25519_amd64_64_sc25519_sub_nored: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>r0=int64#4 +# asm 2: movq 0(<xp=%rsi),>r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>r1=int64#5 +# asm 2: movq 8(<xp=%rsi),>r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>r2=int64#6 +# asm 2: movq 16(<xp=%rsi),>r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>r3=int64#2 +# asm 2: movq 24(<xp=%rsi),>r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(<yp=int64#3),<r0=int64#4 +# asm 2: subq 0(<yp=%rdx),<r0=%rcx +subq 0(%rdx),%rcx + +# qhasm: carry? r1 -= *(uint64 *)(yp + 8) - carry +# asm 1: sbbq 8(<yp=int64#3),<r1=int64#5 +# asm 2: sbbq 8(<yp=%rdx),<r1=%r8 +sbbq 8(%rdx),%r8 + +# qhasm: carry? 
r2 -= *(uint64 *)(yp + 16) - carry +# asm 1: sbbq 16(<yp=int64#3),<r2=int64#6 +# asm 2: sbbq 16(<yp=%rdx),<r2=%r9 +sbbq 16(%rdx),%r9 + +# qhasm: r3 -= *(uint64 *)(yp + 24) - carry +# asm 1: sbbq 24(<yp=int64#3),<r3=int64#2 +# asm 2: sbbq 24(<yp=%rdx),<r3=%rsi +sbbq 24(%rdx),%rsi + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#4,0(<rp=int64#1) +# asm 2: movq <r0=%rcx,0(<rp=%rdi) +movq %rcx,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#5,8(<rp=int64#1) +# asm 2: movq <r1=%r8,8(<rp=%rdi) +movq %r8,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#6,16(<rp=int64#1) +# asm 2: movq <r2=%r9,16(<rp=%rdi) +movq %r9,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#2,24(<rp=int64#1) +# asm 2: movq <r3=%rsi,24(<rp=%rdi) +movq %rsi,24(%rdi) + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/ext/ed25519-amd64-asm/sc25519_to32bytes.c b/ext/ed25519-amd64-asm/sc25519_to32bytes.c new file mode 100644 index 00000000..eddb235d --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_to32bytes.c @@ -0,0 +1,8 @@ +#include "sc25519.h" + +void sc25519_to32bytes(unsigned char r[32], const sc25519 *x) +{ + /* assuming little-endian */ + int i; + for(i=0;i<32;i++) r[i] = i[(unsigned char *)x->v]; +} diff --git a/ext/ed25519-amd64-asm/sc25519_window4.c b/ext/ed25519-amd64-asm/sc25519_window4.c new file mode 100644 index 00000000..683a1d4b --- /dev/null +++ b/ext/ed25519-amd64-asm/sc25519_window4.c @@ -0,0 +1,27 @@ +#include "sc25519.h" + +void sc25519_window4(signed char r[64], const sc25519 *s) +{ + char carry; + int i; + for(i=0;i<16;i++) + r[i] = (s->v[0] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+16] = (s->v[1] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+32] = (s->v[2] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+48] = (s->v[3] >> (4*i)) & 15; + + /* Making it signed */ + carry = 0; + for(i=0;i<63;i++) + { + r[i] += carry; + r[i+1] += r[i] >> 4; + r[i] &= 15; + carry = r[i] >> 3; + r[i] -= carry << 4; + } + r[63] += carry; +} diff --git a/ext/ed25519-amd64-asm/sign.c b/ext/ed25519-amd64-asm/sign.c new file mode 100644 index 00000000..958e4a14 --- /dev/null +++ b/ext/ed25519-amd64-asm/sign.c @@ -0,0 +1,165 @@ +#include <stdlib.h> +#include <string.h> +/*#include "crypto_sign.h" +#include "crypto_hash_sha512.h"*/ +#include "ge25519.h" + +/* Original */ +#if 0 +int crypto_sign( + unsigned char *sm,unsigned long long *smlen, + const unsigned char *m,unsigned long long mlen, + const unsigned char *sk + ) +{ + unsigned char pk[32]; + unsigned char az[64]; + unsigned char nonce[64]; + unsigned char hram[64]; + sc25519 sck, scs, scsk; + ge25519 ger; + + memmove(pk,sk + 32,32); + /* pk: 32-byte public key A */ + + crypto_hash_sha512(az,sk,32); + az[0] &= 248; + az[31] &= 127; + az[31] |= 64; + /* az: 32-byte scalar a, 32-byte randomizer z */ + + *smlen = mlen + 64; + memmove(sm + 64,m,mlen); + memmove(sm + 32,az + 32,32); + /* sm: 32-byte uninit, 32-byte z, mlen-byte m */ + + crypto_hash_sha512(nonce, sm+32, mlen+32); + /* nonce: 64-byte H(z,m) */ + + sc25519_from64bytes(&sck, nonce); + ge25519_scalarmult_base(&ger, &sck); + ge25519_pack(sm, &ger); + /* sm: 32-byte R, 32-byte z, mlen-byte m */ + + memmove(sm + 32,pk,32); + /* sm: 32-byte R, 32-byte A, mlen-byte m */ + + crypto_hash_sha512(hram,sm,mlen + 64); + /* hram: 64-byte H(R,A,m) */ + + sc25519_from64bytes(&scs, hram); + sc25519_from32bytes(&scsk, az); + sc25519_mul(&scs, &scs, &scsk); + sc25519_add(&scs, &scs, &sck); + /* scs: S = nonce + H(R,A,m)a */ + + 
sc25519_to32bytes(sm + 32,&scs); + /* sm: 32-byte R, 32-byte S, mlen-byte m */ + + return 0; +} +#endif + +#if 0 +void C25519::sign(const C25519::Private &myPrivate,const C25519::Public &myPublic,const void *msg,unsigned int len,void *signature) +{ + sc25519 sck, scs, scsk; + ge25519 ger; + unsigned char r[32]; + unsigned char s[32]; + unsigned char extsk[64]; + unsigned char hmg[crypto_hash_sha512_BYTES]; + unsigned char hram[crypto_hash_sha512_BYTES]; + unsigned char *sig = (unsigned char *)signature; + unsigned char digest[64]; // we sign the first 32 bytes of SHA-512(msg) + + SHA512::hash(digest,msg,len); + + SHA512::hash(extsk,myPrivate.data + 32,32); + extsk[0] &= 248; + extsk[31] &= 127; + extsk[31] |= 64; + + for(unsigned int i=0;i<32;i++) + sig[32 + i] = extsk[32 + i]; + for(unsigned int i=0;i<32;i++) + sig[64 + i] = digest[i]; + + SHA512::hash(hmg,sig + 32,64); + + /* Computation of R */ + sc25519_from64bytes(&sck, hmg); + ge25519_scalarmult_base(&ger, &sck); + ge25519_pack(r, &ger); + + /* Computation of s */ + for(unsigned int i=0;i<32;i++) + sig[i] = r[i]; + + get_hram(hram,sig,myPublic.data + 32,sig,96); + + sc25519_from64bytes(&scs, hram); + sc25519_from32bytes(&scsk, extsk); + sc25519_mul(&scs, &scs, &scsk); + + sc25519_add(&scs, &scs, &sck); + + sc25519_to32bytes(s,&scs); /* cat s */ + for(unsigned int i=0;i<32;i++) + sig[32 + i] = s[i]; +} + +void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen) +{ + unsigned long long i; + + for (i = 0;i < 32;++i) playground[i] = sm[i]; + for (i = 32;i < 64;++i) playground[i] = pk[i-32]; + for (i = 64;i < smlen;++i) playground[i] = sm[i]; + + //crypto_hash_sha512(hram,playground,smlen); + ZeroTier::SHA512::hash(hram,playground,(unsigned int)smlen); +} +#endif + +extern void ZT_sha512internal(void *digest,const void *data,unsigned int len); + +extern void ed25519_amd64_asm_sign(const unsigned char *sk,const unsigned char *pk,const unsigned char *m,const unsigned int mlen,unsigned char *sig) +{ + unsigned char az[64]; + unsigned char nonce[64]; + unsigned char hram[64]; + sc25519 sck, scs, scsk; + ge25519 ger; + unsigned char digest[64]; + unsigned int i; + + ZT_sha512internal(digest,m,mlen); + + ZT_sha512internal(az,sk,32); + az[0] &= 248; + az[31] &= 127; + az[31] |= 64; + + for(i=0;i<32;i++) + sig[32 + i] = az[32 + i]; + for(i=0;i<32;i++) + sig[64 + i] = digest[i]; + + ZT_sha512internal(nonce,sig + 32,64); + + sc25519_from64bytes(&sck, nonce); + ge25519_scalarmult_base(&ger, &sck); + ge25519_pack(sig, &ger); + + memmove(sig + 32,pk,32); + + ZT_sha512internal(hram,sig,96); + + sc25519_from64bytes(&scs, hram); + sc25519_from32bytes(&scsk, az); + sc25519_mul(&scs, &scs, &scsk); + sc25519_add(&scs, &scs, &sck); + + sc25519_to32bytes(sig + 32,&scs); +} diff --git a/ext/ed25519-amd64-asm/ull4_mul.s b/ext/ed25519-amd64-asm/ull4_mul.s new file mode 100644 index 00000000..9f7b4fa2 --- /dev/null +++ b/ext/ed25519-amd64-asm/ull4_mul.s @@ -0,0 +1,716 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 r4 + +# qhasm: int64 r5 + +# qhasm: int64 r6 + +# qhasm: int64 r7 + +# qhasm: int64 c + +# qhasm: int64 zero + +# qhasm: int64 rax + +# qhasm: int64 rdx + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: 
int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_ull4_mul +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_ull4_mul +.globl crypto_sign_ed25519_amd64_64_ull4_mul +_crypto_sign_ed25519_amd64_64_ull4_mul: +crypto_sign_ed25519_amd64_64_ull4_mul: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1 +# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2 +# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3 +# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4 +# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5 +# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6 +# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7 +# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: yp = yp +# asm 1: mov <yp=int64#3,>yp=int64#4 +# asm 2: mov <yp=%rdx,>yp=%rcx +mov %rdx,%rcx + +# qhasm: r4 = 0 +# asm 1: mov $0,>r4=int64#5 +# asm 2: mov $0,>r4=%r8 +mov $0,%r8 + +# qhasm: r5 = 0 +# asm 1: mov $0,>r5=int64#6 +# asm 2: mov $0,>r5=%r9 +mov $0,%r9 + +# qhasm: r6 = 0 +# asm 1: mov $0,>r6=int64#8 +# asm 2: mov $0,>r6=%r10 +mov $0,%r10 + +# qhasm: r7 = 0 +# asm 1: mov $0,>r7=int64#9 +# asm 2: mov $0,>r7=%r11 +mov $0,%r11 + +# qhasm: zero = 0 +# asm 1: mov $0,>zero=int64#10 +# asm 2: mov $0,>zero=%r12 +mov $0,%r12 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>rax=int64#7 +# asm 2: movq 0(<xp=%rsi),>rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(<yp=int64#4) +# asm 2: mulq 0(<yp=%rcx) +mulq 0(%rcx) + +# qhasm: r0 = rax +# asm 1: mov <rax=int64#7,>r0=int64#11 +# asm 2: mov <rax=%rax,>r0=%r13 +mov %rax,%r13 + +# qhasm: c = rdx +# asm 1: mov <rdx=int64#3,>c=int64#12 +# asm 2: mov <rdx=%rdx,>c=%r14 +mov %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>rax=int64#7 +# asm 2: movq 0(<xp=%rsi),>rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(<yp=int64#4) +# asm 2: mulq 8(<yp=%rcx) +mulq 8(%rcx) + +# qhasm: r1 = rax +# asm 1: mov <rax=int64#7,>r1=int64#13 +# asm 2: mov <rax=%rax,>r1=%r15 +mov %rax,%r15 + +# qhasm: carry? 
r1 += c +# asm 1: add <c=int64#12,<r1=int64#13 +# asm 2: add <c=%r14,<r1=%r15 +add %r14,%r15 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>rax=int64#7 +# asm 2: movq 0(<xp=%rsi),>rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(<yp=int64#4) +# asm 2: mulq 16(<yp=%rcx) +mulq 16(%rcx) + +# qhasm: r2 = rax +# asm 1: mov <rax=int64#7,>r2=int64#14 +# asm 2: mov <rax=%rax,>r2=%rbx +mov %rax,%rbx + +# qhasm: carry? r2 += c +# asm 1: add <c=int64#12,<r2=int64#14 +# asm 2: add <c=%r14,<r2=%rbx +add %r14,%rbx + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(<xp=int64#2),>rax=int64#7 +# asm 2: movq 0(<xp=%rsi),>rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(<yp=int64#4) +# asm 2: mulq 24(<yp=%rcx) +mulq 24(%rcx) + +# qhasm: r3 = rax +# asm 1: mov <rax=int64#7,>r3=int64#15 +# asm 2: mov <rax=%rax,>r3=%rbp +mov %rax,%rbp + +# qhasm: carry? r3 += c +# asm 1: add <c=int64#12,<r3=int64#15 +# asm 2: add <c=%r14,<r3=%rbp +add %r14,%rbp + +# qhasm: r4 += rdx + carry +# asm 1: adc <rdx=int64#3,<r4=int64#5 +# asm 2: adc <rdx=%rdx,<r4=%r8 +adc %rdx,%r8 + +# qhasm: rax = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>rax=int64#7 +# asm 2: movq 8(<xp=%rsi),>rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(<yp=int64#4) +# asm 2: mulq 0(<yp=%rcx) +mulq 0(%rcx) + +# qhasm: carry? r1 += rax +# asm 1: add <rax=int64#7,<r1=int64#13 +# asm 2: add <rax=%rax,<r1=%r15 +add %rax,%r15 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>rax=int64#7 +# asm 2: movq 8(<xp=%rsi),>rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(<yp=int64#4) +# asm 2: mulq 8(<yp=%rcx) +mulq 8(%rcx) + +# qhasm: carry? r2 += rax +# asm 1: add <rax=int64#7,<r2=int64#14 +# asm 2: add <rax=%rax,<r2=%rbx +add %rax,%rbx + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r2 += c +# asm 1: add <c=int64#12,<r2=int64#14 +# asm 2: add <c=%r14,<r2=%rbx +add %r14,%rbx + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>rax=int64#7 +# asm 2: movq 8(<xp=%rsi),>rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(<yp=int64#4) +# asm 2: mulq 16(<yp=%rcx) +mulq 16(%rcx) + +# qhasm: carry? r3 += rax +# asm 1: add <rax=int64#7,<r3=int64#15 +# asm 2: add <rax=%rax,<r3=%rbp +add %rax,%rbp + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? 
r3 += c +# asm 1: add <c=int64#12,<r3=int64#15 +# asm 2: add <c=%r14,<r3=%rbp +add %r14,%rbp + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 8) +# asm 1: movq 8(<xp=int64#2),>rax=int64#7 +# asm 2: movq 8(<xp=%rsi),>rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(<yp=int64#4) +# asm 2: mulq 24(<yp=%rcx) +mulq 24(%rcx) + +# qhasm: carry? r4 += rax +# asm 1: add <rax=int64#7,<r4=int64#5 +# asm 2: add <rax=%rax,<r4=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r4 += c +# asm 1: add <c=int64#12,<r4=int64#5 +# asm 2: add <c=%r14,<r4=%r8 +add %r14,%r8 + +# qhasm: r5 += rdx + carry +# asm 1: adc <rdx=int64#3,<r5=int64#6 +# asm 2: adc <rdx=%rdx,<r5=%r9 +adc %rdx,%r9 + +# qhasm: rax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>rax=int64#7 +# asm 2: movq 16(<xp=%rsi),>rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(<yp=int64#4) +# asm 2: mulq 0(<yp=%rcx) +mulq 0(%rcx) + +# qhasm: carry? r2 += rax +# asm 1: add <rax=int64#7,<r2=int64#14 +# asm 2: add <rax=%rax,<r2=%rbx +add %rax,%rbx + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>rax=int64#7 +# asm 2: movq 16(<xp=%rsi),>rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(<yp=int64#4) +# asm 2: mulq 8(<yp=%rcx) +mulq 8(%rcx) + +# qhasm: carry? r3 += rax +# asm 1: add <rax=int64#7,<r3=int64#15 +# asm 2: add <rax=%rax,<r3=%rbp +add %rax,%rbp + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r3 += c +# asm 1: add <c=int64#12,<r3=int64#15 +# asm 2: add <c=%r14,<r3=%rbp +add %r14,%rbp + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>rax=int64#7 +# asm 2: movq 16(<xp=%rsi),>rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(<yp=int64#4) +# asm 2: mulq 16(<yp=%rcx) +mulq 16(%rcx) + +# qhasm: carry? r4 += rax +# asm 1: add <rax=int64#7,<r4=int64#5 +# asm 2: add <rax=%rax,<r4=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r4 += c +# asm 1: add <c=int64#12,<r4=int64#5 +# asm 2: add <c=%r14,<r4=%r8 +add %r14,%r8 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 16) +# asm 1: movq 16(<xp=int64#2),>rax=int64#7 +# asm 2: movq 16(<xp=%rsi),>rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(<yp=int64#4) +# asm 2: mulq 24(<yp=%rcx) +mulq 24(%rcx) + +# qhasm: carry? 
r5 += rax +# asm 1: add <rax=int64#7,<r5=int64#6 +# asm 2: add <rax=%rax,<r5=%r9 +add %rax,%r9 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r5 += c +# asm 1: add <c=int64#12,<r5=int64#6 +# asm 2: add <c=%r14,<r5=%r9 +add %r14,%r9 + +# qhasm: r6 += rdx + carry +# asm 1: adc <rdx=int64#3,<r6=int64#8 +# asm 2: adc <rdx=%rdx,<r6=%r10 +adc %rdx,%r10 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>rax=int64#7 +# asm 2: movq 24(<xp=%rsi),>rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(<yp=int64#4) +# asm 2: mulq 0(<yp=%rcx) +mulq 0(%rcx) + +# qhasm: carry? r3 += rax +# asm 1: add <rax=int64#7,<r3=int64#15 +# asm 2: add <rax=%rax,<r3=%rbp +add %rax,%rbp + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>rax=int64#7 +# asm 2: movq 24(<xp=%rsi),>rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(<yp=int64#4) +# asm 2: mulq 8(<yp=%rcx) +mulq 8(%rcx) + +# qhasm: carry? r4 += rax +# asm 1: add <rax=int64#7,<r4=int64#5 +# asm 2: add <rax=%rax,<r4=%r8 +add %rax,%r8 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r4 += c +# asm 1: add <c=int64#12,<r4=int64#5 +# asm 2: add <c=%r14,<r4=%r8 +add %r14,%r8 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>rax=int64#7 +# asm 2: movq 24(<xp=%rsi),>rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(<yp=int64#4) +# asm 2: mulq 16(<yp=%rcx) +mulq 16(%rcx) + +# qhasm: carry? r5 += rax +# asm 1: add <rax=int64#7,<r5=int64#6 +# asm 2: add <rax=%rax,<r5=%r9 +add %rax,%r9 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? r5 += c +# asm 1: add <c=int64#12,<r5=int64#6 +# asm 2: add <c=%r14,<r5=%r9 +add %r14,%r9 + +# qhasm: c = 0 +# asm 1: mov $0,>c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc <rdx=int64#3,<c=int64#12 +# asm 2: adc <rdx=%rdx,<c=%r14 +adc %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(<xp=int64#2),>rax=int64#7 +# asm 2: movq 24(<xp=%rsi),>rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(<yp=int64#4) +# asm 2: mulq 24(<yp=%rcx) +mulq 24(%rcx) + +# qhasm: carry? r6 += rax +# asm 1: add <rax=int64#7,<r6=int64#8 +# asm 2: add <rax=%rax,<r6=%r10 +add %rax,%r10 + +# qhasm: rdx += zero + carry +# asm 1: adc <zero=int64#10,<rdx=int64#3 +# asm 2: adc <zero=%r12,<rdx=%rdx +adc %r12,%rdx + +# qhasm: carry? 
r6 += c +# asm 1: add <c=int64#12,<r6=int64#8 +# asm 2: add <c=%r14,<r6=%r10 +add %r14,%r10 + +# qhasm: r7 += rdx + carry +# asm 1: adc <rdx=int64#3,<r7=int64#9 +# asm 2: adc <rdx=%rdx,<r7=%r11 +adc %rdx,%r11 + +# qhasm: *(uint64 *)(rp + 0) = r0 +# asm 1: movq <r0=int64#11,0(<rp=int64#1) +# asm 2: movq <r0=%r13,0(<rp=%rdi) +movq %r13,0(%rdi) + +# qhasm: *(uint64 *)(rp + 8) = r1 +# asm 1: movq <r1=int64#13,8(<rp=int64#1) +# asm 2: movq <r1=%r15,8(<rp=%rdi) +movq %r15,8(%rdi) + +# qhasm: *(uint64 *)(rp + 16) = r2 +# asm 1: movq <r2=int64#14,16(<rp=int64#1) +# asm 2: movq <r2=%rbx,16(<rp=%rdi) +movq %rbx,16(%rdi) + +# qhasm: *(uint64 *)(rp + 24) = r3 +# asm 1: movq <r3=int64#15,24(<rp=int64#1) +# asm 2: movq <r3=%rbp,24(<rp=%rdi) +movq %rbp,24(%rdi) + +# qhasm: *(uint64 *)(rp + 32) = r4 +# asm 1: movq <r4=int64#5,32(<rp=int64#1) +# asm 2: movq <r4=%r8,32(<rp=%rdi) +movq %r8,32(%rdi) + +# qhasm: *(uint64 *)(rp + 40) = r5 +# asm 1: movq <r5=int64#6,40(<rp=int64#1) +# asm 2: movq <r5=%r9,40(<rp=%rdi) +movq %r9,40(%rdi) + +# qhasm: *(uint64 *)(rp + 48) = r6 +# asm 1: movq <r6=int64#8,48(<rp=int64#1) +# asm 2: movq <r6=%r10,48(<rp=%rdi) +movq %r10,48(%rdi) + +# qhasm: *(uint64 *)(rp + 56) = r7 +# asm 1: movq <r7=int64#9,56(<rp=int64#1) +# asm 2: movq <r7=%r11,56(<rp=%rdi) +movq %r11,56(%rdi) + +# qhasm: caller1 = caller1_stack +# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9 +# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10 +# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11 +# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12 +# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13 +# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14 +# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15 +# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret |
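For reference, a minimal portable-C sketch of what crypto_sign_ed25519_amd64_64_ull4_mul computes: the full 512-bit schoolbook product of two 256-bit operands held as four little-endian 64-bit limbs each, written to an 8-limb result, matching the extern declaration in sc25519_mul.c above. The helper name ull4_mul_ref and the reliance on unsigned __int128 (a GCC/Clang extension) are illustrative assumptions, not part of this commit.

/* Illustrative sketch only: portable equivalent of the ull4_mul.s routine.
   r (8 limbs) = x (4 limbs) * y (4 limbs), 64-bit little-endian limbs. */
static void ull4_mul_ref(unsigned long long r[8],
                         const unsigned long long x[4],
                         const unsigned long long y[4])
{
    unsigned long long t[8] = {0,0,0,0,0,0,0,0};
    int i, j;
    for (i = 0; i < 4; i++) {
        unsigned long long carry = 0;
        for (j = 0; j < 4; j++) {
            /* 64x64 -> 128-bit product, plus the limb already at t[i+j], plus carry;
               the sum is at most 2^128 - 1, so it cannot overflow unsigned __int128 */
            unsigned __int128 acc = (unsigned __int128)x[i] * y[j] + t[i + j] + carry;
            t[i + j] = (unsigned long long)acc;
            carry = (unsigned long long)(acc >> 64);
        }
        t[i + 4] = carry; /* t[i+4] is still zero here, so a plain store suffices */
    }
    for (i = 0; i < 8; i++) r[i] = t[i];
}

sc25519_mul would then reduce the 8-limb product modulo the group order with sc25519_barrett, exactly as shown in sc25519_mul.c above.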