- #! /usr/bin/env perl
- # Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
- #
- # Licensed under the OpenSSL license (the "License"). You may not use
- # this file except in compliance with the License. You can obtain a copy
- # in the file LICENSE in the source distribution or at
- # https://www.openssl.org/source/license.html
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- #
- # ECP_NISTZ256 module for ARMv8.
- #
- # February 2015.
- #
- # The original ECP_NISTZ256 submission, targeting x86_64, is detailed
- # in http://eprint.iacr.org/2013/816.
- #
- # with/without -DECP_NISTZ256_ASM
- # Apple A7 +190-360%
- # Cortex-A53 +190-400%
- # Cortex-A57 +190-350%
- # Denver +230-400%
- #
- # Ranges denote minimum and maximum improvement coefficients depending
- # on benchmark. Lower coefficients are for ECDSA sign, server-side
- # operation. Keep in mind that +400% means 5x improvement.
- $flavour = shift;
- while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
- ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
- die "can't locate arm-xlate.pl";
- open OUT,"| \"$^X\" $xlate $flavour $output";
- *STDOUT=*OUT;
- {
- my ($rp,$ap,$bp,$bi,$a0,$a1,$a2,$a3,$t0,$t1,$t2,$t3,$poly1,$poly3,
- $acc0,$acc1,$acc2,$acc3,$acc4,$acc5) =
- map("x$_",(0..17,19,20));
- my ($acc6,$acc7)=($ap,$bp); # used in __ecp_nistz256_sqr_mont
- $code.=<<___;
- #include "arm_arch.h"
- .text
- ___
- ########################################################################
- # Convert ecp_nistz256_table.c to the layout expected by
- # ecp_nistz256_gather_w7
- #
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- open TABLE,"<ecp_nistz256_table.c" or
- open TABLE,"<${dir}../ecp_nistz256_table.c" or
- die "failed to open ecp_nistz256_table.c:",$!;
- use integer;
- foreach(<TABLE>) {
- s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
- }
- close TABLE;
- # See ecp_nistz256_table.c for an explanation of why it's 64*16*37
- # (37 windows x 64 points x 16 32-bit words). The expected value is
- # 64*16*37-1 because $#arr returns the last valid index of @arr, not
- # the number of elements.
- die "insane number of elements" if ($#arr != 64*16*37-1);
- $code.=<<___;
- .globl ecp_nistz256_precomputed
- .type ecp_nistz256_precomputed,%object
- .align 12
- ecp_nistz256_precomputed:
- ___
- ########################################################################
- # This conversion splits each P256_POINT_AFFINE into individual
- # bytes laid out at 64-byte intervals, i.e. it transposes
- #	1111222233334444
- # into
- #	1234123412341234
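- # I.e. byte i of point j within a 64-point window lands at output
- # offset 64*i+j, e.g. byte 5 of point 3 is emitted at offset
- # 64*5+3 = 323 of the window's 4096 bytes. This is the layout that
- # ecp_nistz256_gather_w7 reads back, one byte per 64-byte stride,
- # independent of the index.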
- for(1..37) {
- @tbl = splice(@arr,0,64*16);
- for($i=0;$i<64;$i++) {
- undef @line;
- for($j=0;$j<64;$j++) {
- push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
- }
- $code.=".byte\t";
- $code.=join(',',map { sprintf "0x%02x",$_} @line);
- $code.="\n";
- }
- }
- $code.=<<___;
- .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
- .align 5
- .Lpoly:
- .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
- .LRR: // 2^512 mod P precomputed for NIST P256 polynomial
- .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
- .Lone_mont:
- .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
- .Lone:
- .quad 1,0,0,0
- .Lord:
- .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
- .LordK:
- .quad 0xccd1c8aaee00bc4f
- .asciz "ECP_NISTZ256 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
- // void ecp_nistz256_to_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_to_mont
- .type ecp_nistz256_to_mont,%function
- .align 6
- ecp_nistz256_to_mont:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-32]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- ldr $bi,.LRR // bp[0]
- ldp $a0,$a1,[$ap]
- ldp $a2,$a3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- adr $bp,.LRR // &bp[0]
- bl __ecp_nistz256_mul_mont
- ldp x19,x20,[sp,#16]
- ldp x29,x30,[sp],#32
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
- // void ecp_nistz256_from_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_from_mont
- .type ecp_nistz256_from_mont,%function
- .align 4
- ecp_nistz256_from_mont:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-32]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- mov $bi,#1 // bp[0]
- ldp $a0,$a1,[$ap]
- ldp $a2,$a3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- adr $bp,.Lone // &bp[0]
- bl __ecp_nistz256_mul_mont
- ldp x19,x20,[sp,#16]
- ldp x29,x30,[sp],#32
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
- // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
- // const BN_ULONG x2[4]);
- .globl ecp_nistz256_mul_mont
- .type ecp_nistz256_mul_mont,%function
- .align 4
- ecp_nistz256_mul_mont:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-32]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- ldr $bi,[$bp] // bp[0]
- ldp $a0,$a1,[$ap]
- ldp $a2,$a3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- bl __ecp_nistz256_mul_mont
- ldp x19,x20,[sp,#16]
- ldp x29,x30,[sp],#32
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
- // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_sqr_mont
- .type ecp_nistz256_sqr_mont,%function
- .align 4
- ecp_nistz256_sqr_mont:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-32]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- ldp $a0,$a1,[$ap]
- ldp $a2,$a3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- bl __ecp_nistz256_sqr_mont
- ldp x19,x20,[sp,#16]
- ldp x29,x30,[sp],#32
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
- // void ecp_nistz256_add(BN_ULONG x0[4],const BN_ULONG x1[4],
- // const BN_ULONG x2[4]);
- .globl ecp_nistz256_add
- .type ecp_nistz256_add,%function
- .align 4
- ecp_nistz256_add:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ldp $acc0,$acc1,[$ap]
- ldp $t0,$t1,[$bp]
- ldp $acc2,$acc3,[$ap,#16]
- ldp $t2,$t3,[$bp,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- bl __ecp_nistz256_add
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_add,.-ecp_nistz256_add
- // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_div_by_2
- .type ecp_nistz256_div_by_2,%function
- .align 4
- ecp_nistz256_div_by_2:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ldp $acc0,$acc1,[$ap]
- ldp $acc2,$acc3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- bl __ecp_nistz256_div_by_2
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
- // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_mul_by_2
- .type ecp_nistz256_mul_by_2,%function
- .align 4
- ecp_nistz256_mul_by_2:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ldp $acc0,$acc1,[$ap]
- ldp $acc2,$acc3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- mov $t0,$acc0
- mov $t1,$acc1
- mov $t2,$acc2
- mov $t3,$acc3
- bl __ecp_nistz256_add // ret = a+a // 2*a
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
- // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_mul_by_3
- .type ecp_nistz256_mul_by_3,%function
- .align 4
- ecp_nistz256_mul_by_3:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ldp $acc0,$acc1,[$ap]
- ldp $acc2,$acc3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- mov $t0,$acc0
- mov $t1,$acc1
- mov $t2,$acc2
- mov $t3,$acc3
- mov $a0,$acc0
- mov $a1,$acc1
- mov $a2,$acc2
- mov $a3,$acc3
- bl __ecp_nistz256_add // ret = a+a // 2*a
- mov $t0,$a0
- mov $t1,$a1
- mov $t2,$a2
- mov $t3,$a3
- bl __ecp_nistz256_add // ret += a // 2*a+a=3*a
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
- // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
- // const BN_ULONG x2[4]);
- .globl ecp_nistz256_sub
- .type ecp_nistz256_sub,%function
- .align 4
- ecp_nistz256_sub:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ldp $acc0,$acc1,[$ap]
- ldp $acc2,$acc3,[$ap,#16]
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- bl __ecp_nistz256_sub_from
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_sub,.-ecp_nistz256_sub
- // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_neg
- .type ecp_nistz256_neg,%function
- .align 4
- ecp_nistz256_neg:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- mov $bp,$ap
- mov $acc0,xzr // a = 0
- mov $acc1,xzr
- mov $acc2,xzr
- mov $acc3,xzr
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- bl __ecp_nistz256_sub_from
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_neg,.-ecp_nistz256_neg
- // Note that __ecp_nistz256_mul_mont expects the a[0-3] input
- // pre-loaded into $a0-$a3 and b[0] into $bi.
- .type __ecp_nistz256_mul_mont,%function
- .align 4
- __ecp_nistz256_mul_mont:
- mul $acc0,$a0,$bi // a[0]*b[0]
- umulh $t0,$a0,$bi
- mul $acc1,$a1,$bi // a[1]*b[0]
- umulh $t1,$a1,$bi
- mul $acc2,$a2,$bi // a[2]*b[0]
- umulh $t2,$a2,$bi
- mul $acc3,$a3,$bi // a[3]*b[0]
- umulh $t3,$a3,$bi
- ldr $bi,[$bp,#8] // b[1]
- adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
- lsl $t0,$acc0,#32
- adcs $acc2,$acc2,$t1
- lsr $t1,$acc0,#32
- adcs $acc3,$acc3,$t2
- adc $acc4,xzr,$t3
- mov $acc5,xzr
- ___
- for($i=1;$i<4;$i++) {
- # A reduction iteration is normally performed by accumulating the
- # result of multiplying the modulus by the "magic" digit [and
- # omitting the least significant word, which is guaranteed to be
- # 0], but thanks to the special form of the modulus, and to the
- # "magic" digit being equal to the least significant word itself
- # (because -P^-1 mod 2^64 == 1), it can be performed with additions
- # and subtractions alone. Indeed:
- #
- # ffff0001.00000000.0000ffff.ffffffff
- # * abcdefgh
- # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
- #
- # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x - x, we can
- # rewrite the above as:
- #
- # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
- # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
- # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
- #
- # or marking redundant operations:
- #
- # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
- # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
- # - 0000abcd.efgh0000.--------.--------.--------
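- #
- # (Each 8-hex-digit group above stands for one 64-bit word.) The
- # identity used here, P*x = (x<<256) - (x<<224) + (x<<192) + (x<<96)
- # - x, is easy to sanity-check offline with Math::BigInt, e.g. with
- # this throwaway snippet (not part of the generated code):
- #
- #	use Math::BigInt;
- #	my $p = Math::BigInt->from_hex('0xffffffff00000001'.
- #	        '0000000000000000'.'00000000ffffffff'.'ffffffffffffffff');
- #	my $x = Math::BigInt->from_hex('0xdeadbeefcafef00d'); # any word
- #	my $sum = $x->copy->blsft(256)->bsub($x->copy->blsft(224))
- #	            ->badd($x->copy->blsft(192))->badd($x->copy->blsft(96))
- #	            ->bsub($x);
- #	die "identity broken" unless $sum == $x->copy->bmul($p);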
- $code.=<<___;
- subs $t2,$acc0,$t0 // "*0xffff0001"
- sbc $t3,$acc0,$t1
- adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
- mul $t0,$a0,$bi // lo(a[0]*b[i])
- adcs $acc1,$acc2,$t1
- mul $t1,$a1,$bi // lo(a[1]*b[i])
- adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
- mul $t2,$a2,$bi // lo(a[2]*b[i])
- adcs $acc3,$acc4,$t3
- mul $t3,$a3,$bi // lo(a[3]*b[i])
- adc $acc4,$acc5,xzr
- adds $acc0,$acc0,$t0 // accumulate low parts of multiplication
- umulh $t0,$a0,$bi // hi(a[0]*b[i])
- adcs $acc1,$acc1,$t1
- umulh $t1,$a1,$bi // hi(a[1]*b[i])
- adcs $acc2,$acc2,$t2
- umulh $t2,$a2,$bi // hi(a[2]*b[i])
- adcs $acc3,$acc3,$t3
- umulh $t3,$a3,$bi // hi(a[3]*b[i])
- adc $acc4,$acc4,xzr
- ___
- $code.=<<___ if ($i<3);
- ldr $bi,[$bp,#8*($i+1)] // b[$i+1]
- ___
- $code.=<<___;
- adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
- lsl $t0,$acc0,#32
- adcs $acc2,$acc2,$t1
- lsr $t1,$acc0,#32
- adcs $acc3,$acc3,$t2
- adcs $acc4,$acc4,$t3
- adc $acc5,xzr,xzr
- ___
- }
- $code.=<<___;
- // last reduction
- subs $t2,$acc0,$t0 // "*0xffff0001"
- sbc $t3,$acc0,$t1
- adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
- adcs $acc1,$acc2,$t1
- adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
- adcs $acc3,$acc4,$t3
- adc $acc4,$acc5,xzr
- adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
- sbcs $t1,$acc1,$poly1
- sbcs $t2,$acc2,xzr
- sbcs $t3,$acc3,$poly3
- sbcs xzr,$acc4,xzr // did it borrow?
- csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
- csel $acc1,$acc1,$t1,lo
- csel $acc2,$acc2,$t2,lo
- stp $acc0,$acc1,[$rp]
- csel $acc3,$acc3,$t3,lo
- stp $acc2,$acc3,[$rp,#16]
- ret
- .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
- // Note that __ecp_nistz256_sqr_mont expects the a[0-3] input
- // pre-loaded into $a0-$a3.
- .type __ecp_nistz256_sqr_mont,%function
- .align 4
- __ecp_nistz256_sqr_mont:
- // | | | | | |a1*a0| |
- // | | | | |a2*a0| | |
- // | |a3*a2|a3*a0| | | |
- // | | | |a2*a1| | | |
- // | | |a3*a1| | | | |
- // *| | | | | | | | 2|
- // +|a3*a3|a2*a2|a1*a1|a0*a0|
- // |--+--+--+--+--+--+--+--|
- //	|A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. the result
- //	accumulates in $acc0-$acc7
- //
- // The "can't overflow" comments below mark carries into the high
- // part of a multiplication result, which cannot overflow, because
- // the high half of a 64x64-bit product can never be all ones.
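- //
- // The doubling of the cross products is justified by
- //	(a3*2^192 + a2*2^128 + a1*2^64 + a0)^2 =
- //	    sum(a_i^2 * 2^(128*i)) + 2*sum(a_i*a_j * 2^(64*(i+j)), i<j)
- // which is what the diagram above depicts column by column.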
- mul $acc1,$a1,$a0 // a[1]*a[0]
- umulh $t1,$a1,$a0
- mul $acc2,$a2,$a0 // a[2]*a[0]
- umulh $t2,$a2,$a0
- mul $acc3,$a3,$a0 // a[3]*a[0]
- umulh $acc4,$a3,$a0
- adds $acc2,$acc2,$t1 // accumulate high parts of multiplication
- mul $t0,$a2,$a1 // a[2]*a[1]
- umulh $t1,$a2,$a1
- adcs $acc3,$acc3,$t2
- mul $t2,$a3,$a1 // a[3]*a[1]
- umulh $t3,$a3,$a1
- adc $acc4,$acc4,xzr // can't overflow
- mul $acc5,$a3,$a2 // a[3]*a[2]
- umulh $acc6,$a3,$a2
- adds $t1,$t1,$t2 // accumulate high parts of multiplication
- mul $acc0,$a0,$a0 // a[0]*a[0]
- adc $t2,$t3,xzr // can't overflow
- adds $acc3,$acc3,$t0 // accumulate low parts of multiplication
- umulh $a0,$a0,$a0
- adcs $acc4,$acc4,$t1
- mul $t1,$a1,$a1 // a[1]*a[1]
- adcs $acc5,$acc5,$t2
- umulh $a1,$a1,$a1
- adc $acc6,$acc6,xzr // can't overflow
- adds $acc1,$acc1,$acc1 // acc[1-6]*=2
- mul $t2,$a2,$a2 // a[2]*a[2]
- adcs $acc2,$acc2,$acc2
- umulh $a2,$a2,$a2
- adcs $acc3,$acc3,$acc3
- mul $t3,$a3,$a3 // a[3]*a[3]
- adcs $acc4,$acc4,$acc4
- umulh $a3,$a3,$a3
- adcs $acc5,$acc5,$acc5
- adcs $acc6,$acc6,$acc6
- adc $acc7,xzr,xzr
- adds $acc1,$acc1,$a0 // +a[i]*a[i]
- adcs $acc2,$acc2,$t1
- adcs $acc3,$acc3,$a1
- adcs $acc4,$acc4,$t2
- adcs $acc5,$acc5,$a2
- lsl $t0,$acc0,#32
- adcs $acc6,$acc6,$t3
- lsr $t1,$acc0,#32
- adc $acc7,$acc7,$a3
- ___
- for($i=0;$i<3;$i++) { # reductions, see commentary in
- # multiplication for details
- $code.=<<___;
- subs $t2,$acc0,$t0 // "*0xffff0001"
- sbc $t3,$acc0,$t1
- adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
- adcs $acc1,$acc2,$t1
- lsl $t0,$acc0,#32
- adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
- lsr $t1,$acc0,#32
- adc $acc3,$t3,xzr // can't overflow
- ___
- }
- $code.=<<___;
- subs $t2,$acc0,$t0 // "*0xffff0001"
- sbc $t3,$acc0,$t1
- adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
- adcs $acc1,$acc2,$t1
- adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
- adc $acc3,$t3,xzr // can't overflow
- adds $acc0,$acc0,$acc4 // accumulate upper half
- adcs $acc1,$acc1,$acc5
- adcs $acc2,$acc2,$acc6
- adcs $acc3,$acc3,$acc7
- adc $acc4,xzr,xzr
- adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
- sbcs $t1,$acc1,$poly1
- sbcs $t2,$acc2,xzr
- sbcs $t3,$acc3,$poly3
- sbcs xzr,$acc4,xzr // did it borrow?
- csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
- csel $acc1,$acc1,$t1,lo
- csel $acc2,$acc2,$t2,lo
- stp $acc0,$acc1,[$rp]
- csel $acc3,$acc3,$t3,lo
- stp $acc2,$acc3,[$rp,#16]
- ret
- .size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
- // Note that __ecp_nistz256_add expects both input vectors pre-loaded
- // into $acc0-$acc3 and $t0-$t3. This is done because it's used in
- // multiple contexts, e.g. in multiplication by 2 and 3...
- .type __ecp_nistz256_add,%function
- .align 4
- __ecp_nistz256_add:
- adds $acc0,$acc0,$t0 // ret = a+b
- adcs $acc1,$acc1,$t1
- adcs $acc2,$acc2,$t2
- adcs $acc3,$acc3,$t3
- adc $ap,xzr,xzr // zap $ap
- adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
- sbcs $t1,$acc1,$poly1
- sbcs $t2,$acc2,xzr
- sbcs $t3,$acc3,$poly3
- sbcs xzr,$ap,xzr // did subtraction borrow?
- csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
- csel $acc1,$acc1,$t1,lo
- csel $acc2,$acc2,$t2,lo
- stp $acc0,$acc1,[$rp]
- csel $acc3,$acc3,$t3,lo
- stp $acc2,$acc3,[$rp,#16]
- ret
- .size __ecp_nistz256_add,.-__ecp_nistz256_add
- .type __ecp_nistz256_sub_from,%function
- .align 4
- __ecp_nistz256_sub_from:
- ldp $t0,$t1,[$bp]
- ldp $t2,$t3,[$bp,#16]
- subs $acc0,$acc0,$t0 // ret = a-b
- sbcs $acc1,$acc1,$t1
- sbcs $acc2,$acc2,$t2
- sbcs $acc3,$acc3,$t3
- sbc $ap,xzr,xzr // zap $ap
- subs $t0,$acc0,#1 // adds $t0,$acc0,#-1 // tmp = ret+modulus
- adcs $t1,$acc1,$poly1
- adcs $t2,$acc2,xzr
- adc $t3,$acc3,$poly3
- cmp $ap,xzr // did subtraction borrow?
- csel $acc0,$acc0,$t0,eq // ret = borrow ? ret+modulus : ret
- csel $acc1,$acc1,$t1,eq
- csel $acc2,$acc2,$t2,eq
- stp $acc0,$acc1,[$rp]
- csel $acc3,$acc3,$t3,eq
- stp $acc2,$acc3,[$rp,#16]
- ret
- .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
- .type __ecp_nistz256_sub_morf,%function
- .align 4
- __ecp_nistz256_sub_morf:
- ldp $t0,$t1,[$bp]
- ldp $t2,$t3,[$bp,#16]
- subs $acc0,$t0,$acc0 // ret = b-a
- sbcs $acc1,$t1,$acc1
- sbcs $acc2,$t2,$acc2
- sbcs $acc3,$t3,$acc3
- sbc $ap,xzr,xzr // zap $ap
- subs $t0,$acc0,#1 // adds $t0,$acc0,#-1 // tmp = ret+modulus
- adcs $t1,$acc1,$poly1
- adcs $t2,$acc2,xzr
- adc $t3,$acc3,$poly3
- cmp $ap,xzr // did subtraction borrow?
- csel $acc0,$acc0,$t0,eq // ret = borrow ? ret+modulus : ret
- csel $acc1,$acc1,$t1,eq
- csel $acc2,$acc2,$t2,eq
- stp $acc0,$acc1,[$rp]
- csel $acc3,$acc3,$t3,eq
- stp $acc2,$acc3,[$rp,#16]
- ret
- .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
- .type __ecp_nistz256_div_by_2,%function
- .align 4
- __ecp_nistz256_div_by_2:
- subs $t0,$acc0,#1 // adds $t0,$acc0,#-1 // tmp = a+modulus
- adcs $t1,$acc1,$poly1
- adcs $t2,$acc2,xzr
- adcs $t3,$acc3,$poly3
- adc $ap,xzr,xzr // zap $ap
- tst $acc0,#1 // is a even?
- csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus
- csel $acc1,$acc1,$t1,eq
- csel $acc2,$acc2,$t2,eq
- csel $acc3,$acc3,$t3,eq
- csel $ap,xzr,$ap,eq
- lsr $acc0,$acc0,#1 // ret >>= 1
- orr $acc0,$acc0,$acc1,lsl#63
- lsr $acc1,$acc1,#1
- orr $acc1,$acc1,$acc2,lsl#63
- lsr $acc2,$acc2,#1
- orr $acc2,$acc2,$acc3,lsl#63
- lsr $acc3,$acc3,#1
- stp $acc0,$acc1,[$rp]
- orr $acc3,$acc3,$ap,lsl#63
- stp $acc2,$acc3,[$rp,#16]
- ret
- .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
- ___
- ########################################################################
- # The following subroutines are "literal" implementations of those
- # found in ecp_nistz256.c.
- #
- ########################################################################
- # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
- #
- {
- my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
- # The map() above describes the stack layout: four temporary
- # 256-bit vectors on top.
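- #
- # The call sequence below computes the usual Jacobian doubling
- # (cf. ecp_nistz256.c):
- #	S  = 4*X*Y^2,  M = 3*(X + Z^2)*(X - Z^2)
- #	X' = M^2 - 2*S
- #	Y' = M*(S - X') - 8*Y^4
- #	Z' = 2*Y*Z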
- my ($rp_real,$ap_real) = map("x$_",(21,22));
- $code.=<<___;
- .globl ecp_nistz256_point_double
- .type ecp_nistz256_point_double,%function
- .align 5
- ecp_nistz256_point_double:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-96]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- sub sp,sp,#32*4
- .Ldouble_shortcut:
- ldp $acc0,$acc1,[$ap,#32]
- mov $rp_real,$rp
- ldp $acc2,$acc3,[$ap,#48]
- mov $ap_real,$ap
- ldr $poly1,.Lpoly+8
- mov $t0,$acc0
- ldr $poly3,.Lpoly+24
- mov $t1,$acc1
- ldp $a0,$a1,[$ap_real,#64] // forward load for p256_sqr_mont
- mov $t2,$acc2
- mov $t3,$acc3
- ldp $a2,$a3,[$ap_real,#64+16]
- add $rp,sp,#$S
- bl __ecp_nistz256_add // p256_mul_by_2(S, in_y);
- add $rp,sp,#$Zsqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
- ldp $t0,$t1,[$ap_real]
- ldp $t2,$t3,[$ap_real,#16]
- mov $a0,$acc0 // put Zsqr aside for p256_sub
- mov $a1,$acc1
- mov $a2,$acc2
- mov $a3,$acc3
- add $rp,sp,#$M
- bl __ecp_nistz256_add // p256_add(M, Zsqr, in_x);
- add $bp,$ap_real,#0
- mov $acc0,$a0 // restore Zsqr
- mov $acc1,$a1
- ldp $a0,$a1,[sp,#$S] // forward load for p256_sqr_mont
- mov $acc2,$a2
- mov $acc3,$a3
- ldp $a2,$a3,[sp,#$S+16]
- add $rp,sp,#$Zsqr
- bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
- add $rp,sp,#$S
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
- ldr $bi,[$ap_real,#32]
- ldp $a0,$a1,[$ap_real,#64]
- ldp $a2,$a3,[$ap_real,#64+16]
- add $bp,$ap_real,#32
- add $rp,sp,#$tmp0
- bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
- mov $t0,$acc0
- mov $t1,$acc1
- ldp $a0,$a1,[sp,#$S] // forward load for p256_sqr_mont
- mov $t2,$acc2
- mov $t3,$acc3
- ldp $a2,$a3,[sp,#$S+16]
- add $rp,$rp_real,#64
- bl __ecp_nistz256_add // p256_mul_by_2(res_z, tmp0);
- add $rp,sp,#$tmp0
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
- ldr $bi,[sp,#$Zsqr] // forward load for p256_mul_mont
- ldp $a0,$a1,[sp,#$M]
- ldp $a2,$a3,[sp,#$M+16]
- add $rp,$rp_real,#32
- bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
- add $bp,sp,#$Zsqr
- add $rp,sp,#$M
- bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
- mov $t0,$acc0 // duplicate M
- mov $t1,$acc1
- mov $t2,$acc2
- mov $t3,$acc3
- mov $a0,$acc0 // put M aside
- mov $a1,$acc1
- mov $a2,$acc2
- mov $a3,$acc3
- add $rp,sp,#$M
- bl __ecp_nistz256_add
- mov $t0,$a0 // restore M
- mov $t1,$a1
- ldr $bi,[$ap_real] // forward load for p256_mul_mont
- mov $t2,$a2
- ldp $a0,$a1,[sp,#$S]
- mov $t3,$a3
- ldp $a2,$a3,[sp,#$S+16]
- bl __ecp_nistz256_add // p256_mul_by_3(M, M);
- add $bp,$ap_real,#0
- add $rp,sp,#$S
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
- mov $t0,$acc0
- mov $t1,$acc1
- ldp $a0,$a1,[sp,#$M] // forward load for p256_sqr_mont
- mov $t2,$acc2
- mov $t3,$acc3
- ldp $a2,$a3,[sp,#$M+16]
- add $rp,sp,#$tmp0
- bl __ecp_nistz256_add // p256_mul_by_2(tmp0, S);
- add $rp,$rp_real,#0
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
- add $bp,sp,#$tmp0
- bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
- add $bp,sp,#$S
- add $rp,sp,#$S
- bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
- ldr $bi,[sp,#$M]
- mov $a0,$acc0 // copy S
- mov $a1,$acc1
- mov $a2,$acc2
- mov $a3,$acc3
- add $bp,sp,#$M
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
- add $bp,$rp_real,#32
- add $rp,$rp_real,#32
- bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
- add sp,x29,#0 // destroy frame
- ldp x19,x20,[x29,#16]
- ldp x21,x22,[x29,#32]
- ldp x29,x30,[sp],#96
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
- ___
- }
- ########################################################################
- # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
- # const P256_POINT *in2);
- {
- my ($res_x,$res_y,$res_z,
- $H,$Hsqr,$R,$Rsqr,$Hcub,
- $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
- my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
- # The map() above describes the stack layout: twelve temporary
- # 256-bit vectors on top.
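- #
- # The call sequence below computes the usual Jacobian addition
- # (cf. ecp_nistz256.c):
- #	U1 = X1*Z2^2, U2 = X2*Z1^2, S1 = Y1*Z2^3, S2 = Y2*Z1^3
- #	H  = U2 - U1, R  = S2 - S1
- #	X3 = R^2 - H^3 - 2*U1*H^2
- #	Y3 = R*(U1*H^2 - X3) - S1*H^3
- #	Z3 = Z1*Z2*H
- # The doubling case (H == 0 and R == 0 with neither input at
- # infinity) branches to .Ldouble_shortcut; the infinity cases are
- # patched up by the conditional moves at the end.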
- my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp0,$temp1,$temp2)=map("x$_",(21..28));
- $code.=<<___;
- .globl ecp_nistz256_point_add
- .type ecp_nistz256_point_add,%function
- .align 5
- ecp_nistz256_point_add:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-96]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#32*12
- ldp $a0,$a1,[$bp,#64] // in2_z
- ldp $a2,$a3,[$bp,#64+16]
- mov $rp_real,$rp
- mov $ap_real,$ap
- mov $bp_real,$bp
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- orr $t0,$a0,$a1
- orr $t2,$a2,$a3
- orr $in2infty,$t0,$t2
- cmp $in2infty,#0
- csetm $in2infty,ne // ~in2infty
- add $rp,sp,#$Z2sqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
- ldp $a0,$a1,[$ap_real,#64] // in1_z
- ldp $a2,$a3,[$ap_real,#64+16]
- orr $t0,$a0,$a1
- orr $t2,$a2,$a3
- orr $in1infty,$t0,$t2
- cmp $in1infty,#0
- csetm $in1infty,ne // ~in1infty
- add $rp,sp,#$Z1sqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
- ldr $bi,[$bp_real,#64]
- ldp $a0,$a1,[sp,#$Z2sqr]
- ldp $a2,$a3,[sp,#$Z2sqr+16]
- add $bp,$bp_real,#64
- add $rp,sp,#$S1
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
- ldr $bi,[$ap_real,#64]
- ldp $a0,$a1,[sp,#$Z1sqr]
- ldp $a2,$a3,[sp,#$Z1sqr+16]
- add $bp,$ap_real,#64
- add $rp,sp,#$S2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
- ldr $bi,[$ap_real,#32]
- ldp $a0,$a1,[sp,#$S1]
- ldp $a2,$a3,[sp,#$S1+16]
- add $bp,$ap_real,#32
- add $rp,sp,#$S1
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
- ldr $bi,[$bp_real,#32]
- ldp $a0,$a1,[sp,#$S2]
- ldp $a2,$a3,[sp,#$S2+16]
- add $bp,$bp_real,#32
- add $rp,sp,#$S2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
- add $bp,sp,#$S1
- ldr $bi,[sp,#$Z2sqr] // forward load for p256_mul_mont
- ldp $a0,$a1,[$ap_real]
- ldp $a2,$a3,[$ap_real,#16]
- add $rp,sp,#$R
- bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
- orr $acc0,$acc0,$acc1 // see if result is zero
- orr $acc2,$acc2,$acc3
- orr $temp0,$acc0,$acc2 // ~is_equal(S1,S2)
- add $bp,sp,#$Z2sqr
- add $rp,sp,#$U1
- bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
- ldr $bi,[sp,#$Z1sqr]
- ldp $a0,$a1,[$bp_real]
- ldp $a2,$a3,[$bp_real,#16]
- add $bp,sp,#$Z1sqr
- add $rp,sp,#$U2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
- add $bp,sp,#$U1
- ldp $a0,$a1,[sp,#$R] // forward load for p256_sqr_mont
- ldp $a2,$a3,[sp,#$R+16]
- add $rp,sp,#$H
- bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
- orr $acc0,$acc0,$acc1 // see if result is zero
- orr $acc2,$acc2,$acc3
- orr $acc0,$acc0,$acc2 // ~is_equal(U1,U2)
- mvn $temp1,$in1infty // -1/0 -> 0/-1
- mvn $temp2,$in2infty // -1/0 -> 0/-1
- orr $acc0,$acc0,$temp1
- orr $acc0,$acc0,$temp2
- orr $acc0,$acc0,$temp0
- cbnz $acc0,.Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
- .Ladd_double:
- mov $ap,$ap_real
- mov $rp,$rp_real
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- add sp,sp,#32*(12-4) // difference in stack frames
- b .Ldouble_shortcut
- .align 4
- .Ladd_proceed:
- add $rp,sp,#$Rsqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
- ldr $bi,[$ap_real,#64]
- ldp $a0,$a1,[sp,#$H]
- ldp $a2,$a3,[sp,#$H+16]
- add $bp,$ap_real,#64
- add $rp,sp,#$res_z
- bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
- ldp $a0,$a1,[sp,#$H]
- ldp $a2,$a3,[sp,#$H+16]
- add $rp,sp,#$Hsqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
- ldr $bi,[$bp_real,#64]
- ldp $a0,$a1,[sp,#$res_z]
- ldp $a2,$a3,[sp,#$res_z+16]
- add $bp,$bp_real,#64
- add $rp,sp,#$res_z
- bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
- ldr $bi,[sp,#$H]
- ldp $a0,$a1,[sp,#$Hsqr]
- ldp $a2,$a3,[sp,#$Hsqr+16]
- add $bp,sp,#$H
- add $rp,sp,#$Hcub
- bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
- ldr $bi,[sp,#$Hsqr]
- ldp $a0,$a1,[sp,#$U1]
- ldp $a2,$a3,[sp,#$U1+16]
- add $bp,sp,#$Hsqr
- add $rp,sp,#$U2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
- mov $t0,$acc0
- mov $t1,$acc1
- mov $t2,$acc2
- mov $t3,$acc3
- add $rp,sp,#$Hsqr
- bl __ecp_nistz256_add // p256_mul_by_2(Hsqr, U2);
- add $bp,sp,#$Rsqr
- add $rp,sp,#$res_x
- bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
- add $bp,sp,#$Hcub
- bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
- add $bp,sp,#$U2
- ldr $bi,[sp,#$Hcub] // forward load for p256_mul_mont
- ldp $a0,$a1,[sp,#$S1]
- ldp $a2,$a3,[sp,#$S1+16]
- add $rp,sp,#$res_y
- bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
- add $bp,sp,#$Hcub
- add $rp,sp,#$S2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
- ldr $bi,[sp,#$R]
- ldp $a0,$a1,[sp,#$res_y]
- ldp $a2,$a3,[sp,#$res_y+16]
- add $bp,sp,#$R
- add $rp,sp,#$res_y
- bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
- add $bp,sp,#$S2
- bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
- ldp $a0,$a1,[sp,#$res_x] // res
- ldp $a2,$a3,[sp,#$res_x+16]
- ldp $t0,$t1,[$bp_real] // in2
- ldp $t2,$t3,[$bp_real,#16]
- ___
- for($i=0;$i<64;$i+=32) { # conditional moves
- $code.=<<___;
- ldp $acc0,$acc1,[$ap_real,#$i] // in1
- cmp $in1infty,#0 // ~$in1infty, remember?
- ldp $acc2,$acc3,[$ap_real,#$i+16]
- csel $t0,$a0,$t0,ne
- csel $t1,$a1,$t1,ne
- ldp $a0,$a1,[sp,#$res_x+$i+32] // res
- csel $t2,$a2,$t2,ne
- csel $t3,$a3,$t3,ne
- cmp $in2infty,#0 // ~$in2infty, remember?
- ldp $a2,$a3,[sp,#$res_x+$i+48]
- csel $acc0,$t0,$acc0,ne
- csel $acc1,$t1,$acc1,ne
- ldp $t0,$t1,[$bp_real,#$i+32] // in2
- csel $acc2,$t2,$acc2,ne
- csel $acc3,$t3,$acc3,ne
- ldp $t2,$t3,[$bp_real,#$i+48]
- stp $acc0,$acc1,[$rp_real,#$i]
- stp $acc2,$acc3,[$rp_real,#$i+16]
- ___
- }
- $code.=<<___;
- ldp $acc0,$acc1,[$ap_real,#$i] // in1
- cmp $in1infty,#0 // ~$in1infty, remember?
- ldp $acc2,$acc3,[$ap_real,#$i+16]
- csel $t0,$a0,$t0,ne
- csel $t1,$a1,$t1,ne
- csel $t2,$a2,$t2,ne
- csel $t3,$a3,$t3,ne
- cmp $in2infty,#0 // ~$in2infty, remember?
- csel $acc0,$t0,$acc0,ne
- csel $acc1,$t1,$acc1,ne
- csel $acc2,$t2,$acc2,ne
- csel $acc3,$t3,$acc3,ne
- stp $acc0,$acc1,[$rp_real,#$i]
- stp $acc2,$acc3,[$rp_real,#$i+16]
- .Ladd_done:
- add sp,x29,#0 // destroy frame
- ldp x19,x20,[x29,#16]
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#96
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
- ___
- }
- ########################################################################
- # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
- # const P256_POINT_AFFINE *in2);
- {
- my ($res_x,$res_y,$res_z,
- $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
- my $Z1sqr = $S2;
- # The map() above describes the stack layout: ten temporary
- # 256-bit vectors on top.
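- #
- # Same addition as above, but with in2 affine (Z2 == 1), so
- #	U1 = X1, S1 = Y1, U2 = X2*Z1^2, S2 = Y2*Z1^3
- #	H  = U2 - X1, R = S2 - Y1, Z3 = Z1*H
- # The point at infinity for in2 is encoded as the all-zero affine
- # point, and .Lone_mont stands in for its missing Z coordinate in
- # the final conditional moves.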
- my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("x$_",(21..26));
- $code.=<<___;
- .globl ecp_nistz256_point_add_affine
- .type ecp_nistz256_point_add_affine,%function
- .align 5
- ecp_nistz256_point_add_affine:
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-80]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- sub sp,sp,#32*10
- mov $rp_real,$rp
- mov $ap_real,$ap
- mov $bp_real,$bp
- ldr $poly1,.Lpoly+8
- ldr $poly3,.Lpoly+24
- ldp $a0,$a1,[$ap,#64] // in1_z
- ldp $a2,$a3,[$ap,#64+16]
- orr $t0,$a0,$a1
- orr $t2,$a2,$a3
- orr $in1infty,$t0,$t2
- cmp $in1infty,#0
- csetm $in1infty,ne // ~in1infty
- ldp $acc0,$acc1,[$bp] // in2_x
- ldp $acc2,$acc3,[$bp,#16]
- ldp $t0,$t1,[$bp,#32] // in2_y
- ldp $t2,$t3,[$bp,#48]
- orr $acc0,$acc0,$acc1
- orr $acc2,$acc2,$acc3
- orr $t0,$t0,$t1
- orr $t2,$t2,$t3
- orr $acc0,$acc0,$acc2
- orr $t0,$t0,$t2
- orr $in2infty,$acc0,$t0
- cmp $in2infty,#0
- csetm $in2infty,ne // ~in2infty
- add $rp,sp,#$Z1sqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
- mov $a0,$acc0
- mov $a1,$acc1
- mov $a2,$acc2
- mov $a3,$acc3
- ldr $bi,[$bp_real]
- add $bp,$bp_real,#0
- add $rp,sp,#$U2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
- add $bp,$ap_real,#0
- ldr $bi,[$ap_real,#64] // forward load for p256_mul_mont
- ldp $a0,$a1,[sp,#$Z1sqr]
- ldp $a2,$a3,[sp,#$Z1sqr+16]
- add $rp,sp,#$H
- bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
- add $bp,$ap_real,#64
- add $rp,sp,#$S2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
- ldr $bi,[$ap_real,#64]
- ldp $a0,$a1,[sp,#$H]
- ldp $a2,$a3,[sp,#$H+16]
- add $bp,$ap_real,#64
- add $rp,sp,#$res_z
- bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
- ldr $bi,[$bp_real,#32]
- ldp $a0,$a1,[sp,#$S2]
- ldp $a2,$a3,[sp,#$S2+16]
- add $bp,$bp_real,#32
- add $rp,sp,#$S2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
- add $bp,$ap_real,#32
- ldp $a0,$a1,[sp,#$H] // forward load for p256_sqr_mont
- ldp $a2,$a3,[sp,#$H+16]
- add $rp,sp,#$R
- bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
- add $rp,sp,#$Hsqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
- ldp $a0,$a1,[sp,#$R]
- ldp $a2,$a3,[sp,#$R+16]
- add $rp,sp,#$Rsqr
- bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
- ldr $bi,[sp,#$H]
- ldp $a0,$a1,[sp,#$Hsqr]
- ldp $a2,$a3,[sp,#$Hsqr+16]
- add $bp,sp,#$H
- add $rp,sp,#$Hcub
- bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
- ldr $bi,[$ap_real]
- ldp $a0,$a1,[sp,#$Hsqr]
- ldp $a2,$a3,[sp,#$Hsqr+16]
- add $bp,$ap_real,#0
- add $rp,sp,#$U2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
- mov $t0,$acc0
- mov $t1,$acc1
- mov $t2,$acc2
- mov $t3,$acc3
- add $rp,sp,#$Hsqr
- bl __ecp_nistz256_add // p256_mul_by_2(Hsqr, U2);
- add $bp,sp,#$Rsqr
- add $rp,sp,#$res_x
- bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
- add $bp,sp,#$Hcub
- bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
- add $bp,sp,#$U2
- ldr $bi,[$ap_real,#32] // forward load for p256_mul_mont
- ldp $a0,$a1,[sp,#$Hcub]
- ldp $a2,$a3,[sp,#$Hcub+16]
- add $rp,sp,#$res_y
- bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
- add $bp,$ap_real,#32
- add $rp,sp,#$S2
- bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
- ldr $bi,[sp,#$R]
- ldp $a0,$a1,[sp,#$res_y]
- ldp $a2,$a3,[sp,#$res_y+16]
- add $bp,sp,#$R
- add $rp,sp,#$res_y
- bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
- add $bp,sp,#$S2
- bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
- ldp $a0,$a1,[sp,#$res_x] // res
- ldp $a2,$a3,[sp,#$res_x+16]
- ldp $t0,$t1,[$bp_real] // in2
- ldp $t2,$t3,[$bp_real,#16]
- ___
- for($i=0;$i<64;$i+=32) { # conditional moves
- $code.=<<___;
- ldp $acc0,$acc1,[$ap_real,#$i] // in1
- cmp $in1infty,#0 // ~$in1infty, remember?
- ldp $acc2,$acc3,[$ap_real,#$i+16]
- csel $t0,$a0,$t0,ne
- csel $t1,$a1,$t1,ne
- ldp $a0,$a1,[sp,#$res_x+$i+32] // res
- csel $t2,$a2,$t2,ne
- csel $t3,$a3,$t3,ne
- cmp $in2infty,#0 // ~$in2infty, remember?
- ldp $a2,$a3,[sp,#$res_x+$i+48]
- csel $acc0,$t0,$acc0,ne
- csel $acc1,$t1,$acc1,ne
- ldp $t0,$t1,[$bp_real,#$i+32] // in2
- csel $acc2,$t2,$acc2,ne
- csel $acc3,$t3,$acc3,ne
- ldp $t2,$t3,[$bp_real,#$i+48]
- stp $acc0,$acc1,[$rp_real,#$i]
- stp $acc2,$acc3,[$rp_real,#$i+16]
- ___
- $code.=<<___ if ($i == 0);
- adr $bp_real,.Lone_mont-64
- ___
- }
- $code.=<<___;
- ldp $acc0,$acc1,[$ap_real,#$i] // in1
- cmp $in1infty,#0 // ~$in1infty, remember?
- ldp $acc2,$acc3,[$ap_real,#$i+16]
- csel $t0,$a0,$t0,ne
- csel $t1,$a1,$t1,ne
- csel $t2,$a2,$t2,ne
- csel $t3,$a3,$t3,ne
- cmp $in2infty,#0 // ~$in2infty, remember?
- csel $acc0,$t0,$acc0,ne
- csel $acc1,$t1,$acc1,ne
- csel $acc2,$t2,$acc2,ne
- csel $acc3,$t3,$acc3,ne
- stp $acc0,$acc1,[$rp_real,#$i]
- stp $acc2,$acc3,[$rp_real,#$i+16]
- add sp,x29,#0 // destroy frame
- ldp x19,x20,[x29,#16]
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x29,x30,[sp],#80
- .inst 0xd50323bf // autiasp
- ret
- .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
- ___
- }
- if (1) {
- my ($ord0,$ord1) = ($poly1,$poly3);
- my ($ord2,$ord3,$ordk,$t4) = map("x$_",(21..24));
- my $acc7 = $bi;
- $code.=<<___;
- ////////////////////////////////////////////////////////////////////////
- // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
- // uint64_t b[4]);
- .globl ecp_nistz256_ord_mul_mont
- .type ecp_nistz256_ord_mul_mont,%function
- .align 4
- ecp_nistz256_ord_mul_mont:
- stp x29,x30,[sp,#-64]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- adr $ordk,.Lord
- ldr $bi,[$bp] // bp[0]
- ldp $a0,$a1,[$ap]
- ldp $a2,$a3,[$ap,#16]
- ldp $ord0,$ord1,[$ordk,#0]
- ldp $ord2,$ord3,[$ordk,#16]
- ldr $ordk,[$ordk,#32]
- mul $acc0,$a0,$bi // a[0]*b[0]
- umulh $t0,$a0,$bi
- mul $acc1,$a1,$bi // a[1]*b[0]
- umulh $t1,$a1,$bi
- mul $acc2,$a2,$bi // a[2]*b[0]
- umulh $t2,$a2,$bi
- mul $acc3,$a3,$bi // a[3]*b[0]
- umulh $acc4,$a3,$bi
- mul $t4,$acc0,$ordk
- adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
- adcs $acc2,$acc2,$t1
- adcs $acc3,$acc3,$t2
- adc $acc4,$acc4,xzr
- mov $acc5,xzr
- ___
- for ($i=1;$i<4;$i++) {
- ################################################################
- # ffff0000.ffffffff.yyyyyyyy.zzzzzzzz
- # * abcdefgh
- # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
- #
- # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x - x, we can
- # rewrite the above as:
- #
- # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
- # - 0000abcd.efgh0000.abcdefgh.00000000.00000000
- # + abcdefgh.abcdefgh.yzayzbyz.cyzdyzey.zfyzgyzh
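- #
- # where abcdefgh is the "magic" digit $t4 = acc0*ordk mod 2^64
- # (ordk = -N^-1 mod 2^64, .LordK above) and yz..zh stand for the
- # genuine products with the two low words of the group order. As
- # before, the identity is easy to sanity-check offline with
- # Math::BigInt, e.g. with this throwaway snippet (not part of the
- # generated code):
- #
- #	use Math::BigInt;
- #	my $n  = Math::BigInt->from_hex('0xffffffff00000000'.
- #	         'ffffffffffffffff'.'bce6faada7179e84'.'f3b9cac2fc632551');
- #	my $lo = Math::BigInt->from_hex('0xbce6faada7179e84f3b9cac2fc632551');
- #	my $t  = Math::BigInt->from_hex('0x0123456789abcdef'); # any word
- #	my $sum = $t->copy->blsft(256)->bsub($t->copy->blsft(224))
- #	            ->badd($t->copy->blsft(192))->bsub($t->copy->blsft(128))
- #	            ->badd($t->copy->bmul($lo));
- #	die "identity broken" unless $sum == $t->copy->bmul($n);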
- $code.=<<___;
- ldr $bi,[$bp,#8*$i] // b[i]
- lsl $t0,$t4,#32
- subs $acc2,$acc2,$t4
- lsr $t1,$t4,#32
- sbcs $acc3,$acc3,$t0
- sbcs $acc4,$acc4,$t1
- sbc $acc5,$acc5,xzr
- subs xzr,$acc0,#1
- umulh $t1,$ord0,$t4
- mul $t2,$ord1,$t4
- umulh $t3,$ord1,$t4
- adcs $t2,$t2,$t1
- mul $t0,$a0,$bi
- adc $t3,$t3,xzr
- mul $t1,$a1,$bi
- adds $acc0,$acc1,$t2
- mul $t2,$a2,$bi
- adcs $acc1,$acc2,$t3
- mul $t3,$a3,$bi
- adcs $acc2,$acc3,$t4
- adcs $acc3,$acc4,$t4
- adc $acc4,$acc5,xzr
- adds $acc0,$acc0,$t0 // accumulate low parts
- umulh $t0,$a0,$bi
- adcs $acc1,$acc1,$t1
- umulh $t1,$a1,$bi
- adcs $acc2,$acc2,$t2
- umulh $t2,$a2,$bi
- adcs $acc3,$acc3,$t3
- umulh $t3,$a3,$bi
- adc $acc4,$acc4,xzr
- mul $t4,$acc0,$ordk
- adds $acc1,$acc1,$t0 // accumulate high parts
- adcs $acc2,$acc2,$t1
- adcs $acc3,$acc3,$t2
- adcs $acc4,$acc4,$t3
- adc $acc5,xzr,xzr
- ___
- }
- $code.=<<___;
- lsl $t0,$t4,#32 // last reduction
- subs $acc2,$acc2,$t4
- lsr $t1,$t4,#32
- sbcs $acc3,$acc3,$t0
- sbcs $acc4,$acc4,$t1
- sbc $acc5,$acc5,xzr
- subs xzr,$acc0,#1
- umulh $t1,$ord0,$t4
- mul $t2,$ord1,$t4
- umulh $t3,$ord1,$t4
- adcs $t2,$t2,$t1
- adc $t3,$t3,xzr
- adds $acc0,$acc1,$t2
- adcs $acc1,$acc2,$t3
- adcs $acc2,$acc3,$t4
- adcs $acc3,$acc4,$t4
- adc $acc4,$acc5,xzr
- subs $t0,$acc0,$ord0 // ret -= modulus
- sbcs $t1,$acc1,$ord1
- sbcs $t2,$acc2,$ord2
- sbcs $t3,$acc3,$ord3
- sbcs xzr,$acc4,xzr
- csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
- csel $acc1,$acc1,$t1,lo
- csel $acc2,$acc2,$t2,lo
- stp $acc0,$acc1,[$rp]
- csel $acc3,$acc3,$t3,lo
- stp $acc2,$acc3,[$rp,#16]
- ldp x19,x20,[sp,#16]
- ldp x21,x22,[sp,#32]
- ldp x23,x24,[sp,#48]
- ldr x29,[sp],#64
- ret
- .size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
- ////////////////////////////////////////////////////////////////////////
- // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
- // int rep);
- .globl ecp_nistz256_ord_sqr_mont
- .type ecp_nistz256_ord_sqr_mont,%function
- .align 4
- ecp_nistz256_ord_sqr_mont:
- stp x29,x30,[sp,#-64]!
- add x29,sp,#0
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- adr $ordk,.Lord
- ldp $a0,$a1,[$ap]
- ldp $a2,$a3,[$ap,#16]
- ldp $ord0,$ord1,[$ordk,#0]
- ldp $ord2,$ord3,[$ordk,#16]
- ldr $ordk,[$ordk,#32]
- b .Loop_ord_sqr
- .align 4
- .Loop_ord_sqr:
- sub $bp,$bp,#1
- ////////////////////////////////////////////////////////////////
- // | | | | | |a1*a0| |
- // | | | | |a2*a0| | |
- // | |a3*a2|a3*a0| | | |
- // | | | |a2*a1| | | |
- // | | |a3*a1| | | | |
- // *| | | | | | | | 2|
- // +|a3*a3|a2*a2|a1*a1|a0*a0|
- // |--+--+--+--+--+--+--+--|
- //	|A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. the result
- //	accumulates in $acc0-$acc7
- //
- // The "can't overflow" comments below mark carries into the high
- // part of a multiplication result, which cannot overflow, because
- // the high half of a 64x64-bit product can never be all ones.
- mul $acc1,$a1,$a0 // a[1]*a[0]
- umulh $t1,$a1,$a0
- mul $acc2,$a2,$a0 // a[2]*a[0]
- umulh $t2,$a2,$a0
- mul $acc3,$a3,$a0 // a[3]*a[0]
- umulh $acc4,$a3,$a0
- adds $acc2,$acc2,$t1 // accumulate high parts of multiplication
- mul $t0,$a2,$a1 // a[2]*a[1]
- umulh $t1,$a2,$a1
- adcs $acc3,$acc3,$t2
- mul $t2,$a3,$a1 // a[3]*a[1]
- umulh $t3,$a3,$a1
- adc $acc4,$acc4,xzr // can't overflow
- mul $acc5,$a3,$a2 // a[3]*a[2]
- umulh $acc6,$a3,$a2
- adds $t1,$t1,$t2 // accumulate high parts of multiplication
- mul $acc0,$a0,$a0 // a[0]*a[0]
- adc $t2,$t3,xzr // can't overflow
- adds $acc3,$acc3,$t0 // accumulate low parts of multiplication
- umulh $a0,$a0,$a0
- adcs $acc4,$acc4,$t1
- mul $t1,$a1,$a1 // a[1]*a[1]
- adcs $acc5,$acc5,$t2
- umulh $a1,$a1,$a1
- adc $acc6,$acc6,xzr // can't overflow
- adds $acc1,$acc1,$acc1 // acc[1-6]*=2
- mul $t2,$a2,$a2 // a[2]*a[2]
- adcs $acc2,$acc2,$acc2
- umulh $a2,$a2,$a2
- adcs $acc3,$acc3,$acc3
- mul $t3,$a3,$a3 // a[3]*a[3]
- adcs $acc4,$acc4,$acc4
- umulh $a3,$a3,$a3
- adcs $acc5,$acc5,$acc5
- adcs $acc6,$acc6,$acc6
- adc $acc7,xzr,xzr
- adds $acc1,$acc1,$a0 // +a[i]*a[i]
- mul $t4,$acc0,$ordk
- adcs $acc2,$acc2,$t1
- adcs $acc3,$acc3,$a1
- adcs $acc4,$acc4,$t2
- adcs $acc5,$acc5,$a2
- adcs $acc6,$acc6,$t3
- adc $acc7,$acc7,$a3
- ___
- for($i=0; $i<4; $i++) { # reductions
- $code.=<<___;
- subs xzr,$acc0,#1
- umulh $t1,$ord0,$t4
- mul $t2,$ord1,$t4
- umulh $t3,$ord1,$t4
- adcs $t2,$t2,$t1
- adc $t3,$t3,xzr
- adds $acc0,$acc1,$t2
- adcs $acc1,$acc2,$t3
- adcs $acc2,$acc3,$t4
- adc $acc3,xzr,$t4 // can't overflow
- ___
- $code.=<<___ if ($i<3);
- mul $t3,$acc0,$ordk
- ___
- $code.=<<___;
- lsl $t0,$t4,#32
- subs $acc1,$acc1,$t4
- lsr $t1,$t4,#32
- sbcs $acc2,$acc2,$t0
- sbc $acc3,$acc3,$t1 // can't borrow
- ___
- ($t3,$t4) = ($t4,$t3);
- }
- $code.=<<___;
- adds $acc0,$acc0,$acc4 // accumulate upper half
- adcs $acc1,$acc1,$acc5
- adcs $acc2,$acc2,$acc6
- adcs $acc3,$acc3,$acc7
- adc $acc4,xzr,xzr
- subs $t0,$acc0,$ord0 // ret -= modulus
- sbcs $t1,$acc1,$ord1
- sbcs $t2,$acc2,$ord2
- sbcs $t3,$acc3,$ord3
- sbcs xzr,$acc4,xzr
- csel $a0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
- csel $a1,$acc1,$t1,lo
- csel $a2,$acc2,$t2,lo
- csel $a3,$acc3,$t3,lo
- cbnz $bp,.Loop_ord_sqr
- stp $a0,$a1,[$rp]
- stp $a2,$a3,[$rp,#16]
- ldp x19,x20,[sp,#16]
- ldp x21,x22,[sp,#32]
- ldp x23,x24,[sp,#48]
- ldr x29,[sp],#64
- ret
- .size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
- ___
- } }
- ########################################################################
- # scatter-gather subroutines
- {
- my ($out,$inp,$index,$mask)=map("x$_",(0..3));
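- # Both layouts interleave the table so a constant-time gather touches
- # the same offsets whatever the index: scatter_w5 stores 32-bit
- # halves at a 64-byte stride, scatter_w7 stores byte i of the
- # 64-byte entry with index k at offset 64*i+k. The gathers read one
- # unit per stride and reassemble it, with index 0 yielding the
- # all-zero point (infinity). E.g. a throwaway Perl model of
- # gather_w7 (assuming $table holds one scattered 64-entry window and
- # $k is 1..64, with 0 reserved for infinity):
- #
- #	my $entry = join '', map { substr($table,64*$_+$k-1,1) } (0..63);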
- $code.=<<___;
- // void ecp_nistz256_scatter_w5(void *x0,const P256_POINT *x1,
- // int x2);
- .globl ecp_nistz256_scatter_w5
- .type ecp_nistz256_scatter_w5,%function
- .align 4
- ecp_nistz256_scatter_w5:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- add $out,$out,$index,lsl#2
- ldp x4,x5,[$inp] // X
- ldp x6,x7,[$inp,#16]
- str w4,[$out,#64*0-4]
- lsr x4,x4,#32
- str w5,[$out,#64*1-4]
- lsr x5,x5,#32
- str w6,[$out,#64*2-4]
- lsr x6,x6,#32
- str w7,[$out,#64*3-4]
- lsr x7,x7,#32
- str w4,[$out,#64*4-4]
- str w5,[$out,#64*5-4]
- str w6,[$out,#64*6-4]
- str w7,[$out,#64*7-4]
- add $out,$out,#64*8
- ldp x4,x5,[$inp,#32] // Y
- ldp x6,x7,[$inp,#48]
- str w4,[$out,#64*0-4]
- lsr x4,x4,#32
- str w5,[$out,#64*1-4]
- lsr x5,x5,#32
- str w6,[$out,#64*2-4]
- lsr x6,x6,#32
- str w7,[$out,#64*3-4]
- lsr x7,x7,#32
- str w4,[$out,#64*4-4]
- str w5,[$out,#64*5-4]
- str w6,[$out,#64*6-4]
- str w7,[$out,#64*7-4]
- add $out,$out,#64*8
- ldp x4,x5,[$inp,#64] // Z
- ldp x6,x7,[$inp,#80]
- str w4,[$out,#64*0-4]
- lsr x4,x4,#32
- str w5,[$out,#64*1-4]
- lsr x5,x5,#32
- str w6,[$out,#64*2-4]
- lsr x6,x6,#32
- str w7,[$out,#64*3-4]
- lsr x7,x7,#32
- str w4,[$out,#64*4-4]
- str w5,[$out,#64*5-4]
- str w6,[$out,#64*6-4]
- str w7,[$out,#64*7-4]
- ldr x29,[sp],#16
- ret
- .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
- // void ecp_nistz256_gather_w5(P256_POINT *x0,const void *x1,
- // int x2);
- .globl ecp_nistz256_gather_w5
- .type ecp_nistz256_gather_w5,%function
- .align 4
- ecp_nistz256_gather_w5:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- cmp $index,xzr
- csetm x3,ne
- add $index,$index,x3
- add $inp,$inp,$index,lsl#2
- ldr w4,[$inp,#64*0]
- ldr w5,[$inp,#64*1]
- ldr w6,[$inp,#64*2]
- ldr w7,[$inp,#64*3]
- ldr w8,[$inp,#64*4]
- ldr w9,[$inp,#64*5]
- ldr w10,[$inp,#64*6]
- ldr w11,[$inp,#64*7]
- add $inp,$inp,#64*8
- orr x4,x4,x8,lsl#32
- orr x5,x5,x9,lsl#32
- orr x6,x6,x10,lsl#32
- orr x7,x7,x11,lsl#32
- csel x4,x4,xzr,ne
- csel x5,x5,xzr,ne
- csel x6,x6,xzr,ne
- csel x7,x7,xzr,ne
- stp x4,x5,[$out] // X
- stp x6,x7,[$out,#16]
- ldr w4,[$inp,#64*0]
- ldr w5,[$inp,#64*1]
- ldr w6,[$inp,#64*2]
- ldr w7,[$inp,#64*3]
- ldr w8,[$inp,#64*4]
- ldr w9,[$inp,#64*5]
- ldr w10,[$inp,#64*6]
- ldr w11,[$inp,#64*7]
- add $inp,$inp,#64*8
- orr x4,x4,x8,lsl#32
- orr x5,x5,x9,lsl#32
- orr x6,x6,x10,lsl#32
- orr x7,x7,x11,lsl#32
- csel x4,x4,xzr,ne
- csel x5,x5,xzr,ne
- csel x6,x6,xzr,ne
- csel x7,x7,xzr,ne
- stp x4,x5,[$out,#32] // Y
- stp x6,x7,[$out,#48]
- ldr w4,[$inp,#64*0]
- ldr w5,[$inp,#64*1]
- ldr w6,[$inp,#64*2]
- ldr w7,[$inp,#64*3]
- ldr w8,[$inp,#64*4]
- ldr w9,[$inp,#64*5]
- ldr w10,[$inp,#64*6]
- ldr w11,[$inp,#64*7]
- orr x4,x4,x8,lsl#32
- orr x5,x5,x9,lsl#32
- orr x6,x6,x10,lsl#32
- orr x7,x7,x11,lsl#32
- csel x4,x4,xzr,ne
- csel x5,x5,xzr,ne
- csel x6,x6,xzr,ne
- csel x7,x7,xzr,ne
- stp x4,x5,[$out,#64] // Z
- stp x6,x7,[$out,#80]
- ldr x29,[sp],#16
- ret
- .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
- // void ecp_nistz256_scatter_w7(void *x0,const P256_POINT_AFFINE *x1,
- // int x2);
- .globl ecp_nistz256_scatter_w7
- .type ecp_nistz256_scatter_w7,%function
- .align 4
- ecp_nistz256_scatter_w7:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- add $out,$out,$index
- mov $index,#64/8
- .Loop_scatter_w7:
- ldr x3,[$inp],#8
- subs $index,$index,#1
- prfm pstl1strm,[$out,#4096+64*0]
- prfm pstl1strm,[$out,#4096+64*1]
- prfm pstl1strm,[$out,#4096+64*2]
- prfm pstl1strm,[$out,#4096+64*3]
- prfm pstl1strm,[$out,#4096+64*4]
- prfm pstl1strm,[$out,#4096+64*5]
- prfm pstl1strm,[$out,#4096+64*6]
- prfm pstl1strm,[$out,#4096+64*7]
- strb w3,[$out,#64*0]
- lsr x3,x3,#8
- strb w3,[$out,#64*1]
- lsr x3,x3,#8
- strb w3,[$out,#64*2]
- lsr x3,x3,#8
- strb w3,[$out,#64*3]
- lsr x3,x3,#8
- strb w3,[$out,#64*4]
- lsr x3,x3,#8
- strb w3,[$out,#64*5]
- lsr x3,x3,#8
- strb w3,[$out,#64*6]
- lsr x3,x3,#8
- strb w3,[$out,#64*7]
- add $out,$out,#64*8
- b.ne .Loop_scatter_w7
- ldr x29,[sp],#16
- ret
- .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
- // void ecp_nistz256_gather_w7(P256_POINT_AFFINE *x0,const void *x1,
- // int x2);
- .globl ecp_nistz256_gather_w7
- .type ecp_nistz256_gather_w7,%function
- .align 4
- ecp_nistz256_gather_w7:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- cmp $index,xzr
- csetm x3,ne
- add $index,$index,x3
- add $inp,$inp,$index
- mov $index,#64/8
- nop
- .Loop_gather_w7:
- ldrb w4,[$inp,#64*0]
- prfm pldl1strm,[$inp,#4096+64*0]
- subs $index,$index,#1
- ldrb w5,[$inp,#64*1]
- prfm pldl1strm,[$inp,#4096+64*1]
- ldrb w6,[$inp,#64*2]
- prfm pldl1strm,[$inp,#4096+64*2]
- ldrb w7,[$inp,#64*3]
- prfm pldl1strm,[$inp,#4096+64*3]
- ldrb w8,[$inp,#64*4]
- prfm pldl1strm,[$inp,#4096+64*4]
- ldrb w9,[$inp,#64*5]
- prfm pldl1strm,[$inp,#4096+64*5]
- ldrb w10,[$inp,#64*6]
- prfm pldl1strm,[$inp,#4096+64*6]
- ldrb w11,[$inp,#64*7]
- prfm pldl1strm,[$inp,#4096+64*7]
- add $inp,$inp,#64*8
- orr x4,x4,x5,lsl#8
- orr x6,x6,x7,lsl#8
- orr x8,x8,x9,lsl#8
- orr x4,x4,x6,lsl#16
- orr x10,x10,x11,lsl#8
- orr x4,x4,x8,lsl#32
- orr x4,x4,x10,lsl#48
- and x4,x4,x3
- str x4,[$out],#8
- b.ne .Loop_gather_w7
- ldr x29,[sp],#16
- ret
- .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
- ___
- }
- foreach (split("\n",$code)) {
- s/\`([^\`]*)\`/eval $1/ge;
- print $_,"\n";
- }
- close STDOUT or die "error closing STDOUT: $!"; # enforce flush