  1. #! /usr/bin/env perl
  2. # Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the OpenSSL license (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. #
  9. # ====================================================================
  10. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  11. # project. The module is, however, dual licensed under OpenSSL and
  12. # CRYPTOGAMS licenses depending on where you obtain it. For further
  13. # details see http://www.openssl.org/~appro/cryptogams/.
  14. # ====================================================================
  15. #
  16. # November 2014
  17. #
  18. # ChaCha20 for x86_64.
  19. #
  20. # December 2016
  21. #
  22. # Add AVX512F code path.
  23. #
  24. # December 2017
  25. #
  26. # Add AVX512VL code path.
  27. #
  28. # Performance in cycles per byte out of a large buffer.
  29. #
  30. #               IALU/gcc 4.8(i) 1x/2xSSSE3(ii)  4xSSSE3    NxAVX(v)
  31. #
  32. # P4            9.48/+99%       -               -
  33. # Core2         7.83/+55%       7.90/5.76       4.35
  34. # Westmere      7.19/+50%       5.60/4.50       3.00
  35. # Sandy Bridge  8.31/+42%       5.45/4.00       2.72
  36. # Ivy Bridge    6.71/+46%       5.40/?          2.41
  37. # Haswell       5.92/+43%       5.20/3.45       2.42       1.23
  38. # Skylake[-X]   5.87/+39%       4.70/3.22       2.31       1.19[0.80(vi)]
  39. # Silvermont    12.0/+33%       7.75/6.90       7.03(iii)
  40. # Knights L     11.7/-          ?               9.60(iii)  0.80
  41. # Goldmont      10.6/+17%       5.10/3.52       3.28
  42. # Sledgehammer  7.28/+52%       -               -
  43. # Bulldozer     9.66/+28%       9.85/5.35(iv)   3.06(iv)
  44. # Ryzen         5.96/+50%       5.19/3.00       2.40       2.09
  45. # VIA Nano      10.5/+46%       6.72/6.88       6.05
  46. #
  47. # (i) compared to older gcc 3.x one can observe >2x improvement on
  48. # most platforms;
  49. # (ii) 2xSSSE3 is a code path optimized specifically for 128 bytes used
  50. # by chacha20_poly1305_tls_cipher; the results are EVP-free;
  51. # (iii) this is not an optimal result for Atom because of MSROM
  52. # limitations; SSE2 can do better, but the gain is considered too
  53. # low to justify the [maintenance] effort;
  54. # (iv) Bulldozer actually executes the 4xXOP code path, which delivers
  55. # 2.20 and 4.85 for 128-byte inputs;
  56. # (v) 8xAVX2, 8xAVX512VL or 16xAVX512F, whichever is best applicable;
  57. # (vi) even though Skylake-X can execute AVX512F code and deliver 0.57
  58. # cpb in a single thread, the corresponding capability is suppressed;
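#
# For reference, below is a minimal plain-Perl sketch of the standard
# ChaCha20 quarter-round that every code path in this file implements.
# It is documentation only; nothing in the generator calls it.
sub ref_quarter_round {
	my ($a,$b,$c,$d) = @_;		# four 32-bit state words
	$a = ($a+$b) & 0xffffffff; $d ^= $a; $d = ($d<<16 | $d>>16) & 0xffffffff;
	$c = ($c+$d) & 0xffffffff; $b ^= $c; $b = ($b<<12 | $b>>20) & 0xffffffff;
	$a = ($a+$b) & 0xffffffff; $d ^= $a; $d = ($d<< 8 | $d>>24) & 0xffffffff;
	$c = ($c+$d) & 0xffffffff; $b ^= $c; $b = ($b<< 7 | $b>>25) & 0xffffffff;
	return ($a,$b,$c,$d);
}
# A block is 10 "double rounds": quarter-rounds down the four columns of
# the 4x4 state (words 0-4-8-12, 1-5-9-13, ...), then down the four
# diagonals (0-5-10-15, 1-6-11-12, ...), followed by adding the original
# input state back in. Row 0 of the state holds the "expand 32-byte k"
# constants (.Lsigma below), rows 1-2 the 256-bit key, and row 3 the
# block counter and nonce.
#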
  59. $flavour = shift;
  60. $output = shift;
  61. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  62. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  63. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  64. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  65. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  66. die "can't locate x86_64-xlate.pl";
  67. if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  68. =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
  69. $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
  70. }
  71. if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
  72. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
  73. $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
  74. $avx += 1 if ($1==2.11 && $2>=8);
  75. }
  76. if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
  77. `ml64 2>&1` =~ /Version ([0-9]+)\./) {
  78. $avx = ($1>=10) + ($1>=11);
  79. }
  80. if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
  81. $avx = ($2>=3.0) + ($2>3.0);
  82. }
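# Typical standalone invocation (a usage sketch; the OpenSSL build system
# normally drives this):
#
#	perl chacha-x86_64.pl elf  chacha-x86_64.s	# ELF, GNU assembler
#	perl chacha-x86_64.pl nasm chacha-x86_64.asm	# Win64, NASM
#
# $avx ends up 0..3 depending on the assembler detected above and gates
# which SIMD code paths are emitted: >=1 enables the AVX/XOP paths,
# >=2 the AVX2 path, >=3 the AVX512F/AVX512VL paths.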
  83. open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
  84. *STDOUT=*OUT;
  85. # input parameter block
  86. ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
  87. $code.=<<___;
  88. .text
  89. .extern OPENSSL_ia32cap_P
  90. .align 64
  91. .Lzero:
  92. .long 0,0,0,0
  93. .Lone:
  94. .long 1,0,0,0
  95. .Linc:
  96. .long 0,1,2,3
  97. .Lfour:
  98. .long 4,4,4,4
  99. .Lincy:
  100. .long 0,2,4,6,1,3,5,7
  101. .Leight:
  102. .long 8,8,8,8,8,8,8,8
  103. .Lrot16:
  104. .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
  105. .Lrot24:
  106. .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
  107. .Ltwoy:
  108. .long 2,0,0,0, 2,0,0,0
  109. .align 64
  110. .Lzeroz:
  111. .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
  112. .Lfourz:
  113. .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
  114. .Lincz:
  115. .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
  116. .Lsixteen:
  117. .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
  118. .Lsigma:
  119. .asciz "expand 32-byte k"
  120. .asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
  121. ___
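# Notes on the tables above: .Lsigma is the standard "expand 32-byte k"
# input constant; .Lone/.Lfour/.Linc/.Lincy/.Leight (and the AVX512
# variants further down) are per-lane block-counter increments for the
# various 1x/4x/8x/16x code paths; .Lrot16 and .Lrot24 are pshufb
# byte-permutation masks that rotate each 32-bit lane left by 16 and by 8
# bits respectively, e.g. 0x44332211 becomes 0x22114433 under .Lrot16 and
# 0x33221144 under .Lrot24.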
  122. sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
  123. { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  124. my $arg = pop;
  125. $arg = "\$$arg" if ($arg*1 eq $arg);
  126. $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
  127. }
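# For example, &add("%eax","%ebx") appends "add %ebx,%eax" to $code and
# &rol("%edx",16) appends "rol $16,%edx" (tab-separated in the real
# output): arguments are written destination-first, 32-bit perlasm style,
# emitted in AT&T source-first order, and a trailing bare number becomes
# an immediate.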
  128. @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
  129. "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
  130. @t=("%esi","%edi");
  131. sub ROUND { # critical path is 24 cycles per round
  132. my ($a0,$b0,$c0,$d0)=@_;
  133. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  134. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  135. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  136. my ($xc,$xc_)=map("\"$_\"",@t);
  137. my @x=map("\"$_\"",@x);
  138. # Consider order in which variables are addressed by their
  139. # index:
  140. #
  141. # a b c d
  142. #
  143. # 0 4 8 12 < even round
  144. # 1 5 9 13
  145. # 2 6 10 14
  146. # 3 7 11 15
  147. # 0 5 10 15 < odd round
  148. # 1 6 11 12
  149. # 2 7 8 13
  150. # 3 4 9 14
  151. #
  152. # 'a', 'b' and 'd's are permanently allocated in registers,
  153. # @x[0..7,12..15], while 'c's are maintained in memory. If
  154. # you observe the 'c' column, you'll notice that a pair of 'c's is
  155. # invariant between rounds. This means that we have to reload
  156. # them once per round, in the middle. This is why you'll see a
  157. # bunch of 'c' stores and loads in the middle, but none at
  158. # the beginning or end.
  159. # Normally instructions would be interleaved to favour in-order
  160. # execution. Out-of-order cores generally manage it gracefully,
  161. # but not this time for some reason. As in-order execution
  162. # cores are a dying breed and old Atom is the only one around,
  163. # the instructions are left uninterleaved. Besides, Atom is better
  164. # off executing 1xSSSE3 code anyway...
  165. (
  166. "&add (@x[$a0],@x[$b0])", # Q1
  167. "&xor (@x[$d0],@x[$a0])",
  168. "&rol (@x[$d0],16)",
  169. "&add (@x[$a1],@x[$b1])", # Q2
  170. "&xor (@x[$d1],@x[$a1])",
  171. "&rol (@x[$d1],16)",
  172. "&add ($xc,@x[$d0])",
  173. "&xor (@x[$b0],$xc)",
  174. "&rol (@x[$b0],12)",
  175. "&add ($xc_,@x[$d1])",
  176. "&xor (@x[$b1],$xc_)",
  177. "&rol (@x[$b1],12)",
  178. "&add (@x[$a0],@x[$b0])",
  179. "&xor (@x[$d0],@x[$a0])",
  180. "&rol (@x[$d0],8)",
  181. "&add (@x[$a1],@x[$b1])",
  182. "&xor (@x[$d1],@x[$a1])",
  183. "&rol (@x[$d1],8)",
  184. "&add ($xc,@x[$d0])",
  185. "&xor (@x[$b0],$xc)",
  186. "&rol (@x[$b0],7)",
  187. "&add ($xc_,@x[$d1])",
  188. "&xor (@x[$b1],$xc_)",
  189. "&rol (@x[$b1],7)",
  190. "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
  191. "&mov (\"4*$c1(%rsp)\",$xc_)",
  192. "&mov ($xc,\"4*$c2(%rsp)\")",
  193. "&mov ($xc_,\"4*$c3(%rsp)\")",
  194. "&add (@x[$a2],@x[$b2])", # Q3
  195. "&xor (@x[$d2],@x[$a2])",
  196. "&rol (@x[$d2],16)",
  197. "&add (@x[$a3],@x[$b3])", # Q4
  198. "&xor (@x[$d3],@x[$a3])",
  199. "&rol (@x[$d3],16)",
  200. "&add ($xc,@x[$d2])",
  201. "&xor (@x[$b2],$xc)",
  202. "&rol (@x[$b2],12)",
  203. "&add ($xc_,@x[$d3])",
  204. "&xor (@x[$b3],$xc_)",
  205. "&rol (@x[$b3],12)",
  206. "&add (@x[$a2],@x[$b2])",
  207. "&xor (@x[$d2],@x[$a2])",
  208. "&rol (@x[$d2],8)",
  209. "&add (@x[$a3],@x[$b3])",
  210. "&xor (@x[$d3],@x[$a3])",
  211. "&rol (@x[$d3],8)",
  212. "&add ($xc,@x[$d2])",
  213. "&xor (@x[$b2],$xc)",
  214. "&rol (@x[$b2],7)",
  215. "&add ($xc_,@x[$d3])",
  216. "&xor (@x[$b3],$xc_)",
  217. "&rol (@x[$b3],7)"
  218. );
  219. }
  220. ########################################################################
  221. # Generic code path that handles all lengths on pre-SSSE3 processors.
  222. $code.=<<___;
  223. .globl ChaCha20_ctr32
  224. .type ChaCha20_ctr32,\@function,5
  225. .align 64
  226. ChaCha20_ctr32:
  227. .cfi_startproc
  228. cmp \$0,$len
  229. je .Lno_data
  230. mov OPENSSL_ia32cap_P+4(%rip),%r10
  231. ___
  232. $code.=<<___ if ($avx>2);
  233. bt \$48,%r10 # check for AVX512F
  234. jc .LChaCha20_avx512
  235. test %r10,%r10 # check for AVX512VL
  236. js .LChaCha20_avx512vl
  237. ___
  238. $code.=<<___;
  239. test \$`1<<(41-32)`,%r10d
  240. jnz .LChaCha20_ssse3
  241. push %rbx
  242. .cfi_push %rbx
  243. push %rbp
  244. .cfi_push %rbp
  245. push %r12
  246. .cfi_push %r12
  247. push %r13
  248. .cfi_push %r13
  249. push %r14
  250. .cfi_push %r14
  251. push %r15
  252. .cfi_push %r15
  253. sub \$64+24,%rsp
  254. .cfi_adjust_cfa_offset 64+24
  255. .Lctr32_body:
  256. #movdqa .Lsigma(%rip),%xmm0
  257. movdqu ($key),%xmm1
  258. movdqu 16($key),%xmm2
  259. movdqu ($counter),%xmm3
  260. movdqa .Lone(%rip),%xmm4
  261. #movdqa %xmm0,4*0(%rsp) # key[0]
  262. movdqa %xmm1,4*4(%rsp) # key[1]
  263. movdqa %xmm2,4*8(%rsp) # key[2]
  264. movdqa %xmm3,4*12(%rsp) # key[3]
  265. mov $len,%rbp # reassign $len
  266. jmp .Loop_outer
  267. .align 32
  268. .Loop_outer:
  269. mov \$0x61707865,@x[0] # 'expa'
  270. mov \$0x3320646e,@x[1] # 'nd 3'
  271. mov \$0x79622d32,@x[2] # '2-by'
  272. mov \$0x6b206574,@x[3] # 'te k'
  273. mov 4*4(%rsp),@x[4]
  274. mov 4*5(%rsp),@x[5]
  275. mov 4*6(%rsp),@x[6]
  276. mov 4*7(%rsp),@x[7]
  277. movd %xmm3,@x[12]
  278. mov 4*13(%rsp),@x[13]
  279. mov 4*14(%rsp),@x[14]
  280. mov 4*15(%rsp),@x[15]
  281. mov %rbp,64+0(%rsp) # save len
  282. mov \$10,%ebp
  283. mov $inp,64+8(%rsp) # save inp
  284. movq %xmm2,%rsi # "@x[8]"
  285. mov $out,64+16(%rsp) # save out
  286. mov %rsi,%rdi
  287. shr \$32,%rdi # "@x[9]"
  288. jmp .Loop
  289. .align 32
  290. .Loop:
  291. ___
  292. foreach (&ROUND (0, 4, 8,12)) { eval; }
  293. foreach (&ROUND (0, 5,10,15)) { eval; }
  294. &dec ("%ebp");
  295. &jnz (".Loop");
  296. $code.=<<___;
  297. mov @t[1],4*9(%rsp) # modulo-scheduled
  298. mov @t[0],4*8(%rsp)
  299. mov 64(%rsp),%rbp # load len
  300. movdqa %xmm2,%xmm1
  301. mov 64+8(%rsp),$inp # load inp
  302. paddd %xmm4,%xmm3 # increment counter
  303. mov 64+16(%rsp),$out # load out
  304. add \$0x61707865,@x[0] # 'expa'
  305. add \$0x3320646e,@x[1] # 'nd 3'
  306. add \$0x79622d32,@x[2] # '2-by'
  307. add \$0x6b206574,@x[3] # 'te k'
  308. add 4*4(%rsp),@x[4]
  309. add 4*5(%rsp),@x[5]
  310. add 4*6(%rsp),@x[6]
  311. add 4*7(%rsp),@x[7]
  312. add 4*12(%rsp),@x[12]
  313. add 4*13(%rsp),@x[13]
  314. add 4*14(%rsp),@x[14]
  315. add 4*15(%rsp),@x[15]
  316. paddd 4*8(%rsp),%xmm1
  317. cmp \$64,%rbp
  318. jb .Ltail
  319. xor 4*0($inp),@x[0] # xor with input
  320. xor 4*1($inp),@x[1]
  321. xor 4*2($inp),@x[2]
  322. xor 4*3($inp),@x[3]
  323. xor 4*4($inp),@x[4]
  324. xor 4*5($inp),@x[5]
  325. xor 4*6($inp),@x[6]
  326. xor 4*7($inp),@x[7]
  327. movdqu 4*8($inp),%xmm0
  328. xor 4*12($inp),@x[12]
  329. xor 4*13($inp),@x[13]
  330. xor 4*14($inp),@x[14]
  331. xor 4*15($inp),@x[15]
  332. lea 4*16($inp),$inp # inp+=64
  333. pxor %xmm1,%xmm0
  334. movdqa %xmm2,4*8(%rsp)
  335. movd %xmm3,4*12(%rsp)
  336. mov @x[0],4*0($out) # write output
  337. mov @x[1],4*1($out)
  338. mov @x[2],4*2($out)
  339. mov @x[3],4*3($out)
  340. mov @x[4],4*4($out)
  341. mov @x[5],4*5($out)
  342. mov @x[6],4*6($out)
  343. mov @x[7],4*7($out)
  344. movdqu %xmm0,4*8($out)
  345. mov @x[12],4*12($out)
  346. mov @x[13],4*13($out)
  347. mov @x[14],4*14($out)
  348. mov @x[15],4*15($out)
  349. lea 4*16($out),$out # out+=64
  350. sub \$64,%rbp
  351. jnz .Loop_outer
  352. jmp .Ldone
  353. .align 16
  354. .Ltail:
  355. mov @x[0],4*0(%rsp)
  356. mov @x[1],4*1(%rsp)
  357. xor %rbx,%rbx
  358. mov @x[2],4*2(%rsp)
  359. mov @x[3],4*3(%rsp)
  360. mov @x[4],4*4(%rsp)
  361. mov @x[5],4*5(%rsp)
  362. mov @x[6],4*6(%rsp)
  363. mov @x[7],4*7(%rsp)
  364. movdqa %xmm1,4*8(%rsp)
  365. mov @x[12],4*12(%rsp)
  366. mov @x[13],4*13(%rsp)
  367. mov @x[14],4*14(%rsp)
  368. mov @x[15],4*15(%rsp)
  369. .Loop_tail:
  370. movzb ($inp,%rbx),%eax
  371. movzb (%rsp,%rbx),%edx
  372. lea 1(%rbx),%rbx
  373. xor %edx,%eax
  374. mov %al,-1($out,%rbx)
  375. dec %rbp
  376. jnz .Loop_tail
  377. .Ldone:
  378. lea 64+24+48(%rsp),%rsi
  379. .cfi_def_cfa %rsi,8
  380. mov -48(%rsi),%r15
  381. .cfi_restore %r15
  382. mov -40(%rsi),%r14
  383. .cfi_restore %r14
  384. mov -32(%rsi),%r13
  385. .cfi_restore %r13
  386. mov -24(%rsi),%r12
  387. .cfi_restore %r12
  388. mov -16(%rsi),%rbp
  389. .cfi_restore %rbp
  390. mov -8(%rsi),%rbx
  391. .cfi_restore %rbx
  392. lea (%rsi),%rsp
  393. .cfi_def_cfa_register %rsp
  394. .Lno_data:
  395. ret
  396. .cfi_endproc
  397. .size ChaCha20_ctr32,.-ChaCha20_ctr32
  398. ___
  399. ########################################################################
  400. # SSSE3 code path that handles shorter lengths
  401. {
  402. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
  403. sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
  404. &paddd ($a,$b);
  405. &pxor ($d,$a);
  406. &pshufb ($d,$rot16);
  407. &paddd ($c,$d);
  408. &pxor ($b,$c);
  409. &movdqa ($t,$b);
  410. &psrld ($b,20);
  411. &pslld ($t,12);
  412. &por ($b,$t);
  413. &paddd ($a,$b);
  414. &pxor ($d,$a);
  415. &pshufb ($d,$rot24);
  416. &paddd ($c,$d);
  417. &pxor ($b,$c);
  418. &movdqa ($t,$b);
  419. &psrld ($b,25);
  420. &pslld ($t,7);
  421. &por ($b,$t);
  422. }
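# SSE has no 32-bit rotate instruction. The rotates by 16 and 8 bits move
# whole bytes, so they are done with a single pshufb through the
# .Lrot16/.Lrot24 masks; the rotates by 12 and 7 bits are synthesized as
# (x << n) | (x >> (32-n)) with the movdqa/pslld/psrld/por sequences above.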
  423. my $xframe = $win64 ? 160+8 : 8;
  424. $code.=<<___;
  425. .type ChaCha20_ssse3,\@function,5
  426. .align 32
  427. ChaCha20_ssse3:
  428. .cfi_startproc
  429. .LChaCha20_ssse3:
  430. mov %rsp,%r9 # frame pointer
  431. .cfi_def_cfa_register %r9
  432. ___
  433. $code.=<<___ if ($avx);
  434. test \$`1<<(43-32)`,%r10d
  435. jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
  436. ___
  437. $code.=<<___;
  438. cmp \$128,$len # we might throw away some data,
  439. je .LChaCha20_128
  440. ja .LChaCha20_4x # but overall it won't be slower
  441. .Ldo_sse3_after_all:
  442. sub \$64+$xframe,%rsp
  443. ___
  444. $code.=<<___ if ($win64);
  445. movaps %xmm6,-0x28(%r9)
  446. movaps %xmm7,-0x18(%r9)
  447. .Lssse3_body:
  448. ___
  449. $code.=<<___;
  450. movdqa .Lsigma(%rip),$a
  451. movdqu ($key),$b
  452. movdqu 16($key),$c
  453. movdqu ($counter),$d
  454. movdqa .Lrot16(%rip),$rot16
  455. movdqa .Lrot24(%rip),$rot24
  456. movdqa $a,0x00(%rsp)
  457. movdqa $b,0x10(%rsp)
  458. movdqa $c,0x20(%rsp)
  459. movdqa $d,0x30(%rsp)
  460. mov \$10,$counter # reuse $counter
  461. jmp .Loop_ssse3
  462. .align 32
  463. .Loop_outer_ssse3:
  464. movdqa .Lone(%rip),$d
  465. movdqa 0x00(%rsp),$a
  466. movdqa 0x10(%rsp),$b
  467. movdqa 0x20(%rsp),$c
  468. paddd 0x30(%rsp),$d
  469. mov \$10,$counter
  470. movdqa $d,0x30(%rsp)
  471. jmp .Loop_ssse3
  472. .align 32
  473. .Loop_ssse3:
  474. ___
  475. &SSSE3ROUND();
  476. &pshufd ($c,$c,0b01001110);
  477. &pshufd ($b,$b,0b00111001);
  478. &pshufd ($d,$d,0b10010011);
  479. &nop ();
  480. &SSSE3ROUND();
  481. &pshufd ($c,$c,0b01001110);
  482. &pshufd ($b,$b,0b10010011);
  483. &pshufd ($d,$d,0b00111001);
  484. &dec ($counter);
  485. &jnz (".Loop_ssse3");
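# The pshufd immediates above rotate the four 32-bit lanes of a register
# by one, two or three positions (0b00111001, 0b01001110, 0b10010011
# respectively). The first group re-arranges $b, $c and $d so that the
# second SSSE3ROUND call, which always operates register-wise, in effect
# performs the diagonal round; the second group shuffles the lanes back
# into column order.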
  486. $code.=<<___;
  487. paddd 0x00(%rsp),$a
  488. paddd 0x10(%rsp),$b
  489. paddd 0x20(%rsp),$c
  490. paddd 0x30(%rsp),$d
  491. cmp \$64,$len
  492. jb .Ltail_ssse3
  493. movdqu 0x00($inp),$t
  494. movdqu 0x10($inp),$t1
  495. pxor $t,$a # xor with input
  496. movdqu 0x20($inp),$t
  497. pxor $t1,$b
  498. movdqu 0x30($inp),$t1
  499. lea 0x40($inp),$inp # inp+=64
  500. pxor $t,$c
  501. pxor $t1,$d
  502. movdqu $a,0x00($out) # write output
  503. movdqu $b,0x10($out)
  504. movdqu $c,0x20($out)
  505. movdqu $d,0x30($out)
  506. lea 0x40($out),$out # out+=64
  507. sub \$64,$len
  508. jnz .Loop_outer_ssse3
  509. jmp .Ldone_ssse3
  510. .align 16
  511. .Ltail_ssse3:
  512. movdqa $a,0x00(%rsp)
  513. movdqa $b,0x10(%rsp)
  514. movdqa $c,0x20(%rsp)
  515. movdqa $d,0x30(%rsp)
  516. xor $counter,$counter
  517. .Loop_tail_ssse3:
  518. movzb ($inp,$counter),%eax
  519. movzb (%rsp,$counter),%ecx
  520. lea 1($counter),$counter
  521. xor %ecx,%eax
  522. mov %al,-1($out,$counter)
  523. dec $len
  524. jnz .Loop_tail_ssse3
  525. .Ldone_ssse3:
  526. ___
  527. $code.=<<___ if ($win64);
  528. movaps -0x28(%r9),%xmm6
  529. movaps -0x18(%r9),%xmm7
  530. ___
  531. $code.=<<___;
  532. lea (%r9),%rsp
  533. .cfi_def_cfa_register %rsp
  534. .Lssse3_epilogue:
  535. ret
  536. .cfi_endproc
  537. .size ChaCha20_ssse3,.-ChaCha20_ssse3
  538. ___
  539. }
  540. ########################################################################
  541. # SSSE3 code path that handles 128-byte inputs
  542. {
  543. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(8,9,2..7));
  544. my ($a1,$b1,$c1,$d1)=map("%xmm$_",(10,11,0,1));
  545. sub SSSE3ROUND_2x {
  546. &paddd ($a,$b);
  547. &pxor ($d,$a);
  548. &paddd ($a1,$b1);
  549. &pxor ($d1,$a1);
  550. &pshufb ($d,$rot16);
  551. &pshufb($d1,$rot16);
  552. &paddd ($c,$d);
  553. &paddd ($c1,$d1);
  554. &pxor ($b,$c);
  555. &pxor ($b1,$c1);
  556. &movdqa ($t,$b);
  557. &psrld ($b,20);
  558. &movdqa($t1,$b1);
  559. &pslld ($t,12);
  560. &psrld ($b1,20);
  561. &por ($b,$t);
  562. &pslld ($t1,12);
  563. &por ($b1,$t1);
  564. &paddd ($a,$b);
  565. &pxor ($d,$a);
  566. &paddd ($a1,$b1);
  567. &pxor ($d1,$a1);
  568. &pshufb ($d,$rot24);
  569. &pshufb($d1,$rot24);
  570. &paddd ($c,$d);
  571. &paddd ($c1,$d1);
  572. &pxor ($b,$c);
  573. &pxor ($b1,$c1);
  574. &movdqa ($t,$b);
  575. &psrld ($b,25);
  576. &movdqa($t1,$b1);
  577. &pslld ($t,7);
  578. &psrld ($b1,25);
  579. &por ($b,$t);
  580. &pslld ($t1,7);
  581. &por ($b1,$t1);
  582. }
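# Same quarter-round as SSSE3ROUND above, but two independent 64-byte
# blocks ($a..$d and $a1..$d1, with $d1 holding the incremented counter)
# are processed in lock-step so that their instruction streams interleave
# and hide latency; this is the 2xSSSE3 figure from footnote (ii) in the
# header, aimed at 128-byte chacha20_poly1305_tls_cipher records.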
  583. my $xframe = $win64 ? 0x68 : 8;
  584. $code.=<<___;
  585. .type ChaCha20_128,\@function,5
  586. .align 32
  587. ChaCha20_128:
  588. .cfi_startproc
  589. .LChaCha20_128:
  590. mov %rsp,%r9 # frame pointer
  591. .cfi_def_cfa_register %r9
  592. sub \$64+$xframe,%rsp
  593. ___
  594. $code.=<<___ if ($win64);
  595. movaps %xmm6,-0x68(%r9)
  596. movaps %xmm7,-0x58(%r9)
  597. movaps %xmm8,-0x48(%r9)
  598. movaps %xmm9,-0x38(%r9)
  599. movaps %xmm10,-0x28(%r9)
  600. movaps %xmm11,-0x18(%r9)
  601. .L128_body:
  602. ___
  603. $code.=<<___;
  604. movdqa .Lsigma(%rip),$a
  605. movdqu ($key),$b
  606. movdqu 16($key),$c
  607. movdqu ($counter),$d
  608. movdqa .Lone(%rip),$d1
  609. movdqa .Lrot16(%rip),$rot16
  610. movdqa .Lrot24(%rip),$rot24
  611. movdqa $a,$a1
  612. movdqa $a,0x00(%rsp)
  613. movdqa $b,$b1
  614. movdqa $b,0x10(%rsp)
  615. movdqa $c,$c1
  616. movdqa $c,0x20(%rsp)
  617. paddd $d,$d1
  618. movdqa $d,0x30(%rsp)
  619. mov \$10,$counter # reuse $counter
  620. jmp .Loop_128
  621. .align 32
  622. .Loop_128:
  623. ___
  624. &SSSE3ROUND_2x();
  625. &pshufd ($c,$c,0b01001110);
  626. &pshufd ($b,$b,0b00111001);
  627. &pshufd ($d,$d,0b10010011);
  628. &pshufd ($c1,$c1,0b01001110);
  629. &pshufd ($b1,$b1,0b00111001);
  630. &pshufd ($d1,$d1,0b10010011);
  631. &SSSE3ROUND_2x();
  632. &pshufd ($c,$c,0b01001110);
  633. &pshufd ($b,$b,0b10010011);
  634. &pshufd ($d,$d,0b00111001);
  635. &pshufd ($c1,$c1,0b01001110);
  636. &pshufd ($b1,$b1,0b10010011);
  637. &pshufd ($d1,$d1,0b00111001);
  638. &dec ($counter);
  639. &jnz (".Loop_128");
  640. $code.=<<___;
  641. paddd 0x00(%rsp),$a
  642. paddd 0x10(%rsp),$b
  643. paddd 0x20(%rsp),$c
  644. paddd 0x30(%rsp),$d
  645. paddd .Lone(%rip),$d1
  646. paddd 0x00(%rsp),$a1
  647. paddd 0x10(%rsp),$b1
  648. paddd 0x20(%rsp),$c1
  649. paddd 0x30(%rsp),$d1
  650. movdqu 0x00($inp),$t
  651. movdqu 0x10($inp),$t1
  652. pxor $t,$a # xor with input
  653. movdqu 0x20($inp),$t
  654. pxor $t1,$b
  655. movdqu 0x30($inp),$t1
  656. pxor $t,$c
  657. movdqu 0x40($inp),$t
  658. pxor $t1,$d
  659. movdqu 0x50($inp),$t1
  660. pxor $t,$a1
  661. movdqu 0x60($inp),$t
  662. pxor $t1,$b1
  663. movdqu 0x70($inp),$t1
  664. pxor $t,$c1
  665. pxor $t1,$d1
  666. movdqu $a,0x00($out) # write output
  667. movdqu $b,0x10($out)
  668. movdqu $c,0x20($out)
  669. movdqu $d,0x30($out)
  670. movdqu $a1,0x40($out)
  671. movdqu $b1,0x50($out)
  672. movdqu $c1,0x60($out)
  673. movdqu $d1,0x70($out)
  674. ___
  675. $code.=<<___ if ($win64);
  676. movaps -0x68(%r9),%xmm6
  677. movaps -0x58(%r9),%xmm7
  678. movaps -0x48(%r9),%xmm8
  679. movaps -0x38(%r9),%xmm9
  680. movaps -0x28(%r9),%xmm10
  681. movaps -0x18(%r9),%xmm11
  682. ___
  683. $code.=<<___;
  684. lea (%r9),%rsp
  685. .cfi_def_cfa_register %rsp
  686. .L128_epilogue:
  687. ret
  688. .cfi_endproc
  689. .size ChaCha20_128,.-ChaCha20_128
  690. ___
  691. }
  692. ########################################################################
  693. # SSSE3 code path that handles longer messages.
  694. {
  695. # assign variables to favor Atom front-end
  696. my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
  697. $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
  698. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  699. "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
  700. sub SSSE3_lane_ROUND {
  701. my ($a0,$b0,$c0,$d0)=@_;
  702. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  703. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  704. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  705. my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
  706. my @x=map("\"$_\"",@xx);
  707. # Consider order in which variables are addressed by their
  708. # index:
  709. #
  710. # a b c d
  711. #
  712. # 0 4 8 12 < even round
  713. # 1 5 9 13
  714. # 2 6 10 14
  715. # 3 7 11 15
  716. # 0 5 10 15 < odd round
  717. # 1 6 11 12
  718. # 2 7 8 13
  719. # 3 4 9 14
  720. #
  721. # 'a', 'b' and 'd's are permanently allocated in registers,
  722. # @x[0..7,12..15], while 'c's are maintained in memory. If
  723. # you observe the 'c' column, you'll notice that a pair of 'c's is
  724. # invariant between rounds. This means that we have to reload
  725. # them once per round, in the middle. This is why you'll see a
  726. # bunch of 'c' stores and loads in the middle, but none at
  727. # the beginning or end.
  728. (
  729. "&paddd (@x[$a0],@x[$b0])", # Q1
  730. "&paddd (@x[$a1],@x[$b1])", # Q2
  731. "&pxor (@x[$d0],@x[$a0])",
  732. "&pxor (@x[$d1],@x[$a1])",
  733. "&pshufb (@x[$d0],$t1)",
  734. "&pshufb (@x[$d1],$t1)",
  735. "&paddd ($xc,@x[$d0])",
  736. "&paddd ($xc_,@x[$d1])",
  737. "&pxor (@x[$b0],$xc)",
  738. "&pxor (@x[$b1],$xc_)",
  739. "&movdqa ($t0,@x[$b0])",
  740. "&pslld (@x[$b0],12)",
  741. "&psrld ($t0,20)",
  742. "&movdqa ($t1,@x[$b1])",
  743. "&pslld (@x[$b1],12)",
  744. "&por (@x[$b0],$t0)",
  745. "&psrld ($t1,20)",
  746. "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
  747. "&por (@x[$b1],$t1)",
  748. "&paddd (@x[$a0],@x[$b0])",
  749. "&paddd (@x[$a1],@x[$b1])",
  750. "&pxor (@x[$d0],@x[$a0])",
  751. "&pxor (@x[$d1],@x[$a1])",
  752. "&pshufb (@x[$d0],$t0)",
  753. "&pshufb (@x[$d1],$t0)",
  754. "&paddd ($xc,@x[$d0])",
  755. "&paddd ($xc_,@x[$d1])",
  756. "&pxor (@x[$b0],$xc)",
  757. "&pxor (@x[$b1],$xc_)",
  758. "&movdqa ($t1,@x[$b0])",
  759. "&pslld (@x[$b0],7)",
  760. "&psrld ($t1,25)",
  761. "&movdqa ($t0,@x[$b1])",
  762. "&pslld (@x[$b1],7)",
  763. "&por (@x[$b0],$t1)",
  764. "&psrld ($t0,25)",
  765. "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
  766. "&por (@x[$b1],$t0)",
  767. "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
  768. "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
  769. "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
  770. "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
  771. "&paddd (@x[$a2],@x[$b2])", # Q3
  772. "&paddd (@x[$a3],@x[$b3])", # Q4
  773. "&pxor (@x[$d2],@x[$a2])",
  774. "&pxor (@x[$d3],@x[$a3])",
  775. "&pshufb (@x[$d2],$t1)",
  776. "&pshufb (@x[$d3],$t1)",
  777. "&paddd ($xc,@x[$d2])",
  778. "&paddd ($xc_,@x[$d3])",
  779. "&pxor (@x[$b2],$xc)",
  780. "&pxor (@x[$b3],$xc_)",
  781. "&movdqa ($t0,@x[$b2])",
  782. "&pslld (@x[$b2],12)",
  783. "&psrld ($t0,20)",
  784. "&movdqa ($t1,@x[$b3])",
  785. "&pslld (@x[$b3],12)",
  786. "&por (@x[$b2],$t0)",
  787. "&psrld ($t1,20)",
  788. "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
  789. "&por (@x[$b3],$t1)",
  790. "&paddd (@x[$a2],@x[$b2])",
  791. "&paddd (@x[$a3],@x[$b3])",
  792. "&pxor (@x[$d2],@x[$a2])",
  793. "&pxor (@x[$d3],@x[$a3])",
  794. "&pshufb (@x[$d2],$t0)",
  795. "&pshufb (@x[$d3],$t0)",
  796. "&paddd ($xc,@x[$d2])",
  797. "&paddd ($xc_,@x[$d3])",
  798. "&pxor (@x[$b2],$xc)",
  799. "&pxor (@x[$b3],$xc_)",
  800. "&movdqa ($t1,@x[$b2])",
  801. "&pslld (@x[$b2],7)",
  802. "&psrld ($t1,25)",
  803. "&movdqa ($t0,@x[$b3])",
  804. "&pslld (@x[$b3],7)",
  805. "&por (@x[$b2],$t1)",
  806. "&psrld ($t0,25)",
  807. "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
  808. "&por (@x[$b3],$t0)"
  809. );
  810. }
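# Unlike the 1x/2x paths above, the registers here are lane-sliced: each
# xmm register holds the same state word from four independent blocks, so
# one paddd/pxor/pshufb sequence advances all four blocks at once. That
# is why the key material and counters are "smashed by lanes" with pshufd
# below, and why the results must be transposed (the punpck* blocks
# further down) before they can be xored with the input stream.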
  811. my $xframe = $win64 ? 0xa8 : 8;
  812. $code.=<<___;
  813. .type ChaCha20_4x,\@function,5
  814. .align 32
  815. ChaCha20_4x:
  816. .cfi_startproc
  817. .LChaCha20_4x:
  818. mov %rsp,%r9 # frame pointer
  819. .cfi_def_cfa_register %r9
  820. mov %r10,%r11
  821. ___
  822. $code.=<<___ if ($avx>1);
  823. shr \$32,%r10 # OPENSSL_ia32cap_P+8
  824. test \$`1<<5`,%r10 # test AVX2
  825. jnz .LChaCha20_8x
  826. ___
  827. $code.=<<___;
  828. cmp \$192,$len
  829. ja .Lproceed4x
  830. and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
  831. cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
  832. je .Ldo_sse3_after_all # to detect Atom
  833. .Lproceed4x:
  834. sub \$0x140+$xframe,%rsp
  835. ___
  836. ################ stack layout
  837. # +0x00 SIMD equivalent of @x[8-12]
  838. # ...
  839. # +0x40 constant copy of key[0-2] smashed by lanes
  840. # ...
  841. # +0x100 SIMD counters (with nonce smashed by lanes)
  842. # ...
  843. # +0x140
  844. $code.=<<___ if ($win64);
  845. movaps %xmm6,-0xa8(%r9)
  846. movaps %xmm7,-0x98(%r9)
  847. movaps %xmm8,-0x88(%r9)
  848. movaps %xmm9,-0x78(%r9)
  849. movaps %xmm10,-0x68(%r9)
  850. movaps %xmm11,-0x58(%r9)
  851. movaps %xmm12,-0x48(%r9)
  852. movaps %xmm13,-0x38(%r9)
  853. movaps %xmm14,-0x28(%r9)
  854. movaps %xmm15,-0x18(%r9)
  855. .L4x_body:
  856. ___
  857. $code.=<<___;
  858. movdqa .Lsigma(%rip),$xa3 # key[0]
  859. movdqu ($key),$xb3 # key[1]
  860. movdqu 16($key),$xt3 # key[2]
  861. movdqu ($counter),$xd3 # key[3]
  862. lea 0x100(%rsp),%rcx # size optimization
  863. lea .Lrot16(%rip),%r10
  864. lea .Lrot24(%rip),%r11
  865. pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  866. pshufd \$0x55,$xa3,$xa1
  867. movdqa $xa0,0x40(%rsp) # ... and offload
  868. pshufd \$0xaa,$xa3,$xa2
  869. movdqa $xa1,0x50(%rsp)
  870. pshufd \$0xff,$xa3,$xa3
  871. movdqa $xa2,0x60(%rsp)
  872. movdqa $xa3,0x70(%rsp)
  873. pshufd \$0x00,$xb3,$xb0
  874. pshufd \$0x55,$xb3,$xb1
  875. movdqa $xb0,0x80-0x100(%rcx)
  876. pshufd \$0xaa,$xb3,$xb2
  877. movdqa $xb1,0x90-0x100(%rcx)
  878. pshufd \$0xff,$xb3,$xb3
  879. movdqa $xb2,0xa0-0x100(%rcx)
  880. movdqa $xb3,0xb0-0x100(%rcx)
  881. pshufd \$0x00,$xt3,$xt0 # "$xc0"
  882. pshufd \$0x55,$xt3,$xt1 # "$xc1"
  883. movdqa $xt0,0xc0-0x100(%rcx)
  884. pshufd \$0xaa,$xt3,$xt2 # "$xc2"
  885. movdqa $xt1,0xd0-0x100(%rcx)
  886. pshufd \$0xff,$xt3,$xt3 # "$xc3"
  887. movdqa $xt2,0xe0-0x100(%rcx)
  888. movdqa $xt3,0xf0-0x100(%rcx)
  889. pshufd \$0x00,$xd3,$xd0
  890. pshufd \$0x55,$xd3,$xd1
  891. paddd .Linc(%rip),$xd0 # don't save counters yet
  892. pshufd \$0xaa,$xd3,$xd2
  893. movdqa $xd1,0x110-0x100(%rcx)
  894. pshufd \$0xff,$xd3,$xd3
  895. movdqa $xd2,0x120-0x100(%rcx)
  896. movdqa $xd3,0x130-0x100(%rcx)
  897. jmp .Loop_enter4x
  898. .align 32
  899. .Loop_outer4x:
  900. movdqa 0x40(%rsp),$xa0 # re-load smashed key
  901. movdqa 0x50(%rsp),$xa1
  902. movdqa 0x60(%rsp),$xa2
  903. movdqa 0x70(%rsp),$xa3
  904. movdqa 0x80-0x100(%rcx),$xb0
  905. movdqa 0x90-0x100(%rcx),$xb1
  906. movdqa 0xa0-0x100(%rcx),$xb2
  907. movdqa 0xb0-0x100(%rcx),$xb3
  908. movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
  909. movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
  910. movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
  911. movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
  912. movdqa 0x100-0x100(%rcx),$xd0
  913. movdqa 0x110-0x100(%rcx),$xd1
  914. movdqa 0x120-0x100(%rcx),$xd2
  915. movdqa 0x130-0x100(%rcx),$xd3
  916. paddd .Lfour(%rip),$xd0 # next SIMD counters
  917. .Loop_enter4x:
  918. movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
  919. movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
  920. movdqa (%r10),$xt3 # .Lrot16(%rip)
  921. mov \$10,%eax
  922. movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
  923. jmp .Loop4x
  924. .align 32
  925. .Loop4x:
  926. ___
  927. foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
  928. foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
  929. $code.=<<___;
  930. dec %eax
  931. jnz .Loop4x
  932. paddd 0x40(%rsp),$xa0 # accumulate key material
  933. paddd 0x50(%rsp),$xa1
  934. paddd 0x60(%rsp),$xa2
  935. paddd 0x70(%rsp),$xa3
  936. movdqa $xa0,$xt2 # "de-interlace" data
  937. punpckldq $xa1,$xa0
  938. movdqa $xa2,$xt3
  939. punpckldq $xa3,$xa2
  940. punpckhdq $xa1,$xt2
  941. punpckhdq $xa3,$xt3
  942. movdqa $xa0,$xa1
  943. punpcklqdq $xa2,$xa0 # "a0"
  944. movdqa $xt2,$xa3
  945. punpcklqdq $xt3,$xt2 # "a2"
  946. punpckhqdq $xa2,$xa1 # "a1"
  947. punpckhqdq $xt3,$xa3 # "a3"
  948. ___
  949. ($xa2,$xt2)=($xt2,$xa2);
  950. $code.=<<___;
  951. paddd 0x80-0x100(%rcx),$xb0
  952. paddd 0x90-0x100(%rcx),$xb1
  953. paddd 0xa0-0x100(%rcx),$xb2
  954. paddd 0xb0-0x100(%rcx),$xb3
  955. movdqa $xa0,0x00(%rsp) # offload $xaN
  956. movdqa $xa1,0x10(%rsp)
  957. movdqa 0x20(%rsp),$xa0 # "xc2"
  958. movdqa 0x30(%rsp),$xa1 # "xc3"
  959. movdqa $xb0,$xt2
  960. punpckldq $xb1,$xb0
  961. movdqa $xb2,$xt3
  962. punpckldq $xb3,$xb2
  963. punpckhdq $xb1,$xt2
  964. punpckhdq $xb3,$xt3
  965. movdqa $xb0,$xb1
  966. punpcklqdq $xb2,$xb0 # "b0"
  967. movdqa $xt2,$xb3
  968. punpcklqdq $xt3,$xt2 # "b2"
  969. punpckhqdq $xb2,$xb1 # "b1"
  970. punpckhqdq $xt3,$xb3 # "b3"
  971. ___
  972. ($xb2,$xt2)=($xt2,$xb2);
  973. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  974. $code.=<<___;
  975. paddd 0xc0-0x100(%rcx),$xc0
  976. paddd 0xd0-0x100(%rcx),$xc1
  977. paddd 0xe0-0x100(%rcx),$xc2
  978. paddd 0xf0-0x100(%rcx),$xc3
  979. movdqa $xa2,0x20(%rsp) # keep offloading $xaN
  980. movdqa $xa3,0x30(%rsp)
  981. movdqa $xc0,$xt2
  982. punpckldq $xc1,$xc0
  983. movdqa $xc2,$xt3
  984. punpckldq $xc3,$xc2
  985. punpckhdq $xc1,$xt2
  986. punpckhdq $xc3,$xt3
  987. movdqa $xc0,$xc1
  988. punpcklqdq $xc2,$xc0 # "c0"
  989. movdqa $xt2,$xc3
  990. punpcklqdq $xt3,$xt2 # "c2"
  991. punpckhqdq $xc2,$xc1 # "c1"
  992. punpckhqdq $xt3,$xc3 # "c3"
  993. ___
  994. ($xc2,$xt2)=($xt2,$xc2);
  995. ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
  996. $code.=<<___;
  997. paddd 0x100-0x100(%rcx),$xd0
  998. paddd 0x110-0x100(%rcx),$xd1
  999. paddd 0x120-0x100(%rcx),$xd2
  1000. paddd 0x130-0x100(%rcx),$xd3
  1001. movdqa $xd0,$xt2
  1002. punpckldq $xd1,$xd0
  1003. movdqa $xd2,$xt3
  1004. punpckldq $xd3,$xd2
  1005. punpckhdq $xd1,$xt2
  1006. punpckhdq $xd3,$xt3
  1007. movdqa $xd0,$xd1
  1008. punpcklqdq $xd2,$xd0 # "d0"
  1009. movdqa $xt2,$xd3
  1010. punpcklqdq $xt3,$xt2 # "d2"
  1011. punpckhqdq $xd2,$xd1 # "d1"
  1012. punpckhqdq $xt3,$xd3 # "d3"
  1013. ___
  1014. ($xd2,$xt2)=($xt2,$xd2);
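# The punpck{l,h}dq/punpck{l,h}qdq sequences above implement a 4x4
# transpose of 32-bit words, turning "the same word from four blocks per
# register" back into "one contiguous 64-byte block per group of four
# registers". The transpose leaves some outputs in the temporary
# registers, which is why the Perl variables are swapped, e.g.
# ($xa2,$xt2)=($xt2,$xa2), instead of moving data between registers.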
  1015. $code.=<<___;
  1016. cmp \$64*4,$len
  1017. jb .Ltail4x
  1018. movdqu 0x00($inp),$xt0 # xor with input
  1019. movdqu 0x10($inp),$xt1
  1020. movdqu 0x20($inp),$xt2
  1021. movdqu 0x30($inp),$xt3
  1022. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1023. pxor $xb0,$xt1
  1024. pxor $xc0,$xt2
  1025. pxor $xd0,$xt3
  1026. movdqu $xt0,0x00($out)
  1027. movdqu 0x40($inp),$xt0
  1028. movdqu $xt1,0x10($out)
  1029. movdqu 0x50($inp),$xt1
  1030. movdqu $xt2,0x20($out)
  1031. movdqu 0x60($inp),$xt2
  1032. movdqu $xt3,0x30($out)
  1033. movdqu 0x70($inp),$xt3
  1034. lea 0x80($inp),$inp # size optimization
  1035. pxor 0x10(%rsp),$xt0
  1036. pxor $xb1,$xt1
  1037. pxor $xc1,$xt2
  1038. pxor $xd1,$xt3
  1039. movdqu $xt0,0x40($out)
  1040. movdqu 0x00($inp),$xt0
  1041. movdqu $xt1,0x50($out)
  1042. movdqu 0x10($inp),$xt1
  1043. movdqu $xt2,0x60($out)
  1044. movdqu 0x20($inp),$xt2
  1045. movdqu $xt3,0x70($out)
  1046. lea 0x80($out),$out # size optimization
  1047. movdqu 0x30($inp),$xt3
  1048. pxor 0x20(%rsp),$xt0
  1049. pxor $xb2,$xt1
  1050. pxor $xc2,$xt2
  1051. pxor $xd2,$xt3
  1052. movdqu $xt0,0x00($out)
  1053. movdqu 0x40($inp),$xt0
  1054. movdqu $xt1,0x10($out)
  1055. movdqu 0x50($inp),$xt1
  1056. movdqu $xt2,0x20($out)
  1057. movdqu 0x60($inp),$xt2
  1058. movdqu $xt3,0x30($out)
  1059. movdqu 0x70($inp),$xt3
  1060. lea 0x80($inp),$inp # inp+=64*4
  1061. pxor 0x30(%rsp),$xt0
  1062. pxor $xb3,$xt1
  1063. pxor $xc3,$xt2
  1064. pxor $xd3,$xt3
  1065. movdqu $xt0,0x40($out)
  1066. movdqu $xt1,0x50($out)
  1067. movdqu $xt2,0x60($out)
  1068. movdqu $xt3,0x70($out)
  1069. lea 0x80($out),$out # out+=64*4
  1070. sub \$64*4,$len
  1071. jnz .Loop_outer4x
  1072. jmp .Ldone4x
  1073. .Ltail4x:
  1074. cmp \$192,$len
  1075. jae .L192_or_more4x
  1076. cmp \$128,$len
  1077. jae .L128_or_more4x
  1078. cmp \$64,$len
  1079. jae .L64_or_more4x
  1080. #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1081. xor %r10,%r10
  1082. #movdqa $xt0,0x00(%rsp)
  1083. movdqa $xb0,0x10(%rsp)
  1084. movdqa $xc0,0x20(%rsp)
  1085. movdqa $xd0,0x30(%rsp)
  1086. jmp .Loop_tail4x
  1087. .align 32
  1088. .L64_or_more4x:
  1089. movdqu 0x00($inp),$xt0 # xor with input
  1090. movdqu 0x10($inp),$xt1
  1091. movdqu 0x20($inp),$xt2
  1092. movdqu 0x30($inp),$xt3
  1093. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1094. pxor $xb0,$xt1
  1095. pxor $xc0,$xt2
  1096. pxor $xd0,$xt3
  1097. movdqu $xt0,0x00($out)
  1098. movdqu $xt1,0x10($out)
  1099. movdqu $xt2,0x20($out)
  1100. movdqu $xt3,0x30($out)
  1101. je .Ldone4x
  1102. movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
  1103. lea 0x40($inp),$inp # inp+=64*1
  1104. xor %r10,%r10
  1105. movdqa $xt0,0x00(%rsp)
  1106. movdqa $xb1,0x10(%rsp)
  1107. lea 0x40($out),$out # out+=64*1
  1108. movdqa $xc1,0x20(%rsp)
  1109. sub \$64,$len # len-=64*1
  1110. movdqa $xd1,0x30(%rsp)
  1111. jmp .Loop_tail4x
  1112. .align 32
  1113. .L128_or_more4x:
  1114. movdqu 0x00($inp),$xt0 # xor with input
  1115. movdqu 0x10($inp),$xt1
  1116. movdqu 0x20($inp),$xt2
  1117. movdqu 0x30($inp),$xt3
  1118. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1119. pxor $xb0,$xt1
  1120. pxor $xc0,$xt2
  1121. pxor $xd0,$xt3
  1122. movdqu $xt0,0x00($out)
  1123. movdqu 0x40($inp),$xt0
  1124. movdqu $xt1,0x10($out)
  1125. movdqu 0x50($inp),$xt1
  1126. movdqu $xt2,0x20($out)
  1127. movdqu 0x60($inp),$xt2
  1128. movdqu $xt3,0x30($out)
  1129. movdqu 0x70($inp),$xt3
  1130. pxor 0x10(%rsp),$xt0
  1131. pxor $xb1,$xt1
  1132. pxor $xc1,$xt2
  1133. pxor $xd1,$xt3
  1134. movdqu $xt0,0x40($out)
  1135. movdqu $xt1,0x50($out)
  1136. movdqu $xt2,0x60($out)
  1137. movdqu $xt3,0x70($out)
  1138. je .Ldone4x
  1139. movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
  1140. lea 0x80($inp),$inp # inp+=64*2
  1141. xor %r10,%r10
  1142. movdqa $xt0,0x00(%rsp)
  1143. movdqa $xb2,0x10(%rsp)
  1144. lea 0x80($out),$out # out+=64*2
  1145. movdqa $xc2,0x20(%rsp)
  1146. sub \$128,$len # len-=64*2
  1147. movdqa $xd2,0x30(%rsp)
  1148. jmp .Loop_tail4x
  1149. .align 32
  1150. .L192_or_more4x:
  1151. movdqu 0x00($inp),$xt0 # xor with input
  1152. movdqu 0x10($inp),$xt1
  1153. movdqu 0x20($inp),$xt2
  1154. movdqu 0x30($inp),$xt3
  1155. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1156. pxor $xb0,$xt1
  1157. pxor $xc0,$xt2
  1158. pxor $xd0,$xt3
  1159. movdqu $xt0,0x00($out)
  1160. movdqu 0x40($inp),$xt0
  1161. movdqu $xt1,0x10($out)
  1162. movdqu 0x50($inp),$xt1
  1163. movdqu $xt2,0x20($out)
  1164. movdqu 0x60($inp),$xt2
  1165. movdqu $xt3,0x30($out)
  1166. movdqu 0x70($inp),$xt3
  1167. lea 0x80($inp),$inp # size optimization
  1168. pxor 0x10(%rsp),$xt0
  1169. pxor $xb1,$xt1
  1170. pxor $xc1,$xt2
  1171. pxor $xd1,$xt3
  1172. movdqu $xt0,0x40($out)
  1173. movdqu 0x00($inp),$xt0
  1174. movdqu $xt1,0x50($out)
  1175. movdqu 0x10($inp),$xt1
  1176. movdqu $xt2,0x60($out)
  1177. movdqu 0x20($inp),$xt2
  1178. movdqu $xt3,0x70($out)
  1179. lea 0x80($out),$out # size optimization
  1180. movdqu 0x30($inp),$xt3
  1181. pxor 0x20(%rsp),$xt0
  1182. pxor $xb2,$xt1
  1183. pxor $xc2,$xt2
  1184. pxor $xd2,$xt3
  1185. movdqu $xt0,0x00($out)
  1186. movdqu $xt1,0x10($out)
  1187. movdqu $xt2,0x20($out)
  1188. movdqu $xt3,0x30($out)
  1189. je .Ldone4x
  1190. movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
  1191. lea 0x40($inp),$inp # inp+=64*3
  1192. xor %r10,%r10
  1193. movdqa $xt0,0x00(%rsp)
  1194. movdqa $xb3,0x10(%rsp)
  1195. lea 0x40($out),$out # out+=64*3
  1196. movdqa $xc3,0x20(%rsp)
  1197. sub \$192,$len # len-=64*3
  1198. movdqa $xd3,0x30(%rsp)
  1199. .Loop_tail4x:
  1200. movzb ($inp,%r10),%eax
  1201. movzb (%rsp,%r10),%ecx
  1202. lea 1(%r10),%r10
  1203. xor %ecx,%eax
  1204. mov %al,-1($out,%r10)
  1205. dec $len
  1206. jnz .Loop_tail4x
  1207. .Ldone4x:
  1208. ___
  1209. $code.=<<___ if ($win64);
  1210. movaps -0xa8(%r9),%xmm6
  1211. movaps -0x98(%r9),%xmm7
  1212. movaps -0x88(%r9),%xmm8
  1213. movaps -0x78(%r9),%xmm9
  1214. movaps -0x68(%r9),%xmm10
  1215. movaps -0x58(%r9),%xmm11
  1216. movaps -0x48(%r9),%xmm12
  1217. movaps -0x38(%r9),%xmm13
  1218. movaps -0x28(%r9),%xmm14
  1219. movaps -0x18(%r9),%xmm15
  1220. ___
  1221. $code.=<<___;
  1222. lea (%r9),%rsp
  1223. .cfi_def_cfa_register %rsp
  1224. .L4x_epilogue:
  1225. ret
  1226. .cfi_endproc
  1227. .size ChaCha20_4x,.-ChaCha20_4x
  1228. ___
  1229. }
  1230. ########################################################################
  1231. # XOP code path that handles all lengths.
  1232. if ($avx) {
  1233. # There is some "anomaly" observed depending on instructions' size or
  1234. # alignment. If you look closely at the code below, you'll notice that
  1235. # the argument order sometimes varies. The order affects instruction
  1236. # encoding by making it larger, and such fiddling gives a 5% performance
  1237. # improvement. This is on FX-4100...
  1238. my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
  1239. $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
  1240. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  1241. $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
  1242. sub XOP_lane_ROUND {
  1243. my ($a0,$b0,$c0,$d0)=@_;
  1244. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  1245. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  1246. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  1247. my @x=map("\"$_\"",@xx);
  1248. (
  1249. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  1250. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  1251. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  1252. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  1253. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1254. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1255. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1256. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1257. "&vprotd (@x[$d0],@x[$d0],16)",
  1258. "&vprotd (@x[$d1],@x[$d1],16)",
  1259. "&vprotd (@x[$d2],@x[$d2],16)",
  1260. "&vprotd (@x[$d3],@x[$d3],16)",
  1261. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  1262. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  1263. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  1264. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  1265. "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
  1266. "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
  1267. "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
  1268. "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
  1269. "&vprotd (@x[$b0],@x[$b0],12)",
  1270. "&vprotd (@x[$b1],@x[$b1],12)",
  1271. "&vprotd (@x[$b2],@x[$b2],12)",
  1272. "&vprotd (@x[$b3],@x[$b3],12)",
  1273. "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
  1274. "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
  1275. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  1276. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  1277. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1278. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1279. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1280. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1281. "&vprotd (@x[$d0],@x[$d0],8)",
  1282. "&vprotd (@x[$d1],@x[$d1],8)",
  1283. "&vprotd (@x[$d2],@x[$d2],8)",
  1284. "&vprotd (@x[$d3],@x[$d3],8)",
  1285. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  1286. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  1287. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  1288. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  1289. "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
  1290. "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
  1291. "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
  1292. "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
  1293. "&vprotd (@x[$b0],@x[$b0],7)",
  1294. "&vprotd (@x[$b1],@x[$b1],7)",
  1295. "&vprotd (@x[$b2],@x[$b2],7)",
  1296. "&vprotd (@x[$b3],@x[$b3],7)"
  1297. );
  1298. }
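# For reference, each lane above performs the standard ChaCha20
# quarter-round (RFC 8439):
#
#	a += b; d ^= a; d = ROTL32(d,16);
#	c += d; b ^= c; b = ROTL32(b,12);
#	a += b; d ^= a; d = ROTL32(d, 8);
#	c += d; b ^= c; b = ROTL32(b, 7);
#
# XOP's vprotd provides the 32-bit rotates directly, which is why no
# shift-and-or emulation appears on this code path.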
  1299. my $xframe = $win64 ? 0xa8 : 8;
  1300. $code.=<<___;
  1301. .type ChaCha20_4xop,\@function,5
  1302. .align 32
  1303. ChaCha20_4xop:
  1304. .cfi_startproc
  1305. .LChaCha20_4xop:
  1306. mov %rsp,%r9 # frame pointer
  1307. .cfi_def_cfa_register %r9
  1308. sub \$0x140+$xframe,%rsp
  1309. ___
################ stack layout
# +0x00		SIMD equivalent of @x[8-12]
# ...
# +0x40		constant copy of key[0-2] smashed by lanes
# ...
# +0x100	SIMD counters (with nonce smashed by lanes)
# ...
# +0x140
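# %rcx is anchored at %rsp+0x100 below ("size optimization") so that
# every slot in the 0x80-0x130 range is reachable with a signed 8-bit
# displacement (-0x80..+0x30), i.e. one displacement byte per
# instruction instead of four.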
  1318. $code.=<<___ if ($win64);
  1319. movaps %xmm6,-0xa8(%r9)
  1320. movaps %xmm7,-0x98(%r9)
  1321. movaps %xmm8,-0x88(%r9)
  1322. movaps %xmm9,-0x78(%r9)
  1323. movaps %xmm10,-0x68(%r9)
  1324. movaps %xmm11,-0x58(%r9)
  1325. movaps %xmm12,-0x48(%r9)
  1326. movaps %xmm13,-0x38(%r9)
  1327. movaps %xmm14,-0x28(%r9)
  1328. movaps %xmm15,-0x18(%r9)
  1329. .L4xop_body:
  1330. ___
  1331. $code.=<<___;
  1332. vzeroupper
  1333. vmovdqa .Lsigma(%rip),$xa3 # key[0]
  1334. vmovdqu ($key),$xb3 # key[1]
  1335. vmovdqu 16($key),$xt3 # key[2]
  1336. vmovdqu ($counter),$xd3 # key[3]
  1337. lea 0x100(%rsp),%rcx # size optimization
  1338. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  1339. vpshufd \$0x55,$xa3,$xa1
  1340. vmovdqa $xa0,0x40(%rsp) # ... and offload
  1341. vpshufd \$0xaa,$xa3,$xa2
  1342. vmovdqa $xa1,0x50(%rsp)
  1343. vpshufd \$0xff,$xa3,$xa3
  1344. vmovdqa $xa2,0x60(%rsp)
  1345. vmovdqa $xa3,0x70(%rsp)
  1346. vpshufd \$0x00,$xb3,$xb0
  1347. vpshufd \$0x55,$xb3,$xb1
  1348. vmovdqa $xb0,0x80-0x100(%rcx)
  1349. vpshufd \$0xaa,$xb3,$xb2
  1350. vmovdqa $xb1,0x90-0x100(%rcx)
  1351. vpshufd \$0xff,$xb3,$xb3
  1352. vmovdqa $xb2,0xa0-0x100(%rcx)
  1353. vmovdqa $xb3,0xb0-0x100(%rcx)
  1354. vpshufd \$0x00,$xt3,$xt0 # "$xc0"
  1355. vpshufd \$0x55,$xt3,$xt1 # "$xc1"
  1356. vmovdqa $xt0,0xc0-0x100(%rcx)
  1357. vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
  1358. vmovdqa $xt1,0xd0-0x100(%rcx)
  1359. vpshufd \$0xff,$xt3,$xt3 # "$xc3"
  1360. vmovdqa $xt2,0xe0-0x100(%rcx)
  1361. vmovdqa $xt3,0xf0-0x100(%rcx)
  1362. vpshufd \$0x00,$xd3,$xd0
  1363. vpshufd \$0x55,$xd3,$xd1
  1364. vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
  1365. vpshufd \$0xaa,$xd3,$xd2
  1366. vmovdqa $xd1,0x110-0x100(%rcx)
  1367. vpshufd \$0xff,$xd3,$xd3
  1368. vmovdqa $xd2,0x120-0x100(%rcx)
  1369. vmovdqa $xd3,0x130-0x100(%rcx)
  1370. jmp .Loop_enter4xop
  1371. .align 32
  1372. .Loop_outer4xop:
  1373. vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
  1374. vmovdqa 0x50(%rsp),$xa1
  1375. vmovdqa 0x60(%rsp),$xa2
  1376. vmovdqa 0x70(%rsp),$xa3
  1377. vmovdqa 0x80-0x100(%rcx),$xb0
  1378. vmovdqa 0x90-0x100(%rcx),$xb1
  1379. vmovdqa 0xa0-0x100(%rcx),$xb2
  1380. vmovdqa 0xb0-0x100(%rcx),$xb3
  1381. vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
  1382. vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
  1383. vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
  1384. vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
  1385. vmovdqa 0x100-0x100(%rcx),$xd0
  1386. vmovdqa 0x110-0x100(%rcx),$xd1
  1387. vmovdqa 0x120-0x100(%rcx),$xd2
  1388. vmovdqa 0x130-0x100(%rcx),$xd3
  1389. vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
  1390. .Loop_enter4xop:
  1391. mov \$10,%eax
  1392. vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
  1393. jmp .Loop4xop
  1394. .align 32
  1395. .Loop4xop:
  1396. ___
  1397. foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
  1398. foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
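# The two calls above emit one ChaCha "double round": a column round
# (0,4,8,12) followed by a diagonal round (0,5,10,15).  %eax counts 10
# such passes, i.e. the full 20 rounds.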
  1399. $code.=<<___;
  1400. dec %eax
  1401. jnz .Loop4xop
  1402. vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
  1403. vpaddd 0x50(%rsp),$xa1,$xa1
  1404. vpaddd 0x60(%rsp),$xa2,$xa2
  1405. vpaddd 0x70(%rsp),$xa3,$xa3
  1406. vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
  1407. vmovdqa $xt3,0x30(%rsp)
  1408. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  1409. vpunpckldq $xa3,$xa2,$xt3
  1410. vpunpckhdq $xa1,$xa0,$xa0
  1411. vpunpckhdq $xa3,$xa2,$xa2
  1412. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  1413. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  1414. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  1415. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  1416. ___
  1417. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  1418. $code.=<<___;
  1419. vpaddd 0x80-0x100(%rcx),$xb0,$xb0
  1420. vpaddd 0x90-0x100(%rcx),$xb1,$xb1
  1421. vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
  1422. vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
  1423. vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
  1424. vmovdqa $xa1,0x10(%rsp)
  1425. vmovdqa 0x20(%rsp),$xa0 # "xc2"
  1426. vmovdqa 0x30(%rsp),$xa1 # "xc3"
  1427. vpunpckldq $xb1,$xb0,$xt2
  1428. vpunpckldq $xb3,$xb2,$xt3
  1429. vpunpckhdq $xb1,$xb0,$xb0
  1430. vpunpckhdq $xb3,$xb2,$xb2
  1431. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  1432. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  1433. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  1434. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  1435. ___
  1436. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  1437. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  1438. $code.=<<___;
  1439. vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
  1440. vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
  1441. vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
  1442. vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
  1443. vpunpckldq $xc1,$xc0,$xt2
  1444. vpunpckldq $xc3,$xc2,$xt3
  1445. vpunpckhdq $xc1,$xc0,$xc0
  1446. vpunpckhdq $xc3,$xc2,$xc2
  1447. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  1448. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  1449. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  1450. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  1451. ___
  1452. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  1453. $code.=<<___;
  1454. vpaddd 0x100-0x100(%rcx),$xd0,$xd0
  1455. vpaddd 0x110-0x100(%rcx),$xd1,$xd1
  1456. vpaddd 0x120-0x100(%rcx),$xd2,$xd2
  1457. vpaddd 0x130-0x100(%rcx),$xd3,$xd3
  1458. vpunpckldq $xd1,$xd0,$xt2
  1459. vpunpckldq $xd3,$xd2,$xt3
  1460. vpunpckhdq $xd1,$xd0,$xd0
  1461. vpunpckhdq $xd3,$xd2,$xd2
  1462. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  1463. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  1464. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  1465. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  1466. ___
  1467. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  1468. ($xa0,$xa1)=($xt2,$xt3);
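# The Perl-level assignments scattered through this transposition move
# no data; they merely re-label the registers so that, once $xa0/$xa1
# are restored from the stack below, $xa0,$xb0,$xc0,$xd0 name the
# registers holding the first 64-byte output block, $xa1,$xb1,$xc1,$xd1
# the second, and so on.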
  1469. $code.=<<___;
  1470. vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
  1471. vmovdqa 0x10(%rsp),$xa1
  1472. cmp \$64*4,$len
  1473. jb .Ltail4xop
  1474. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1475. vpxor 0x10($inp),$xb0,$xb0
  1476. vpxor 0x20($inp),$xc0,$xc0
  1477. vpxor 0x30($inp),$xd0,$xd0
  1478. vpxor 0x40($inp),$xa1,$xa1
  1479. vpxor 0x50($inp),$xb1,$xb1
  1480. vpxor 0x60($inp),$xc1,$xc1
  1481. vpxor 0x70($inp),$xd1,$xd1
  1482. lea 0x80($inp),$inp # size optimization
  1483. vpxor 0x00($inp),$xa2,$xa2
  1484. vpxor 0x10($inp),$xb2,$xb2
  1485. vpxor 0x20($inp),$xc2,$xc2
  1486. vpxor 0x30($inp),$xd2,$xd2
  1487. vpxor 0x40($inp),$xa3,$xa3
  1488. vpxor 0x50($inp),$xb3,$xb3
  1489. vpxor 0x60($inp),$xc3,$xc3
  1490. vpxor 0x70($inp),$xd3,$xd3
  1491. lea 0x80($inp),$inp # inp+=64*4
  1492. vmovdqu $xa0,0x00($out)
  1493. vmovdqu $xb0,0x10($out)
  1494. vmovdqu $xc0,0x20($out)
  1495. vmovdqu $xd0,0x30($out)
  1496. vmovdqu $xa1,0x40($out)
  1497. vmovdqu $xb1,0x50($out)
  1498. vmovdqu $xc1,0x60($out)
  1499. vmovdqu $xd1,0x70($out)
  1500. lea 0x80($out),$out # size optimization
  1501. vmovdqu $xa2,0x00($out)
  1502. vmovdqu $xb2,0x10($out)
  1503. vmovdqu $xc2,0x20($out)
  1504. vmovdqu $xd2,0x30($out)
  1505. vmovdqu $xa3,0x40($out)
  1506. vmovdqu $xb3,0x50($out)
  1507. vmovdqu $xc3,0x60($out)
  1508. vmovdqu $xd3,0x70($out)
  1509. lea 0x80($out),$out # out+=64*4
  1510. sub \$64*4,$len
  1511. jnz .Loop_outer4xop
  1512. jmp .Ldone4xop
  1513. .align 32
  1514. .Ltail4xop:
  1515. cmp \$192,$len
  1516. jae .L192_or_more4xop
  1517. cmp \$128,$len
  1518. jae .L128_or_more4xop
  1519. cmp \$64,$len
  1520. jae .L64_or_more4xop
  1521. xor %r10,%r10
  1522. vmovdqa $xa0,0x00(%rsp)
  1523. vmovdqa $xb0,0x10(%rsp)
  1524. vmovdqa $xc0,0x20(%rsp)
  1525. vmovdqa $xd0,0x30(%rsp)
  1526. jmp .Loop_tail4xop
  1527. .align 32
  1528. .L64_or_more4xop:
  1529. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1530. vpxor 0x10($inp),$xb0,$xb0
  1531. vpxor 0x20($inp),$xc0,$xc0
  1532. vpxor 0x30($inp),$xd0,$xd0
  1533. vmovdqu $xa0,0x00($out)
  1534. vmovdqu $xb0,0x10($out)
  1535. vmovdqu $xc0,0x20($out)
  1536. vmovdqu $xd0,0x30($out)
  1537. je .Ldone4xop
  1538. lea 0x40($inp),$inp # inp+=64*1
  1539. vmovdqa $xa1,0x00(%rsp)
  1540. xor %r10,%r10
  1541. vmovdqa $xb1,0x10(%rsp)
  1542. lea 0x40($out),$out # out+=64*1
  1543. vmovdqa $xc1,0x20(%rsp)
  1544. sub \$64,$len # len-=64*1
  1545. vmovdqa $xd1,0x30(%rsp)
  1546. jmp .Loop_tail4xop
  1547. .align 32
  1548. .L128_or_more4xop:
  1549. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1550. vpxor 0x10($inp),$xb0,$xb0
  1551. vpxor 0x20($inp),$xc0,$xc0
  1552. vpxor 0x30($inp),$xd0,$xd0
  1553. vpxor 0x40($inp),$xa1,$xa1
  1554. vpxor 0x50($inp),$xb1,$xb1
  1555. vpxor 0x60($inp),$xc1,$xc1
  1556. vpxor 0x70($inp),$xd1,$xd1
  1557. vmovdqu $xa0,0x00($out)
  1558. vmovdqu $xb0,0x10($out)
  1559. vmovdqu $xc0,0x20($out)
  1560. vmovdqu $xd0,0x30($out)
  1561. vmovdqu $xa1,0x40($out)
  1562. vmovdqu $xb1,0x50($out)
  1563. vmovdqu $xc1,0x60($out)
  1564. vmovdqu $xd1,0x70($out)
  1565. je .Ldone4xop
  1566. lea 0x80($inp),$inp # inp+=64*2
  1567. vmovdqa $xa2,0x00(%rsp)
  1568. xor %r10,%r10
  1569. vmovdqa $xb2,0x10(%rsp)
  1570. lea 0x80($out),$out # out+=64*2
  1571. vmovdqa $xc2,0x20(%rsp)
  1572. sub \$128,$len # len-=64*2
  1573. vmovdqa $xd2,0x30(%rsp)
  1574. jmp .Loop_tail4xop
  1575. .align 32
  1576. .L192_or_more4xop:
  1577. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1578. vpxor 0x10($inp),$xb0,$xb0
  1579. vpxor 0x20($inp),$xc0,$xc0
  1580. vpxor 0x30($inp),$xd0,$xd0
  1581. vpxor 0x40($inp),$xa1,$xa1
  1582. vpxor 0x50($inp),$xb1,$xb1
  1583. vpxor 0x60($inp),$xc1,$xc1
  1584. vpxor 0x70($inp),$xd1,$xd1
  1585. lea 0x80($inp),$inp # size optimization
  1586. vpxor 0x00($inp),$xa2,$xa2
  1587. vpxor 0x10($inp),$xb2,$xb2
  1588. vpxor 0x20($inp),$xc2,$xc2
  1589. vpxor 0x30($inp),$xd2,$xd2
  1590. vmovdqu $xa0,0x00($out)
  1591. vmovdqu $xb0,0x10($out)
  1592. vmovdqu $xc0,0x20($out)
  1593. vmovdqu $xd0,0x30($out)
  1594. vmovdqu $xa1,0x40($out)
  1595. vmovdqu $xb1,0x50($out)
  1596. vmovdqu $xc1,0x60($out)
  1597. vmovdqu $xd1,0x70($out)
  1598. lea 0x80($out),$out # size optimization
  1599. vmovdqu $xa2,0x00($out)
  1600. vmovdqu $xb2,0x10($out)
  1601. vmovdqu $xc2,0x20($out)
  1602. vmovdqu $xd2,0x30($out)
  1603. je .Ldone4xop
  1604. lea 0x40($inp),$inp # inp+=64*3
  1605. vmovdqa $xa3,0x00(%rsp)
  1606. xor %r10,%r10
  1607. vmovdqa $xb3,0x10(%rsp)
  1608. lea 0x40($out),$out # out+=64*3
  1609. vmovdqa $xc3,0x20(%rsp)
  1610. sub \$192,$len # len-=64*3
  1611. vmovdqa $xd3,0x30(%rsp)
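# At this point fewer than 64 bytes remain; the last partial block of
# keystream sits at 0x00-0x30(%rsp) and %r10 is zero.  XOR it with the
# input one byte at a time.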
  1612. .Loop_tail4xop:
  1613. movzb ($inp,%r10),%eax
  1614. movzb (%rsp,%r10),%ecx
  1615. lea 1(%r10),%r10
  1616. xor %ecx,%eax
  1617. mov %al,-1($out,%r10)
  1618. dec $len
  1619. jnz .Loop_tail4xop
  1620. .Ldone4xop:
  1621. vzeroupper
  1622. ___
  1623. $code.=<<___ if ($win64);
  1624. movaps -0xa8(%r9),%xmm6
  1625. movaps -0x98(%r9),%xmm7
  1626. movaps -0x88(%r9),%xmm8
  1627. movaps -0x78(%r9),%xmm9
  1628. movaps -0x68(%r9),%xmm10
  1629. movaps -0x58(%r9),%xmm11
  1630. movaps -0x48(%r9),%xmm12
  1631. movaps -0x38(%r9),%xmm13
  1632. movaps -0x28(%r9),%xmm14
  1633. movaps -0x18(%r9),%xmm15
  1634. ___
  1635. $code.=<<___;
  1636. lea (%r9),%rsp
  1637. .cfi_def_cfa_register %rsp
  1638. .L4xop_epilogue:
  1639. ret
  1640. .cfi_endproc
  1641. .size ChaCha20_4xop,.-ChaCha20_4xop
  1642. ___
  1643. }
  1644. ########################################################################
  1645. # AVX2 code path
  1646. if ($avx>1) {
  1647. my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
  1648. $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
  1649. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  1650. "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
  1651. sub AVX2_lane_ROUND {
  1652. my ($a0,$b0,$c0,$d0)=@_;
  1653. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  1654. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  1655. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  1656. my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
  1657. my @x=map("\"$_\"",@xx);
# Consider the order in which the variables are addressed by their
# index:
#
#	a   b   c   d
#
#	0   4   8  12 < even round
#	1   5   9  13
#	2   6  10  14
#	3   7  11  15
#	0   5  10  15 < odd round
#	1   6  11  12
#	2   7   8  13
#	3   4   9  14
#
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while the 'c's are maintained in memory. If you
# look at the 'c' column, you'll notice that the pair of 'c's in
# flight is invariant between rounds: the pair used at the end of one
# round is the same pair needed at the start of the next. This means
# they only have to be reloaded once per round, in the middle, which
# is why you'll see a bunch of 'c' stores and loads in the middle of
# the round, but none at the beginning or end.
  1679. (
  1680. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  1681. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1682. "&vpshufb (@x[$d0],@x[$d0],$t1)",
  1683. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  1684. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1685. "&vpshufb (@x[$d1],@x[$d1],$t1)",
  1686. "&vpaddd ($xc,$xc,@x[$d0])",
  1687. "&vpxor (@x[$b0],$xc,@x[$b0])",
  1688. "&vpslld ($t0,@x[$b0],12)",
  1689. "&vpsrld (@x[$b0],@x[$b0],20)",
  1690. "&vpor (@x[$b0],$t0,@x[$b0])",
  1691. "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
  1692. "&vpaddd ($xc_,$xc_,@x[$d1])",
  1693. "&vpxor (@x[$b1],$xc_,@x[$b1])",
  1694. "&vpslld ($t1,@x[$b1],12)",
  1695. "&vpsrld (@x[$b1],@x[$b1],20)",
  1696. "&vpor (@x[$b1],$t1,@x[$b1])",
  1697. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
  1698. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1699. "&vpshufb (@x[$d0],@x[$d0],$t0)",
  1700. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
  1701. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1702. "&vpshufb (@x[$d1],@x[$d1],$t0)",
  1703. "&vpaddd ($xc,$xc,@x[$d0])",
  1704. "&vpxor (@x[$b0],$xc,@x[$b0])",
  1705. "&vpslld ($t1,@x[$b0],7)",
  1706. "&vpsrld (@x[$b0],@x[$b0],25)",
  1707. "&vpor (@x[$b0],$t1,@x[$b0])",
  1708. "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
  1709. "&vpaddd ($xc_,$xc_,@x[$d1])",
  1710. "&vpxor (@x[$b1],$xc_,@x[$b1])",
  1711. "&vpslld ($t0,@x[$b1],7)",
  1712. "&vpsrld (@x[$b1],@x[$b1],25)",
  1713. "&vpor (@x[$b1],$t0,@x[$b1])",
  1714. "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
  1715. "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
  1716. "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
  1717. "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
  1718. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  1719. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1720. "&vpshufb (@x[$d2],@x[$d2],$t1)",
  1721. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  1722. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1723. "&vpshufb (@x[$d3],@x[$d3],$t1)",
  1724. "&vpaddd ($xc,$xc,@x[$d2])",
  1725. "&vpxor (@x[$b2],$xc,@x[$b2])",
  1726. "&vpslld ($t0,@x[$b2],12)",
  1727. "&vpsrld (@x[$b2],@x[$b2],20)",
  1728. "&vpor (@x[$b2],$t0,@x[$b2])",
  1729. "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
  1730. "&vpaddd ($xc_,$xc_,@x[$d3])",
  1731. "&vpxor (@x[$b3],$xc_,@x[$b3])",
  1732. "&vpslld ($t1,@x[$b3],12)",
  1733. "&vpsrld (@x[$b3],@x[$b3],20)",
  1734. "&vpor (@x[$b3],$t1,@x[$b3])",
  1735. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  1736. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1737. "&vpshufb (@x[$d2],@x[$d2],$t0)",
  1738. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  1739. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1740. "&vpshufb (@x[$d3],@x[$d3],$t0)",
  1741. "&vpaddd ($xc,$xc,@x[$d2])",
  1742. "&vpxor (@x[$b2],$xc,@x[$b2])",
  1743. "&vpslld ($t1,@x[$b2],7)",
  1744. "&vpsrld (@x[$b2],@x[$b2],25)",
  1745. "&vpor (@x[$b2],$t1,@x[$b2])",
  1746. "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
  1747. "&vpaddd ($xc_,$xc_,@x[$d3])",
  1748. "&vpxor (@x[$b3],$xc_,@x[$b3])",
  1749. "&vpslld ($t0,@x[$b3],7)",
  1750. "&vpsrld (@x[$b3],@x[$b3],25)",
  1751. "&vpor (@x[$b3],$t0,@x[$b3])"
  1752. );
  1753. }
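# Only the rotates by 12 and 7 are emulated with vpslld/vpsrld/vpor;
# the rotates by 16 and 8 are done as single byte shuffles, vpshufb
# against the masks kept broadcast in $t1 (loaded from .Lrot16) and
# $t0 (loaded from .Lrot24).  AVX2 has no native 32-bit rotate; that
# only arrives with AVX-512's vprold.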
  1754. my $xframe = $win64 ? 0xa8 : 8;
  1755. $code.=<<___;
  1756. .type ChaCha20_8x,\@function,5
  1757. .align 32
  1758. ChaCha20_8x:
  1759. .cfi_startproc
  1760. .LChaCha20_8x:
  1761. mov %rsp,%r9 # frame register
  1762. .cfi_def_cfa_register %r9
  1763. sub \$0x280+$xframe,%rsp
  1764. and \$-32,%rsp
  1765. ___
  1766. $code.=<<___ if ($win64);
  1767. movaps %xmm6,-0xa8(%r9)
  1768. movaps %xmm7,-0x98(%r9)
  1769. movaps %xmm8,-0x88(%r9)
  1770. movaps %xmm9,-0x78(%r9)
  1771. movaps %xmm10,-0x68(%r9)
  1772. movaps %xmm11,-0x58(%r9)
  1773. movaps %xmm12,-0x48(%r9)
  1774. movaps %xmm13,-0x38(%r9)
  1775. movaps %xmm14,-0x28(%r9)
  1776. movaps %xmm15,-0x18(%r9)
  1777. .L8x_body:
  1778. ___
  1779. $code.=<<___;
  1780. vzeroupper
  1781. ################ stack layout
  1782. # +0x00 SIMD equivalent of @x[8-12]
  1783. # ...
  1784. # +0x80 constant copy of key[0-2] smashed by lanes
  1785. # ...
  1786. # +0x200 SIMD counters (with nonce smashed by lanes)
  1787. # ...
  1788. # +0x280
  1789. vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
  1790. vbroadcasti128 ($key),$xb3 # key[1]
  1791. vbroadcasti128 16($key),$xt3 # key[2]
  1792. vbroadcasti128 ($counter),$xd3 # key[3]
  1793. lea 0x100(%rsp),%rcx # size optimization
  1794. lea 0x200(%rsp),%rax # size optimization
  1795. lea .Lrot16(%rip),%r10
  1796. lea .Lrot24(%rip),%r11
  1797. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  1798. vpshufd \$0x55,$xa3,$xa1
  1799. vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
  1800. vpshufd \$0xaa,$xa3,$xa2
  1801. vmovdqa $xa1,0xa0-0x100(%rcx)
  1802. vpshufd \$0xff,$xa3,$xa3
  1803. vmovdqa $xa2,0xc0-0x100(%rcx)
  1804. vmovdqa $xa3,0xe0-0x100(%rcx)
  1805. vpshufd \$0x00,$xb3,$xb0
  1806. vpshufd \$0x55,$xb3,$xb1
  1807. vmovdqa $xb0,0x100-0x100(%rcx)
  1808. vpshufd \$0xaa,$xb3,$xb2
  1809. vmovdqa $xb1,0x120-0x100(%rcx)
  1810. vpshufd \$0xff,$xb3,$xb3
  1811. vmovdqa $xb2,0x140-0x100(%rcx)
  1812. vmovdqa $xb3,0x160-0x100(%rcx)
  1813. vpshufd \$0x00,$xt3,$xt0 # "xc0"
  1814. vpshufd \$0x55,$xt3,$xt1 # "xc1"
  1815. vmovdqa $xt0,0x180-0x200(%rax)
  1816. vpshufd \$0xaa,$xt3,$xt2 # "xc2"
  1817. vmovdqa $xt1,0x1a0-0x200(%rax)
  1818. vpshufd \$0xff,$xt3,$xt3 # "xc3"
  1819. vmovdqa $xt2,0x1c0-0x200(%rax)
  1820. vmovdqa $xt3,0x1e0-0x200(%rax)
  1821. vpshufd \$0x00,$xd3,$xd0
  1822. vpshufd \$0x55,$xd3,$xd1
  1823. vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
  1824. vpshufd \$0xaa,$xd3,$xd2
  1825. vmovdqa $xd1,0x220-0x200(%rax)
  1826. vpshufd \$0xff,$xd3,$xd3
  1827. vmovdqa $xd2,0x240-0x200(%rax)
  1828. vmovdqa $xd3,0x260-0x200(%rax)
  1829. jmp .Loop_enter8x
  1830. .align 32
  1831. .Loop_outer8x:
  1832. vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
  1833. vmovdqa 0xa0-0x100(%rcx),$xa1
  1834. vmovdqa 0xc0-0x100(%rcx),$xa2
  1835. vmovdqa 0xe0-0x100(%rcx),$xa3
  1836. vmovdqa 0x100-0x100(%rcx),$xb0
  1837. vmovdqa 0x120-0x100(%rcx),$xb1
  1838. vmovdqa 0x140-0x100(%rcx),$xb2
  1839. vmovdqa 0x160-0x100(%rcx),$xb3
  1840. vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
  1841. vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
  1842. vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
  1843. vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
  1844. vmovdqa 0x200-0x200(%rax),$xd0
  1845. vmovdqa 0x220-0x200(%rax),$xd1
  1846. vmovdqa 0x240-0x200(%rax),$xd2
  1847. vmovdqa 0x260-0x200(%rax),$xd3
  1848. vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
  1849. .Loop_enter8x:
  1850. vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
  1851. vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
  1852. vbroadcasti128 (%r10),$xt3
  1853. vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
  1854. mov \$10,%eax
  1855. jmp .Loop8x
  1856. .align 32
  1857. .Loop8x:
  1858. ___
  1859. foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
  1860. foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
  1861. $code.=<<___;
  1862. dec %eax
  1863. jnz .Loop8x
  1864. lea 0x200(%rsp),%rax # size optimization
  1865. vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
  1866. vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
  1867. vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
  1868. vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
  1869. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  1870. vpunpckldq $xa3,$xa2,$xt3
  1871. vpunpckhdq $xa1,$xa0,$xa0
  1872. vpunpckhdq $xa3,$xa2,$xa2
  1873. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  1874. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  1875. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  1876. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  1877. ___
  1878. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  1879. $code.=<<___;
  1880. vpaddd 0x100-0x100(%rcx),$xb0,$xb0
  1881. vpaddd 0x120-0x100(%rcx),$xb1,$xb1
  1882. vpaddd 0x140-0x100(%rcx),$xb2,$xb2
  1883. vpaddd 0x160-0x100(%rcx),$xb3,$xb3
  1884. vpunpckldq $xb1,$xb0,$xt2
  1885. vpunpckldq $xb3,$xb2,$xt3
  1886. vpunpckhdq $xb1,$xb0,$xb0
  1887. vpunpckhdq $xb3,$xb2,$xb2
  1888. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  1889. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  1890. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  1891. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  1892. ___
  1893. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  1894. $code.=<<___;
  1895. vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
  1896. vperm2i128 \$0x31,$xb0,$xa0,$xb0
  1897. vperm2i128 \$0x20,$xb1,$xa1,$xa0
  1898. vperm2i128 \$0x31,$xb1,$xa1,$xb1
  1899. vperm2i128 \$0x20,$xb2,$xa2,$xa1
  1900. vperm2i128 \$0x31,$xb2,$xa2,$xb2
  1901. vperm2i128 \$0x20,$xb3,$xa3,$xa2
  1902. vperm2i128 \$0x31,$xb3,$xa3,$xb3
  1903. ___
  1904. ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
  1905. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
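# $xa0/$xa1 are about to be offloaded to 0x00/0x20(%rsp), so their
# registers can be recycled as "$xc2"/"$xc3" to pick up the 'c' values
# that the round loop left at 0x40/0x60(%rsp).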
  1906. $code.=<<___;
  1907. vmovdqa $xa0,0x00(%rsp) # offload $xaN
  1908. vmovdqa $xa1,0x20(%rsp)
  1909. vmovdqa 0x40(%rsp),$xc2 # $xa0
  1910. vmovdqa 0x60(%rsp),$xc3 # $xa1
  1911. vpaddd 0x180-0x200(%rax),$xc0,$xc0
  1912. vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
  1913. vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
  1914. vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
  1915. vpunpckldq $xc1,$xc0,$xt2
  1916. vpunpckldq $xc3,$xc2,$xt3
  1917. vpunpckhdq $xc1,$xc0,$xc0
  1918. vpunpckhdq $xc3,$xc2,$xc2
  1919. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  1920. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  1921. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  1922. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  1923. ___
  1924. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  1925. $code.=<<___;
  1926. vpaddd 0x200-0x200(%rax),$xd0,$xd0
  1927. vpaddd 0x220-0x200(%rax),$xd1,$xd1
  1928. vpaddd 0x240-0x200(%rax),$xd2,$xd2
  1929. vpaddd 0x260-0x200(%rax),$xd3,$xd3
  1930. vpunpckldq $xd1,$xd0,$xt2
  1931. vpunpckldq $xd3,$xd2,$xt3
  1932. vpunpckhdq $xd1,$xd0,$xd0
  1933. vpunpckhdq $xd3,$xd2,$xd2
  1934. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  1935. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  1936. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  1937. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  1938. ___
  1939. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  1940. $code.=<<___;
  1941. vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
  1942. vperm2i128 \$0x31,$xd0,$xc0,$xd0
  1943. vperm2i128 \$0x20,$xd1,$xc1,$xc0
  1944. vperm2i128 \$0x31,$xd1,$xc1,$xd1
  1945. vperm2i128 \$0x20,$xd2,$xc2,$xc1
  1946. vperm2i128 \$0x31,$xd2,$xc2,$xd2
  1947. vperm2i128 \$0x20,$xd3,$xc3,$xc2
  1948. vperm2i128 \$0x31,$xd3,$xc3,$xd3
  1949. ___
  1950. ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
  1951. ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
  1952. ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
  1953. ($xa0,$xa1)=($xt2,$xt3);
  1954. $code.=<<___;
  1955. vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
  1956. vmovdqa 0x20(%rsp),$xa1
  1957. cmp \$64*8,$len
  1958. jb .Ltail8x
  1959. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1960. vpxor 0x20($inp),$xb0,$xb0
  1961. vpxor 0x40($inp),$xc0,$xc0
  1962. vpxor 0x60($inp),$xd0,$xd0
  1963. lea 0x80($inp),$inp # size optimization
  1964. vmovdqu $xa0,0x00($out)
  1965. vmovdqu $xb0,0x20($out)
  1966. vmovdqu $xc0,0x40($out)
  1967. vmovdqu $xd0,0x60($out)
  1968. lea 0x80($out),$out # size optimization
  1969. vpxor 0x00($inp),$xa1,$xa1
  1970. vpxor 0x20($inp),$xb1,$xb1
  1971. vpxor 0x40($inp),$xc1,$xc1
  1972. vpxor 0x60($inp),$xd1,$xd1
  1973. lea 0x80($inp),$inp # size optimization
  1974. vmovdqu $xa1,0x00($out)
  1975. vmovdqu $xb1,0x20($out)
  1976. vmovdqu $xc1,0x40($out)
  1977. vmovdqu $xd1,0x60($out)
  1978. lea 0x80($out),$out # size optimization
  1979. vpxor 0x00($inp),$xa2,$xa2
  1980. vpxor 0x20($inp),$xb2,$xb2
  1981. vpxor 0x40($inp),$xc2,$xc2
  1982. vpxor 0x60($inp),$xd2,$xd2
  1983. lea 0x80($inp),$inp # size optimization
  1984. vmovdqu $xa2,0x00($out)
  1985. vmovdqu $xb2,0x20($out)
  1986. vmovdqu $xc2,0x40($out)
  1987. vmovdqu $xd2,0x60($out)
  1988. lea 0x80($out),$out # size optimization
  1989. vpxor 0x00($inp),$xa3,$xa3
  1990. vpxor 0x20($inp),$xb3,$xb3
  1991. vpxor 0x40($inp),$xc3,$xc3
  1992. vpxor 0x60($inp),$xd3,$xd3
  1993. lea 0x80($inp),$inp # size optimization
  1994. vmovdqu $xa3,0x00($out)
  1995. vmovdqu $xb3,0x20($out)
  1996. vmovdqu $xc3,0x40($out)
  1997. vmovdqu $xd3,0x60($out)
  1998. lea 0x80($out),$out # size optimization
  1999. sub \$64*8,$len
  2000. jnz .Loop_outer8x
  2001. jmp .Ldone8x
  2002. .Ltail8x:
  2003. cmp \$448,$len
  2004. jae .L448_or_more8x
  2005. cmp \$384,$len
  2006. jae .L384_or_more8x
  2007. cmp \$320,$len
  2008. jae .L320_or_more8x
  2009. cmp \$256,$len
  2010. jae .L256_or_more8x
  2011. cmp \$192,$len
  2012. jae .L192_or_more8x
  2013. cmp \$128,$len
  2014. jae .L128_or_more8x
  2015. cmp \$64,$len
  2016. jae .L64_or_more8x
  2017. xor %r10,%r10
  2018. vmovdqa $xa0,0x00(%rsp)
  2019. vmovdqa $xb0,0x20(%rsp)
  2020. jmp .Loop_tail8x
  2021. .align 32
  2022. .L64_or_more8x:
  2023. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2024. vpxor 0x20($inp),$xb0,$xb0
  2025. vmovdqu $xa0,0x00($out)
  2026. vmovdqu $xb0,0x20($out)
  2027. je .Ldone8x
  2028. lea 0x40($inp),$inp # inp+=64*1
  2029. xor %r10,%r10
  2030. vmovdqa $xc0,0x00(%rsp)
  2031. lea 0x40($out),$out # out+=64*1
  2032. sub \$64,$len # len-=64*1
  2033. vmovdqa $xd0,0x20(%rsp)
  2034. jmp .Loop_tail8x
  2035. .align 32
  2036. .L128_or_more8x:
  2037. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2038. vpxor 0x20($inp),$xb0,$xb0
  2039. vpxor 0x40($inp),$xc0,$xc0
  2040. vpxor 0x60($inp),$xd0,$xd0
  2041. vmovdqu $xa0,0x00($out)
  2042. vmovdqu $xb0,0x20($out)
  2043. vmovdqu $xc0,0x40($out)
  2044. vmovdqu $xd0,0x60($out)
  2045. je .Ldone8x
  2046. lea 0x80($inp),$inp # inp+=64*2
  2047. xor %r10,%r10
  2048. vmovdqa $xa1,0x00(%rsp)
  2049. lea 0x80($out),$out # out+=64*2
  2050. sub \$128,$len # len-=64*2
  2051. vmovdqa $xb1,0x20(%rsp)
  2052. jmp .Loop_tail8x
  2053. .align 32
  2054. .L192_or_more8x:
  2055. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2056. vpxor 0x20($inp),$xb0,$xb0
  2057. vpxor 0x40($inp),$xc0,$xc0
  2058. vpxor 0x60($inp),$xd0,$xd0
  2059. vpxor 0x80($inp),$xa1,$xa1
  2060. vpxor 0xa0($inp),$xb1,$xb1
  2061. vmovdqu $xa0,0x00($out)
  2062. vmovdqu $xb0,0x20($out)
  2063. vmovdqu $xc0,0x40($out)
  2064. vmovdqu $xd0,0x60($out)
  2065. vmovdqu $xa1,0x80($out)
  2066. vmovdqu $xb1,0xa0($out)
  2067. je .Ldone8x
  2068. lea 0xc0($inp),$inp # inp+=64*3
  2069. xor %r10,%r10
  2070. vmovdqa $xc1,0x00(%rsp)
  2071. lea 0xc0($out),$out # out+=64*3
  2072. sub \$192,$len # len-=64*3
  2073. vmovdqa $xd1,0x20(%rsp)
  2074. jmp .Loop_tail8x
  2075. .align 32
  2076. .L256_or_more8x:
  2077. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2078. vpxor 0x20($inp),$xb0,$xb0
  2079. vpxor 0x40($inp),$xc0,$xc0
  2080. vpxor 0x60($inp),$xd0,$xd0
  2081. vpxor 0x80($inp),$xa1,$xa1
  2082. vpxor 0xa0($inp),$xb1,$xb1
  2083. vpxor 0xc0($inp),$xc1,$xc1
  2084. vpxor 0xe0($inp),$xd1,$xd1
  2085. vmovdqu $xa0,0x00($out)
  2086. vmovdqu $xb0,0x20($out)
  2087. vmovdqu $xc0,0x40($out)
  2088. vmovdqu $xd0,0x60($out)
  2089. vmovdqu $xa1,0x80($out)
  2090. vmovdqu $xb1,0xa0($out)
  2091. vmovdqu $xc1,0xc0($out)
  2092. vmovdqu $xd1,0xe0($out)
  2093. je .Ldone8x
  2094. lea 0x100($inp),$inp # inp+=64*4
  2095. xor %r10,%r10
  2096. vmovdqa $xa2,0x00(%rsp)
  2097. lea 0x100($out),$out # out+=64*4
  2098. sub \$256,$len # len-=64*4
  2099. vmovdqa $xb2,0x20(%rsp)
  2100. jmp .Loop_tail8x
  2101. .align 32
  2102. .L320_or_more8x:
  2103. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2104. vpxor 0x20($inp),$xb0,$xb0
  2105. vpxor 0x40($inp),$xc0,$xc0
  2106. vpxor 0x60($inp),$xd0,$xd0
  2107. vpxor 0x80($inp),$xa1,$xa1
  2108. vpxor 0xa0($inp),$xb1,$xb1
  2109. vpxor 0xc0($inp),$xc1,$xc1
  2110. vpxor 0xe0($inp),$xd1,$xd1
  2111. vpxor 0x100($inp),$xa2,$xa2
  2112. vpxor 0x120($inp),$xb2,$xb2
  2113. vmovdqu $xa0,0x00($out)
  2114. vmovdqu $xb0,0x20($out)
  2115. vmovdqu $xc0,0x40($out)
  2116. vmovdqu $xd0,0x60($out)
  2117. vmovdqu $xa1,0x80($out)
  2118. vmovdqu $xb1,0xa0($out)
  2119. vmovdqu $xc1,0xc0($out)
  2120. vmovdqu $xd1,0xe0($out)
  2121. vmovdqu $xa2,0x100($out)
  2122. vmovdqu $xb2,0x120($out)
  2123. je .Ldone8x
  2124. lea 0x140($inp),$inp # inp+=64*5
  2125. xor %r10,%r10
  2126. vmovdqa $xc2,0x00(%rsp)
  2127. lea 0x140($out),$out # out+=64*5
  2128. sub \$320,$len # len-=64*5
  2129. vmovdqa $xd2,0x20(%rsp)
  2130. jmp .Loop_tail8x
  2131. .align 32
  2132. .L384_or_more8x:
  2133. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2134. vpxor 0x20($inp),$xb0,$xb0
  2135. vpxor 0x40($inp),$xc0,$xc0
  2136. vpxor 0x60($inp),$xd0,$xd0
  2137. vpxor 0x80($inp),$xa1,$xa1
  2138. vpxor 0xa0($inp),$xb1,$xb1
  2139. vpxor 0xc0($inp),$xc1,$xc1
  2140. vpxor 0xe0($inp),$xd1,$xd1
  2141. vpxor 0x100($inp),$xa2,$xa2
  2142. vpxor 0x120($inp),$xb2,$xb2
  2143. vpxor 0x140($inp),$xc2,$xc2
  2144. vpxor 0x160($inp),$xd2,$xd2
  2145. vmovdqu $xa0,0x00($out)
  2146. vmovdqu $xb0,0x20($out)
  2147. vmovdqu $xc0,0x40($out)
  2148. vmovdqu $xd0,0x60($out)
  2149. vmovdqu $xa1,0x80($out)
  2150. vmovdqu $xb1,0xa0($out)
  2151. vmovdqu $xc1,0xc0($out)
  2152. vmovdqu $xd1,0xe0($out)
  2153. vmovdqu $xa2,0x100($out)
  2154. vmovdqu $xb2,0x120($out)
  2155. vmovdqu $xc2,0x140($out)
  2156. vmovdqu $xd2,0x160($out)
  2157. je .Ldone8x
  2158. lea 0x180($inp),$inp # inp+=64*6
  2159. xor %r10,%r10
  2160. vmovdqa $xa3,0x00(%rsp)
  2161. lea 0x180($out),$out # out+=64*6
  2162. sub \$384,$len # len-=64*6
  2163. vmovdqa $xb3,0x20(%rsp)
  2164. jmp .Loop_tail8x
  2165. .align 32
  2166. .L448_or_more8x:
  2167. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2168. vpxor 0x20($inp),$xb0,$xb0
  2169. vpxor 0x40($inp),$xc0,$xc0
  2170. vpxor 0x60($inp),$xd0,$xd0
  2171. vpxor 0x80($inp),$xa1,$xa1
  2172. vpxor 0xa0($inp),$xb1,$xb1
  2173. vpxor 0xc0($inp),$xc1,$xc1
  2174. vpxor 0xe0($inp),$xd1,$xd1
  2175. vpxor 0x100($inp),$xa2,$xa2
  2176. vpxor 0x120($inp),$xb2,$xb2
  2177. vpxor 0x140($inp),$xc2,$xc2
  2178. vpxor 0x160($inp),$xd2,$xd2
  2179. vpxor 0x180($inp),$xa3,$xa3
  2180. vpxor 0x1a0($inp),$xb3,$xb3
  2181. vmovdqu $xa0,0x00($out)
  2182. vmovdqu $xb0,0x20($out)
  2183. vmovdqu $xc0,0x40($out)
  2184. vmovdqu $xd0,0x60($out)
  2185. vmovdqu $xa1,0x80($out)
  2186. vmovdqu $xb1,0xa0($out)
  2187. vmovdqu $xc1,0xc0($out)
  2188. vmovdqu $xd1,0xe0($out)
  2189. vmovdqu $xa2,0x100($out)
  2190. vmovdqu $xb2,0x120($out)
  2191. vmovdqu $xc2,0x140($out)
  2192. vmovdqu $xd2,0x160($out)
  2193. vmovdqu $xa3,0x180($out)
  2194. vmovdqu $xb3,0x1a0($out)
  2195. je .Ldone8x
  2196. lea 0x1c0($inp),$inp # inp+=64*7
  2197. xor %r10,%r10
  2198. vmovdqa $xc3,0x00(%rsp)
  2199. lea 0x1c0($out),$out # out+=64*7
  2200. sub \$448,$len # len-=64*7
  2201. vmovdqa $xd3,0x20(%rsp)
  2202. .Loop_tail8x:
  2203. movzb ($inp,%r10),%eax
  2204. movzb (%rsp,%r10),%ecx
  2205. lea 1(%r10),%r10
  2206. xor %ecx,%eax
  2207. mov %al,-1($out,%r10)
  2208. dec $len
  2209. jnz .Loop_tail8x
  2210. .Ldone8x:
  2211. vzeroall
  2212. ___
  2213. $code.=<<___ if ($win64);
  2214. movaps -0xa8(%r9),%xmm6
  2215. movaps -0x98(%r9),%xmm7
  2216. movaps -0x88(%r9),%xmm8
  2217. movaps -0x78(%r9),%xmm9
  2218. movaps -0x68(%r9),%xmm10
  2219. movaps -0x58(%r9),%xmm11
  2220. movaps -0x48(%r9),%xmm12
  2221. movaps -0x38(%r9),%xmm13
  2222. movaps -0x28(%r9),%xmm14
  2223. movaps -0x18(%r9),%xmm15
  2224. ___
  2225. $code.=<<___;
  2226. lea (%r9),%rsp
  2227. .cfi_def_cfa_register %rsp
  2228. .L8x_epilogue:
  2229. ret
  2230. .cfi_endproc
  2231. .size ChaCha20_8x,.-ChaCha20_8x
  2232. ___
  2233. }
  2234. ########################################################################
  2235. # AVX512 code paths
  2236. if ($avx>2) {
  2237. # This one handles shorter inputs...
  2238. my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
  2239. my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
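# $a-$d hold the 4x32-bit ChaCha state broadcast into all four 128-bit
# lanes of a %zmm register; .Lzeroz (defined earlier in the file)
# staggers the per-lane counter words, so each outer-loop iteration
# produces four consecutive 64-byte blocks, and $fourz (.Lfourz)
# advances the counters by four between iterations.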
  2240. sub vpxord() # size optimization
  2241. { my $opcode = "vpxor"; # adhere to vpxor when possible
  2242. foreach (@_) {
  2243. if (/%([zy])mm([0-9]+)/ && ($1 eq "z" || $2>=16)) {
  2244. $opcode = "vpxord";
  2245. last;
  2246. }
  2247. }
  2248. $code .= "\t$opcode\t".join(',',reverse @_)."\n";
  2249. }
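# vpxor has the shorter VEX encoding, but VEX can't address %zmm
# registers or register numbers >= 16; those operands require the
# EVEX-encoded vpxord, hence the substitution above.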
  2250. sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round
  2251. &vpaddd ($a,$a,$b);
  2252. &vpxord ($d,$d,$a);
  2253. &vprold ($d,$d,16);
  2254. &vpaddd ($c,$c,$d);
  2255. &vpxord ($b,$b,$c);
  2256. &vprold ($b,$b,12);
  2257. &vpaddd ($a,$a,$b);
  2258. &vpxord ($d,$d,$a);
  2259. &vprold ($d,$d,8);
  2260. &vpaddd ($c,$c,$d);
  2261. &vpxord ($b,$b,$c);
  2262. &vprold ($b,$b,7);
  2263. }
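# One AVX512ROUND is a column round across all lanes; the vpshufd
# re-arrangements emitted between the two calls in the loops below
# rotate $b, $c and $d within their 128-bit lanes so that the very
# same sequence then acts as the diagonal round, and the second set of
# shuffles undoes the rotation.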
  2264. my $xframe = $win64 ? 160+8 : 8;
  2265. $code.=<<___;
  2266. .type ChaCha20_avx512,\@function,5
  2267. .align 32
  2268. ChaCha20_avx512:
  2269. .cfi_startproc
  2270. .LChaCha20_avx512:
  2271. mov %rsp,%r9 # frame pointer
  2272. .cfi_def_cfa_register %r9
  2273. cmp \$512,$len
  2274. ja .LChaCha20_16x
  2275. sub \$64+$xframe,%rsp
  2276. ___
  2277. $code.=<<___ if ($win64);
  2278. movaps %xmm6,-0xa8(%r9)
  2279. movaps %xmm7,-0x98(%r9)
  2280. movaps %xmm8,-0x88(%r9)
  2281. movaps %xmm9,-0x78(%r9)
  2282. movaps %xmm10,-0x68(%r9)
  2283. movaps %xmm11,-0x58(%r9)
  2284. movaps %xmm12,-0x48(%r9)
  2285. movaps %xmm13,-0x38(%r9)
  2286. movaps %xmm14,-0x28(%r9)
  2287. movaps %xmm15,-0x18(%r9)
  2288. .Lavx512_body:
  2289. ___
  2290. $code.=<<___;
  2291. vbroadcasti32x4 .Lsigma(%rip),$a
  2292. vbroadcasti32x4 ($key),$b
  2293. vbroadcasti32x4 16($key),$c
  2294. vbroadcasti32x4 ($counter),$d
  2295. vmovdqa32 $a,$a_
  2296. vmovdqa32 $b,$b_
  2297. vmovdqa32 $c,$c_
  2298. vpaddd .Lzeroz(%rip),$d,$d
  2299. vmovdqa32 .Lfourz(%rip),$fourz
  2300. mov \$10,$counter # reuse $counter
  2301. vmovdqa32 $d,$d_
  2302. jmp .Loop_avx512
  2303. .align 16
  2304. .Loop_outer_avx512:
  2305. vmovdqa32 $a_,$a
  2306. vmovdqa32 $b_,$b
  2307. vmovdqa32 $c_,$c
  2308. vpaddd $fourz,$d_,$d
  2309. mov \$10,$counter
  2310. vmovdqa32 $d,$d_
  2311. jmp .Loop_avx512
  2312. .align 32
  2313. .Loop_avx512:
  2314. ___
  2315. &AVX512ROUND();
  2316. &vpshufd ($c,$c,0b01001110);
  2317. &vpshufd ($b,$b,0b00111001);
  2318. &vpshufd ($d,$d,0b10010011);
  2319. &AVX512ROUND();
  2320. &vpshufd ($c,$c,0b01001110);
  2321. &vpshufd ($b,$b,0b10010011);
  2322. &vpshufd ($d,$d,0b00111001);
  2323. &dec ($counter);
  2324. &jnz (".Loop_avx512");
  2325. $code.=<<___;
  2326. vpaddd $a_,$a,$a
  2327. vpaddd $b_,$b,$b
  2328. vpaddd $c_,$c,$c
  2329. vpaddd $d_,$d,$d
  2330. sub \$64,$len
  2331. jb .Ltail64_avx512
  2332. vpxor 0x00($inp),%x#$a,$t0 # xor with input
  2333. vpxor 0x10($inp),%x#$b,$t1
  2334. vpxor 0x20($inp),%x#$c,$t2
  2335. vpxor 0x30($inp),%x#$d,$t3
  2336. lea 0x40($inp),$inp # inp+=64
  2337. vmovdqu $t0,0x00($out) # write output
  2338. vmovdqu $t1,0x10($out)
  2339. vmovdqu $t2,0x20($out)
  2340. vmovdqu $t3,0x30($out)
  2341. lea 0x40($out),$out # out+=64
  2342. jz .Ldone_avx512
  2343. vextracti32x4 \$1,$a,$t0
  2344. vextracti32x4 \$1,$b,$t1
  2345. vextracti32x4 \$1,$c,$t2
  2346. vextracti32x4 \$1,$d,$t3
  2347. sub \$64,$len
  2348. jb .Ltail_avx512
  2349. vpxor 0x00($inp),$t0,$t0 # xor with input
  2350. vpxor 0x10($inp),$t1,$t1
  2351. vpxor 0x20($inp),$t2,$t2
  2352. vpxor 0x30($inp),$t3,$t3
  2353. lea 0x40($inp),$inp # inp+=64
  2354. vmovdqu $t0,0x00($out) # write output
  2355. vmovdqu $t1,0x10($out)
  2356. vmovdqu $t2,0x20($out)
  2357. vmovdqu $t3,0x30($out)
  2358. lea 0x40($out),$out # out+=64
  2359. jz .Ldone_avx512
  2360. vextracti32x4 \$2,$a,$t0
  2361. vextracti32x4 \$2,$b,$t1
  2362. vextracti32x4 \$2,$c,$t2
  2363. vextracti32x4 \$2,$d,$t3
  2364. sub \$64,$len
  2365. jb .Ltail_avx512
  2366. vpxor 0x00($inp),$t0,$t0 # xor with input
  2367. vpxor 0x10($inp),$t1,$t1
  2368. vpxor 0x20($inp),$t2,$t2
  2369. vpxor 0x30($inp),$t3,$t3
  2370. lea 0x40($inp),$inp # inp+=64
  2371. vmovdqu $t0,0x00($out) # write output
  2372. vmovdqu $t1,0x10($out)
  2373. vmovdqu $t2,0x20($out)
  2374. vmovdqu $t3,0x30($out)
  2375. lea 0x40($out),$out # out+=64
  2376. jz .Ldone_avx512
  2377. vextracti32x4 \$3,$a,$t0
  2378. vextracti32x4 \$3,$b,$t1
  2379. vextracti32x4 \$3,$c,$t2
  2380. vextracti32x4 \$3,$d,$t3
  2381. sub \$64,$len
  2382. jb .Ltail_avx512
  2383. vpxor 0x00($inp),$t0,$t0 # xor with input
  2384. vpxor 0x10($inp),$t1,$t1
  2385. vpxor 0x20($inp),$t2,$t2
  2386. vpxor 0x30($inp),$t3,$t3
  2387. lea 0x40($inp),$inp # inp+=64
  2388. vmovdqu $t0,0x00($out) # write output
  2389. vmovdqu $t1,0x10($out)
  2390. vmovdqu $t2,0x20($out)
  2391. vmovdqu $t3,0x30($out)
  2392. lea 0x40($out),$out # out+=64
  2393. jnz .Loop_outer_avx512
  2394. jmp .Ldone_avx512
  2395. .align 16
  2396. .Ltail64_avx512:
  2397. vmovdqa %x#$a,0x00(%rsp)
  2398. vmovdqa %x#$b,0x10(%rsp)
  2399. vmovdqa %x#$c,0x20(%rsp)
  2400. vmovdqa %x#$d,0x30(%rsp)
  2401. add \$64,$len
  2402. jmp .Loop_tail_avx512
  2403. .align 16
  2404. .Ltail_avx512:
  2405. vmovdqa $t0,0x00(%rsp)
  2406. vmovdqa $t1,0x10(%rsp)
  2407. vmovdqa $t2,0x20(%rsp)
  2408. vmovdqa $t3,0x30(%rsp)
  2409. add \$64,$len
  2410. .Loop_tail_avx512:
  2411. movzb ($inp,$counter),%eax
  2412. movzb (%rsp,$counter),%ecx
  2413. lea 1($counter),$counter
  2414. xor %ecx,%eax
  2415. mov %al,-1($out,$counter)
  2416. dec $len
  2417. jnz .Loop_tail_avx512
  2418. vmovdqu32 $a_,0x00(%rsp)
  2419. .Ldone_avx512:
  2420. vzeroall
  2421. ___
  2422. $code.=<<___ if ($win64);
  2423. movaps -0xa8(%r9),%xmm6
  2424. movaps -0x98(%r9),%xmm7
  2425. movaps -0x88(%r9),%xmm8
  2426. movaps -0x78(%r9),%xmm9
  2427. movaps -0x68(%r9),%xmm10
  2428. movaps -0x58(%r9),%xmm11
  2429. movaps -0x48(%r9),%xmm12
  2430. movaps -0x38(%r9),%xmm13
  2431. movaps -0x28(%r9),%xmm14
  2432. movaps -0x18(%r9),%xmm15
  2433. ___
  2434. $code.=<<___;
  2435. lea (%r9),%rsp
  2436. .cfi_def_cfa_register %rsp
  2437. .Lavx512_epilogue:
  2438. ret
  2439. .cfi_endproc
  2440. .size ChaCha20_avx512,.-ChaCha20_avx512
  2441. ___
  2442. map(s/%z/%y/, $a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz);
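# Renaming the registers from %zmm to %ymm lets the AVX512VL routine
# below reuse the same pattern at 256-bit width, i.e. two 64-byte
# blocks per pass, with .Ltwoy (defined earlier in the file) stepping
# the per-lane counters by two.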
  2443. $code.=<<___;
  2444. .type ChaCha20_avx512vl,\@function,5
  2445. .align 32
  2446. ChaCha20_avx512vl:
  2447. .cfi_startproc
  2448. .LChaCha20_avx512vl:
  2449. mov %rsp,%r9 # frame pointer
  2450. .cfi_def_cfa_register %r9
  2451. cmp \$128,$len
  2452. ja .LChaCha20_8xvl
  2453. sub \$64+$xframe,%rsp
  2454. ___
  2455. $code.=<<___ if ($win64);
  2456. movaps %xmm6,-0xa8(%r9)
  2457. movaps %xmm7,-0x98(%r9)
  2458. movaps %xmm8,-0x88(%r9)
  2459. movaps %xmm9,-0x78(%r9)
  2460. movaps %xmm10,-0x68(%r9)
  2461. movaps %xmm11,-0x58(%r9)
  2462. movaps %xmm12,-0x48(%r9)
  2463. movaps %xmm13,-0x38(%r9)
  2464. movaps %xmm14,-0x28(%r9)
  2465. movaps %xmm15,-0x18(%r9)
  2466. .Lavx512vl_body:
  2467. ___
  2468. $code.=<<___;
  2469. vbroadcasti128 .Lsigma(%rip),$a
  2470. vbroadcasti128 ($key),$b
  2471. vbroadcasti128 16($key),$c
  2472. vbroadcasti128 ($counter),$d
  2473. vmovdqa32 $a,$a_
  2474. vmovdqa32 $b,$b_
  2475. vmovdqa32 $c,$c_
  2476. vpaddd .Lzeroz(%rip),$d,$d
  2477. vmovdqa32 .Ltwoy(%rip),$fourz
  2478. mov \$10,$counter # reuse $counter
  2479. vmovdqa32 $d,$d_
  2480. jmp .Loop_avx512vl
  2481. .align 16
  2482. .Loop_outer_avx512vl:
  2483. vmovdqa32 $c_,$c
  2484. vpaddd $fourz,$d_,$d
  2485. mov \$10,$counter
  2486. vmovdqa32 $d,$d_
  2487. jmp .Loop_avx512vl
  2488. .align 32
  2489. .Loop_avx512vl:
  2490. ___
  2491. &AVX512ROUND();
  2492. &vpshufd ($c,$c,0b01001110);
  2493. &vpshufd ($b,$b,0b00111001);
  2494. &vpshufd ($d,$d,0b10010011);
  2495. &AVX512ROUND();
  2496. &vpshufd ($c,$c,0b01001110);
  2497. &vpshufd ($b,$b,0b10010011);
  2498. &vpshufd ($d,$d,0b00111001);
  2499. &dec ($counter);
  2500. &jnz (".Loop_avx512vl");
  2501. $code.=<<___;
  2502. vpaddd $a_,$a,$a
  2503. vpaddd $b_,$b,$b
  2504. vpaddd $c_,$c,$c
  2505. vpaddd $d_,$d,$d
  2506. sub \$64,$len
  2507. jb .Ltail64_avx512vl
  2508. vpxor 0x00($inp),%x#$a,$t0 # xor with input
  2509. vpxor 0x10($inp),%x#$b,$t1
  2510. vpxor 0x20($inp),%x#$c,$t2
  2511. vpxor 0x30($inp),%x#$d,$t3
  2512. lea 0x40($inp),$inp # inp+=64
  2513. vmovdqu $t0,0x00($out) # write output
  2514. vmovdqu $t1,0x10($out)
  2515. vmovdqu $t2,0x20($out)
  2516. vmovdqu $t3,0x30($out)
  2517. lea 0x40($out),$out # out+=64
  2518. jz .Ldone_avx512vl
  2519. vextracti128 \$1,$a,$t0
  2520. vextracti128 \$1,$b,$t1
  2521. vextracti128 \$1,$c,$t2
  2522. vextracti128 \$1,$d,$t3
  2523. sub \$64,$len
  2524. jb .Ltail_avx512vl
  2525. vpxor 0x00($inp),$t0,$t0 # xor with input
  2526. vpxor 0x10($inp),$t1,$t1
  2527. vpxor 0x20($inp),$t2,$t2
  2528. vpxor 0x30($inp),$t3,$t3
  2529. lea 0x40($inp),$inp # inp+=64
  2530. vmovdqu $t0,0x00($out) # write output
  2531. vmovdqu $t1,0x10($out)
  2532. vmovdqu $t2,0x20($out)
  2533. vmovdqu $t3,0x30($out)
  2534. lea 0x40($out),$out # out+=64
  2535. vmovdqa32 $a_,$a
  2536. vmovdqa32 $b_,$b
  2537. jnz .Loop_outer_avx512vl
  2538. jmp .Ldone_avx512vl
  2539. .align 16
  2540. .Ltail64_avx512vl:
  2541. vmovdqa %x#$a,0x00(%rsp)
  2542. vmovdqa %x#$b,0x10(%rsp)
  2543. vmovdqa %x#$c,0x20(%rsp)
  2544. vmovdqa %x#$d,0x30(%rsp)
  2545. add \$64,$len
  2546. jmp .Loop_tail_avx512vl
  2547. .align 16
  2548. .Ltail_avx512vl:
  2549. vmovdqa $t0,0x00(%rsp)
  2550. vmovdqa $t1,0x10(%rsp)
  2551. vmovdqa $t2,0x20(%rsp)
  2552. vmovdqa $t3,0x30(%rsp)
  2553. add \$64,$len
  2554. .Loop_tail_avx512vl:
  2555. movzb ($inp,$counter),%eax
  2556. movzb (%rsp,$counter),%ecx
  2557. lea 1($counter),$counter
  2558. xor %ecx,%eax
  2559. mov %al,-1($out,$counter)
  2560. dec $len
  2561. jnz .Loop_tail_avx512vl
  2562. vmovdqu32 $a_,0x00(%rsp)
  2563. vmovdqu32 $a_,0x20(%rsp)
  2564. .Ldone_avx512vl:
  2565. vzeroall
  2566. ___
  2567. $code.=<<___ if ($win64);
  2568. movaps -0xa8(%r9),%xmm6
  2569. movaps -0x98(%r9),%xmm7
  2570. movaps -0x88(%r9),%xmm8
  2571. movaps -0x78(%r9),%xmm9
  2572. movaps -0x68(%r9),%xmm10
  2573. movaps -0x58(%r9),%xmm11
  2574. movaps -0x48(%r9),%xmm12
  2575. movaps -0x38(%r9),%xmm13
  2576. movaps -0x28(%r9),%xmm14
  2577. movaps -0x18(%r9),%xmm15
  2578. ___
  2579. $code.=<<___;
  2580. lea (%r9),%rsp
  2581. .cfi_def_cfa_register %rsp
  2582. .Lavx512vl_epilogue:
  2583. ret
  2584. .cfi_endproc
  2585. .size ChaCha20_avx512vl,.-ChaCha20_avx512vl
  2586. ___
  2587. }
  2588. if ($avx>2) {
  2589. # This one handles longer inputs...
  2590. my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2591. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
  2592. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2593. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
  2594. my @key=map("%zmm$_",(16..31));
  2595. my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
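# Here each of %zmm0-15 carries one of the sixteen state words for 16
# independent blocks ("smashed by lanes"), while %zmm16-31 (@key) keep
# the corresponding input words for the feed-forward addition.
# @key[0..3] double as scratch ($xt0-$xt3) during the output
# transposition, once their values have been consumed; they are
# re-broadcast at the top of .Loop_outer16x.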
  2596. sub AVX512_lane_ROUND {
  2597. my ($a0,$b0,$c0,$d0)=@_;
  2598. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  2599. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  2600. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  2601. my @x=map("\"$_\"",@xx);
  2602. (
  2603. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  2604. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  2605. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  2606. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  2607. "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
  2608. "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
  2609. "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
  2610. "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
  2611. "&vprold (@x[$d0],@x[$d0],16)",
  2612. "&vprold (@x[$d1],@x[$d1],16)",
  2613. "&vprold (@x[$d2],@x[$d2],16)",
  2614. "&vprold (@x[$d3],@x[$d3],16)",
  2615. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  2616. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  2617. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  2618. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  2619. "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
  2620. "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
  2621. "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
  2622. "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
  2623. "&vprold (@x[$b0],@x[$b0],12)",
  2624. "&vprold (@x[$b1],@x[$b1],12)",
  2625. "&vprold (@x[$b2],@x[$b2],12)",
  2626. "&vprold (@x[$b3],@x[$b3],12)",
  2627. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
  2628. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
  2629. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  2630. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  2631. "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
  2632. "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
  2633. "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
  2634. "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
  2635. "&vprold (@x[$d0],@x[$d0],8)",
  2636. "&vprold (@x[$d1],@x[$d1],8)",
  2637. "&vprold (@x[$d2],@x[$d2],8)",
  2638. "&vprold (@x[$d3],@x[$d3],8)",
  2639. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  2640. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  2641. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  2642. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  2643. "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
  2644. "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
  2645. "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
  2646. "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
  2647. "&vprold (@x[$b0],@x[$b0],7)",
  2648. "&vprold (@x[$b1],@x[$b1],7)",
  2649. "&vprold (@x[$b2],@x[$b2],7)",
  2650. "&vprold (@x[$b3],@x[$b3],7)"
  2651. );
  2652. }
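# Thanks to AVX-512's native 32-bit rotate (vprold) the round above
# needs no temporary registers at all, unlike the AVX2 path.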
  2653. my $xframe = $win64 ? 0xa8 : 8;
  2654. $code.=<<___;
  2655. .type ChaCha20_16x,\@function,5
  2656. .align 32
  2657. ChaCha20_16x:
  2658. .cfi_startproc
  2659. .LChaCha20_16x:
  2660. mov %rsp,%r9 # frame register
  2661. .cfi_def_cfa_register %r9
  2662. sub \$64+$xframe,%rsp
  2663. and \$-64,%rsp
  2664. ___
  2665. $code.=<<___ if ($win64);
  2666. movaps %xmm6,-0xa8(%r9)
  2667. movaps %xmm7,-0x98(%r9)
  2668. movaps %xmm8,-0x88(%r9)
  2669. movaps %xmm9,-0x78(%r9)
  2670. movaps %xmm10,-0x68(%r9)
  2671. movaps %xmm11,-0x58(%r9)
  2672. movaps %xmm12,-0x48(%r9)
  2673. movaps %xmm13,-0x38(%r9)
  2674. movaps %xmm14,-0x28(%r9)
  2675. movaps %xmm15,-0x18(%r9)
  2676. .L16x_body:
  2677. ___
  2678. $code.=<<___;
  2679. vzeroupper
  2680. lea .Lsigma(%rip),%r10
  2681. vbroadcasti32x4 (%r10),$xa3 # key[0]
  2682. vbroadcasti32x4 ($key),$xb3 # key[1]
  2683. vbroadcasti32x4 16($key),$xc3 # key[2]
  2684. vbroadcasti32x4 ($counter),$xd3 # key[3]
  2685. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  2686. vpshufd \$0x55,$xa3,$xa1
  2687. vpshufd \$0xaa,$xa3,$xa2
  2688. vpshufd \$0xff,$xa3,$xa3
  2689. vmovdqa64 $xa0,@key[0]
  2690. vmovdqa64 $xa1,@key[1]
  2691. vmovdqa64 $xa2,@key[2]
  2692. vmovdqa64 $xa3,@key[3]
  2693. vpshufd \$0x00,$xb3,$xb0
  2694. vpshufd \$0x55,$xb3,$xb1
  2695. vpshufd \$0xaa,$xb3,$xb2
  2696. vpshufd \$0xff,$xb3,$xb3
  2697. vmovdqa64 $xb0,@key[4]
  2698. vmovdqa64 $xb1,@key[5]
  2699. vmovdqa64 $xb2,@key[6]
  2700. vmovdqa64 $xb3,@key[7]
  2701. vpshufd \$0x00,$xc3,$xc0
  2702. vpshufd \$0x55,$xc3,$xc1
  2703. vpshufd \$0xaa,$xc3,$xc2
  2704. vpshufd \$0xff,$xc3,$xc3
  2705. vmovdqa64 $xc0,@key[8]
  2706. vmovdqa64 $xc1,@key[9]
  2707. vmovdqa64 $xc2,@key[10]
  2708. vmovdqa64 $xc3,@key[11]
  2709. vpshufd \$0x00,$xd3,$xd0
  2710. vpshufd \$0x55,$xd3,$xd1
  2711. vpshufd \$0xaa,$xd3,$xd2
  2712. vpshufd \$0xff,$xd3,$xd3
  2713. vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet
  2714. vmovdqa64 $xd0,@key[12]
  2715. vmovdqa64 $xd1,@key[13]
  2716. vmovdqa64 $xd2,@key[14]
  2717. vmovdqa64 $xd3,@key[15]
  2718. mov \$10,%eax
  2719. jmp .Loop16x
  2720. .align 32
  2721. .Loop_outer16x:
  2722. vpbroadcastd 0(%r10),$xa0 # reload key
  2723. vpbroadcastd 4(%r10),$xa1
  2724. vpbroadcastd 8(%r10),$xa2
  2725. vpbroadcastd 12(%r10),$xa3
  2726. vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters
  2727. vmovdqa64 @key[4],$xb0
  2728. vmovdqa64 @key[5],$xb1
  2729. vmovdqa64 @key[6],$xb2
  2730. vmovdqa64 @key[7],$xb3
  2731. vmovdqa64 @key[8],$xc0
  2732. vmovdqa64 @key[9],$xc1
  2733. vmovdqa64 @key[10],$xc2
  2734. vmovdqa64 @key[11],$xc3
  2735. vmovdqa64 @key[12],$xd0
  2736. vmovdqa64 @key[13],$xd1
  2737. vmovdqa64 @key[14],$xd2
  2738. vmovdqa64 @key[15],$xd3
  2739. vmovdqa64 $xa0,@key[0]
  2740. vmovdqa64 $xa1,@key[1]
  2741. vmovdqa64 $xa2,@key[2]
  2742. vmovdqa64 $xa3,@key[3]
  2743. mov \$10,%eax
  2744. jmp .Loop16x
  2745. .align 32
  2746. .Loop16x:
  2747. ___
  2748. foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
  2749. foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
  2750. $code.=<<___;
  2751. dec %eax
  2752. jnz .Loop16x
  2753. vpaddd @key[0],$xa0,$xa0 # accumulate key
  2754. vpaddd @key[1],$xa1,$xa1
  2755. vpaddd @key[2],$xa2,$xa2
  2756. vpaddd @key[3],$xa3,$xa3
  2757. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  2758. vpunpckldq $xa3,$xa2,$xt3
  2759. vpunpckhdq $xa1,$xa0,$xa0
  2760. vpunpckhdq $xa3,$xa2,$xa2
  2761. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  2762. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  2763. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  2764. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  2765. ___
  2766. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  2767. $code.=<<___;
  2768. vpaddd @key[4],$xb0,$xb0
  2769. vpaddd @key[5],$xb1,$xb1
  2770. vpaddd @key[6],$xb2,$xb2
  2771. vpaddd @key[7],$xb3,$xb3
  2772. vpunpckldq $xb1,$xb0,$xt2
  2773. vpunpckldq $xb3,$xb2,$xt3
  2774. vpunpckhdq $xb1,$xb0,$xb0
  2775. vpunpckhdq $xb3,$xb2,$xb2
  2776. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  2777. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  2778. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  2779. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  2780. ___
  2781. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  2782. $code.=<<___;
  2783. vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further
  2784. vshufi32x4 \$0xee,$xb0,$xa0,$xb0
  2785. vshufi32x4 \$0x44,$xb1,$xa1,$xa0
  2786. vshufi32x4 \$0xee,$xb1,$xa1,$xb1
  2787. vshufi32x4 \$0x44,$xb2,$xa2,$xa1
  2788. vshufi32x4 \$0xee,$xb2,$xa2,$xb2
  2789. vshufi32x4 \$0x44,$xb3,$xa3,$xa2
  2790. vshufi32x4 \$0xee,$xb3,$xa3,$xb3
  2791. ___
  2792. ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
  2793. $code.=<<___;
  2794. vpaddd @key[8],$xc0,$xc0
  2795. vpaddd @key[9],$xc1,$xc1
  2796. vpaddd @key[10],$xc2,$xc2
  2797. vpaddd @key[11],$xc3,$xc3
  2798. vpunpckldq $xc1,$xc0,$xt2
  2799. vpunpckldq $xc3,$xc2,$xt3
  2800. vpunpckhdq $xc1,$xc0,$xc0
  2801. vpunpckhdq $xc3,$xc2,$xc2
  2802. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  2803. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  2804. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  2805. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  2806. ___
  2807. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  2808. $code.=<<___;
  2809. vpaddd @key[12],$xd0,$xd0
  2810. vpaddd @key[13],$xd1,$xd1
  2811. vpaddd @key[14],$xd2,$xd2
  2812. vpaddd @key[15],$xd3,$xd3
  2813. vpunpckldq $xd1,$xd0,$xt2
  2814. vpunpckldq $xd3,$xd2,$xt3
  2815. vpunpckhdq $xd1,$xd0,$xd0
  2816. vpunpckhdq $xd3,$xd2,$xd2
  2817. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  2818. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  2819. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  2820. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  2821. ___
  2822. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  2823. $code.=<<___;
  2824. vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # "de-interlace" further
  2825. vshufi32x4 \$0xee,$xd0,$xc0,$xd0
  2826. vshufi32x4 \$0x44,$xd1,$xc1,$xc0
  2827. vshufi32x4 \$0xee,$xd1,$xc1,$xd1
  2828. vshufi32x4 \$0x44,$xd2,$xc2,$xc1
  2829. vshufi32x4 \$0xee,$xd2,$xc2,$xd2
  2830. vshufi32x4 \$0x44,$xd3,$xc3,$xc2
  2831. vshufi32x4 \$0xee,$xd3,$xc3,$xd3
  2832. ___
  2833. ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
  2834. $code.=<<___;
  2835. vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further
  2836. vshufi32x4 \$0xdd,$xc0,$xa0,$xa0
  2837. vshufi32x4 \$0x88,$xd0,$xb0,$xc0
  2838. vshufi32x4 \$0xdd,$xd0,$xb0,$xd0
  2839. vshufi32x4 \$0x88,$xc1,$xa1,$xt1
  2840. vshufi32x4 \$0xdd,$xc1,$xa1,$xa1
  2841. vshufi32x4 \$0x88,$xd1,$xb1,$xc1
  2842. vshufi32x4 \$0xdd,$xd1,$xb1,$xd1
  2843. vshufi32x4 \$0x88,$xc2,$xa2,$xt2
  2844. vshufi32x4 \$0xdd,$xc2,$xa2,$xa2
  2845. vshufi32x4 \$0x88,$xd2,$xb2,$xc2
  2846. vshufi32x4 \$0xdd,$xd2,$xb2,$xd2
  2847. vshufi32x4 \$0x88,$xc3,$xa3,$xt3
  2848. vshufi32x4 \$0xdd,$xc3,$xa3,$xa3
  2849. vshufi32x4 \$0x88,$xd3,$xb3,$xc3
  2850. vshufi32x4 \$0xdd,$xd3,$xb3,$xd3
  2851. ___
  2852. ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
  2853. ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
  2854. ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
  2855. $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
  2856. ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2857. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
  2858. $code.=<<___;
  2859. cmp \$64*16,$len
  2860. jb .Ltail16x
  2861. vpxord 0x00($inp),$xa0,$xa0 # xor with input
  2862. vpxord 0x40($inp),$xb0,$xb0
  2863. vpxord 0x80($inp),$xc0,$xc0
  2864. vpxord 0xc0($inp),$xd0,$xd0
  2865. vmovdqu32 $xa0,0x00($out)
  2866. vmovdqu32 $xb0,0x40($out)
  2867. vmovdqu32 $xc0,0x80($out)
  2868. vmovdqu32 $xd0,0xc0($out)
  2869. vpxord 0x100($inp),$xa1,$xa1
  2870. vpxord 0x140($inp),$xb1,$xb1
  2871. vpxord 0x180($inp),$xc1,$xc1
  2872. vpxord 0x1c0($inp),$xd1,$xd1
  2873. vmovdqu32 $xa1,0x100($out)
  2874. vmovdqu32 $xb1,0x140($out)
  2875. vmovdqu32 $xc1,0x180($out)
  2876. vmovdqu32 $xd1,0x1c0($out)
  2877. vpxord 0x200($inp),$xa2,$xa2
  2878. vpxord 0x240($inp),$xb2,$xb2
  2879. vpxord 0x280($inp),$xc2,$xc2
  2880. vpxord 0x2c0($inp),$xd2,$xd2
  2881. vmovdqu32 $xa2,0x200($out)
  2882. vmovdqu32 $xb2,0x240($out)
  2883. vmovdqu32 $xc2,0x280($out)
  2884. vmovdqu32 $xd2,0x2c0($out)
  2885. vpxord 0x300($inp),$xa3,$xa3
  2886. vpxord 0x340($inp),$xb3,$xb3
  2887. vpxord 0x380($inp),$xc3,$xc3
  2888. vpxord 0x3c0($inp),$xd3,$xd3
  2889. lea 0x400($inp),$inp
  2890. vmovdqu32 $xa3,0x300($out)
  2891. vmovdqu32 $xb3,0x340($out)
  2892. vmovdqu32 $xc3,0x380($out)
  2893. vmovdqu32 $xd3,0x3c0($out)
  2894. lea 0x400($out),$out
  2895. sub \$64*16,$len
  2896. jnz .Loop_outer16x
  2897. jmp .Ldone16x
  2898. .align 32
  2899. .Ltail16x:
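# Fewer than 64*16 bytes remain.  $out is made relative to $inp
# (sub $inp,$out) so that only $inp needs advancing while ($out,$inp)
# still addresses the output; the chain of compares below then peels
# off one 64-byte block at a time, keeping the next unwritten block in
# $xa0 for the final sub-64-byte tail.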
  2900. xor %r10,%r10
  2901. sub $inp,$out
	cmp		\$64*1,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb0,$xa0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc0,$xa0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd0,$xa0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa1,$xa0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb1,$xa0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc1,$xa0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd1,$xa0
	lea		64($inp),$inp

	cmp		\$64*8,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa2,$xa0
	lea		64($inp),$inp

	cmp		\$64*9,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb2,$xa0
	lea		64($inp),$inp

	cmp		\$64*10,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc2,$xa0
	lea		64($inp),$inp

	cmp		\$64*11,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd2,$xa0
	lea		64($inp),$inp

	cmp		\$64*12,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa3,$xa0
	lea		64($inp),$inp

	cmp		\$64*13,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb3,$xa0
	lea		64($inp),$inp

	cmp		\$64*14,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc3,$xa0
	lea		64($inp),$inp

	cmp		\$64*15,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd3,$xa0
	lea		64($inp),$inp

.Less_than_64_16x:
	vmovdqa32	$xa0,0x00(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len
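	# xor the final 1..63 bytes against the keystream block parked on the
	# stack by the vmovdqa32 above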
.Loop_tail16x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail16x

	vpxord		$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
$code.=<<___ if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	%rsp
.L16x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_16x,.-ChaCha20_16x
___

# switch to %ymm domain
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%ymm$_",(0..15));
@xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
@key=map("%ymm$_",(16..31));
($xt0,$xt1,$xt2,$xt3)=@key[0..3];

$code.=<<___;
.type	ChaCha20_8xvl,\@function,5
.align	32
ChaCha20_8xvl:
.cfi_startproc
.LChaCha20_8xvl:
	mov		%rsp,%r9		# frame register
.cfi_def_cfa_register	%r9
	sub		\$64+$xframe,%rsp
	and		\$-64,%rsp
___
$code.=<<___ if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L8xvl_body:
___
$code.=<<___;
	vzeroupper

	lea		.Lsigma(%rip),%r10
	vbroadcasti128	(%r10),$xa3		# key[0]
	vbroadcasti128	($key),$xb3		# key[1]
	vbroadcasti128	16($key),$xc3		# key[2]
	vbroadcasti128	($counter),$xd3		# key[3]

	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd		\$0x55,$xa3,$xa1
	vpshufd		\$0xaa,$xa3,$xa2
	vpshufd		\$0xff,$xa3,$xa3
	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	vpshufd		\$0x00,$xb3,$xb0
	vpshufd		\$0x55,$xb3,$xb1
	vpshufd		\$0xaa,$xb3,$xb2
	vpshufd		\$0xff,$xb3,$xb3
	vmovdqa64	$xb0,@key[4]
	vmovdqa64	$xb1,@key[5]
	vmovdqa64	$xb2,@key[6]
	vmovdqa64	$xb3,@key[7]

	vpshufd		\$0x00,$xc3,$xc0
	vpshufd		\$0x55,$xc3,$xc1
	vpshufd		\$0xaa,$xc3,$xc2
	vpshufd		\$0xff,$xc3,$xc3
	vmovdqa64	$xc0,@key[8]
	vmovdqa64	$xc1,@key[9]
	vmovdqa64	$xc2,@key[10]
	vmovdqa64	$xc3,@key[11]

	vpshufd		\$0x00,$xd3,$xd0
	vpshufd		\$0x55,$xd3,$xd1
	vpshufd		\$0xaa,$xd3,$xd2
	vpshufd		\$0xff,$xd3,$xd3
	vpaddd		.Lincy(%rip),$xd0,$xd0	# don't save counters yet
	vmovdqa64	$xd0,@key[12]
	vmovdqa64	$xd1,@key[13]
	vmovdqa64	$xd2,@key[14]
	vmovdqa64	$xd3,@key[15]

	mov		\$10,%eax
	jmp		.Loop8xvl
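	# %eax counts double rounds: 10 iterations of .Loop8xvl give the 20
	# rounds of ChaCha20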
.align	32
.Loop_outer8xvl:
	#vpbroadcastd	0(%r10),$xa0		# reload key
	#vpbroadcastd	4(%r10),$xa1
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd		.Leight(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	mov		\$10,%eax
	jmp		.Loop8xvl

.align	32
.Loop8xvl:
___
	foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
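# each pass through .Loop8xvl is one double round: the first expansion mixes
# columns (0,4,8,12), the second mixes diagonals (0,5,10,15)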
$code.=<<___;
	dec		%eax
	jnz		.Loop8xvl

	vpaddd		@key[0],$xa0,$xa0	# accumulate key
	vpaddd		@key[1],$xa1,$xa1
	vpaddd		@key[2],$xa2,$xa2
	vpaddd		@key[3],$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd		@key[4],$xb0,$xb0
	vpaddd		@key[5],$xb1,$xb1
	vpaddd		@key[6],$xb2,$xb2
	vpaddd		@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vshufi32x4	\$0,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$3,$xb0,$xa0,$xb0
	vshufi32x4	\$0,$xb1,$xa1,$xa0
	vshufi32x4	\$3,$xb1,$xa1,$xb1
	vshufi32x4	\$0,$xb2,$xa2,$xa1
	vshufi32x4	\$3,$xb2,$xa2,$xb2
	vshufi32x4	\$0,$xb3,$xa3,$xa2
	vshufi32x4	\$3,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
$code.=<<___;
	vpaddd		@key[8],$xc0,$xc0
	vpaddd		@key[9],$xc1,$xc1
	vpaddd		@key[10],$xc2,$xc2
	vpaddd		@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd		@key[12],$xd0,$xd0
	vpaddd		@key[13],$xd1,$xd1
	vpaddd		@key[14],$xd2,$xd2
	vpaddd		@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xd0,$xc0,$xd0
	vperm2i128	\$0x20,$xd1,$xc1,$xc0
	vperm2i128	\$0x31,$xd1,$xc1,$xd1
	vperm2i128	\$0x20,$xd2,$xc2,$xc1
	vperm2i128	\$0x31,$xd2,$xc2,$xd2
	vperm2i128	\$0x20,$xd3,$xc3,$xc2
	vperm2i128	\$0x31,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
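# swap the b and c groups so that $xa0,$xb0,$xc0,$xd0 .. $xa3,$xb3,$xc3,$xd3
# come out in the order the stores below expect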
$code.=<<___;
	cmp		\$64*8,$len
	jb		.Ltail8xvl

	mov		\$0x80,%eax		# size optimization
	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	lea		($inp,%rax),$inp	# size optimization
	vmovdqu32	$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	lea		($out,%rax),$out	# size optimization

	vpxor		0x00($inp),$xa1,$xa1
	vpxor		0x20($inp),$xb1,$xb1
	vpxor		0x40($inp),$xc1,$xc1
	vpxor		0x60($inp),$xd1,$xd1
	lea		($inp,%rax),$inp	# size optimization
	vmovdqu		$xa1,0x00($out)
	vmovdqu		$xb1,0x20($out)
	vmovdqu		$xc1,0x40($out)
	vmovdqu		$xd1,0x60($out)
	lea		($out,%rax),$out	# size optimization

	vpxord		0x00($inp),$xa2,$xa2
	vpxor		0x20($inp),$xb2,$xb2
	vpxor		0x40($inp),$xc2,$xc2
	vpxor		0x60($inp),$xd2,$xd2
	lea		($inp,%rax),$inp	# size optimization
	vmovdqu32	$xa2,0x00($out)
	vmovdqu		$xb2,0x20($out)
	vmovdqu		$xc2,0x40($out)
	vmovdqu		$xd2,0x60($out)
	lea		($out,%rax),$out	# size optimization

	vpxor		0x00($inp),$xa3,$xa3
	vpxor		0x20($inp),$xb3,$xb3
	vpxor		0x40($inp),$xc3,$xc3
	vpxor		0x60($inp),$xd3,$xd3
	lea		($inp,%rax),$inp	# size optimization
	vmovdqu		$xa3,0x00($out)
	vmovdqu		$xb3,0x20($out)
	vmovdqu		$xc3,0x40($out)
	vmovdqu		$xd3,0x60($out)
	lea		($out,%rax),$out	# size optimization

	vpbroadcastd	0(%r10),%ymm0		# reload key
	vpbroadcastd	4(%r10),%ymm1

	sub		\$64*8,$len
	jnz		.Loop_outer8xvl

	jmp		.Ldone8xvl

.align	32
.Ltail8xvl:
	vmovdqa64	$xa0,%ymm8		# size optimization
___
$xa0 = "%ymm8";
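# after the renames above $xa0 aliases an EVEX-only register (%ymm16+); the
# copy into %ymm8 lets the tail path below use the shorter VEX-encoded
# vmovdqa/vpxor forms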
$code.=<<___;
	xor		%r10,%r10
	sub		$inp,$out
	cmp		\$64*1,$len
	jb		.Less_than_64_8xvl
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vmovdqu		$xa0,0x00($out,$inp)
	vmovdqu		$xb0,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa		$xc0,$xa0
	vmovdqa		$xd0,$xb0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_8xvl
	vpxor		0x00($inp),$xc0,$xc0
	vpxor		0x20($inp),$xd0,$xd0
	vmovdqu		$xc0,0x00($out,$inp)
	vmovdqu		$xd0,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa		$xa1,$xa0
	vmovdqa		$xb1,$xb0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_8xvl
	vpxor		0x00($inp),$xa1,$xa1
	vpxor		0x20($inp),$xb1,$xb1
	vmovdqu		$xa1,0x00($out,$inp)
	vmovdqu		$xb1,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa		$xc1,$xa0
	vmovdqa		$xd1,$xb0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_8xvl
	vpxor		0x00($inp),$xc1,$xc1
	vpxor		0x20($inp),$xd1,$xd1
	vmovdqu		$xc1,0x00($out,$inp)
	vmovdqu		$xd1,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa32	$xa2,$xa0
	vmovdqa		$xb2,$xb0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_8xvl
	vpxord		0x00($inp),$xa2,$xa2
	vpxor		0x20($inp),$xb2,$xb2
	vmovdqu32	$xa2,0x00($out,$inp)
	vmovdqu		$xb2,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa		$xc2,$xa0
	vmovdqa		$xd2,$xb0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_8xvl
	vpxor		0x00($inp),$xc2,$xc2
	vpxor		0x20($inp),$xd2,$xd2
	vmovdqu		$xc2,0x00($out,$inp)
	vmovdqu		$xd2,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa		$xa3,$xa0
	vmovdqa		$xb3,$xb0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_8xvl
	vpxor		0x00($inp),$xa3,$xa3
	vpxor		0x20($inp),$xb3,$xb3
	vmovdqu		$xa3,0x00($out,$inp)
	vmovdqu		$xb3,0x20($out,$inp)
	je		.Ldone8xvl
	vmovdqa		$xc3,$xa0
	vmovdqa		$xd3,$xb0
	lea		64($inp),$inp

.Less_than_64_8xvl:
	vmovdqa		$xa0,0x00(%rsp)
	vmovdqa		$xb0,0x20(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len

.Loop_tail8xvl:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail8xvl

	vpxor		$xa0,$xa0,$xa0
	vmovdqa		$xa0,0x00(%rsp)
	vmovdqa		$xa0,0x20(%rsp)

.Ldone8xvl:
	vzeroall
___
$code.=<<___ if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	%rsp
.L8xvl_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_8xvl,.-ChaCha20_8xvl
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail
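	# Rip is inside the function body: undo ChaCha20_ctr32's frame, i.e.
	# its scratch area plus six pushed GPRs (hence the 64+24+48 below)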
	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.type	simd_handler,\@abi-omnipotent
.align	16
simd_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	mov	8(%r11),%ecx		# HandlerData[2]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	neg	%rcx
	lea	-8(%rax,%rcx),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	neg	%ecx
	shr	\$3,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	simd_handler,.-simd_handler
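	# .pdata maps each function's code range to its unwind info; the
	# matching .xdata entry names the handler and supplies its HandlerData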
.section	.pdata
.align	4
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_128
	.rva	.LSEH_end_ChaCha20_128
	.rva	.LSEH_info_ChaCha20_128

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_avx512vl
	.rva	.LSEH_end_ChaCha20_avx512vl
	.rva	.LSEH_info_ChaCha20_avx512vl

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x

	.rva	.LSEH_begin_ChaCha20_8xvl
	.rva	.LSEH_end_ChaCha20_8xvl
	.rva	.LSEH_info_ChaCha20_8xvl
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler
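	# simd_handler entries: HandlerData[] = prologue label, epilogue label,
	# byte size of the saved-xmm area to restore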
.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.Lssse3_body,.Lssse3_epilogue
	.long	0x20,0

.LSEH_info_ChaCha20_128:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L128_body,.L128_epilogue
	.long	0x60,0

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L4x_body,.L4x_epilogue
	.long	0xa0,0
___
$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L4xop_body,.L4xop_epilogue		# HandlerData[]
	.long	0xa0,0
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
	.long	0xa0,0
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]
	.long	0x20,0

.LSEH_info_ChaCha20_avx512vl:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.Lavx512vl_body,.Lavx512vl_epilogue	# HandlerData[]
	.long	0x20,0

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
	.long	0xa0,0

.LSEH_info_ChaCha20_8xvl:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L8xvl_body,.L8xvl_epilogue		# HandlerData[]
	.long	0xa0,0
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;
	s/%x#%[yz]/%x/g;	# "down-shift"

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";