ghash-x86.pl

  1. #! /usr/bin/env perl
  2. # Copyright 2010-2020 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the OpenSSL license (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. #
  9. # ====================================================================
  10. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  11. # project. The module is, however, dual licensed under OpenSSL and
  12. # CRYPTOGAMS licenses depending on where you obtain it. For further
  13. # details see http://www.openssl.org/~appro/cryptogams/.
  14. # ====================================================================
  15. #
  16. # March, May, June 2010
  17. #
  18. # The module implements "4-bit" GCM GHASH function and underlying
  19. # single multiplication operation in GF(2^128). "4-bit" means that it
  20. # uses 256 bytes per-key table [+64/128 bytes fixed table]. It has two
  21. # code paths: vanilla x86 and vanilla SSE. Former will be executed on
  22. # 486 and Pentium, latter on all others. SSE GHASH features so called
  23. # "528B" variant of "4-bit" method utilizing additional 256+16 bytes
  24. # of per-key storage [+512 bytes shared table]. Performance results
  25. # are for streamed GHASH subroutine and are expressed in cycles per
  26. # processed byte, less is better:
  27. #
  28. #               gcc 2.95.3(*)   SSE assembler   x86 assembler
  29. #
  30. # Pentium       105/111(**)     -               50
  31. # PIII          68 /75          12.2            24
  32. # P4            125/125         17.8            84(***)
  33. # Opteron       66 /70          10.1            30
  34. # Core2         54 /67          8.4             18
  35. # Atom          105/105         16.8            53
  36. # VIA Nano      69 /71          13.0            27
  37. #
  38. # (*)   gcc 3.4.x was observed to generate a few percent slower code,
  39. #       which is one of the reasons why the 2.95.3 results were chosen;
  40. #       another reason is the lack of 3.4.x results for older CPUs;
  41. #       the comparison with SSE results is not completely fair, because the C
  42. #       results are for the vanilla "256B" implementation, while the
  43. #       assembler results are for "528B";-)
  44. # (**)  the second number is the result for code compiled with the -fPIC flag,
  45. #       which is actually more relevant, because the assembler code is
  46. #       position-independent;
  47. # (***) see comment in non-MMX routine for further details;
  48. #
  49. # To summarize, it's >2-5 times faster than gcc-generated code. To
  50. # anchor it to something else, SHA1 assembler processes one byte in
  51. # ~7 cycles on contemporary x86 cores. As for choice of MMX/SSE
  52. # in particular, see comment at the end of the file...
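#
# [Editor's note] The stand-alone Perl sketch below is a functional model of
# the "4-bit" method described above, in the spirit of gcm_gmult_4bit() in
# gcm128.c: a 16-entry (256-byte) per-key table of nibble multiples of H plus
# a small rem_4bit reduction table. It is illustrative only -- it is not part
# of this module, the helper names (init_4bit, gmult_4bit, _shr1) are ad hoc,
# it mirrors the math rather than the exact table layout the assembly below
# expects, the demo values are arbitrary, and it is kept commented out so as
# not to disturb the generated output. Requires a perl with 64-bit integers.
#
#	use strict;
#	use warnings;
#
#	my @rem_4bit = map { $_ << 48 }		# same constants as the rem_4bit
#	   (0x0000,0x1C20,0x3840,0x2460,	# data at the end of this file,
#	    0x7080,0x6CA0,0x48C0,0x54E0,	# pre-shifted into the top 16 bits
#	    0xE100,0xFD20,0xD940,0xC560,	# of the high 64-bit word
#	    0x9180,0x8DA0,0xA9C0,0xB5E0);
#
#	sub _shr1 {				# (hi,lo) >>= 1 in the bit-reflected
#	    my ($hi,$lo) = @_;			# field, folding the carry back in
#	    my $carry = $lo & 1;		# with the 0xe1 reduction byte
#	    $lo = ($lo >> 1) | (($hi & 1) << 63);
#	    $hi >>= 1;
#	    $hi ^= 0xe1 << 56 if $carry;
#	    return ($hi,$lo);
#	}
#
#	sub init_4bit {				# build the 256-byte per-key table:
#	    my ($hi,$lo) = @_;			# Htable[i] = i*H for every nibble i
#	    my @tbl = ([0,0]);
#	    for my $i (8,4,2,1) {
#		$tbl[$i] = [$hi,$lo];
#		($hi,$lo) = _shr1($hi,$lo);
#	    }
#	    for my $i (3,5,6,7,9..15) {
#		my $j = $i & ($i-1);		# $i with its lowest set bit cleared
#		my $k = $i ^ $j;		# the lowest set bit itself
#		$tbl[$i] = [ $tbl[$j][0]^$tbl[$k][0], $tbl[$j][1]^$tbl[$k][1] ];
#	    }
#	    return \@tbl;
#	}
#
#	sub gmult_4bit {			# Xi = Xi*H, 16-byte strings in/out
#	    my ($Xi,$Htbl) = @_;
#	    my @x   = unpack("C16",$Xi);
#	    my $nlo = $x[15];
#	    my $nhi = $nlo >> 4;  $nlo &= 0xf;
#	    my ($zhi,$zlo) = @{$Htbl->[$nlo]};
#
#	    my $step = sub {			# Z = (Z>>4) ^ Htable[nibble], with
#		my $nib = shift;		# the 4 bits shifted out folded back
#		my $rem = $zlo & 0xf;		# in through rem_4bit[]
#		$zlo = ($zlo >> 4) | (($zhi & 0xf) << 60);
#		$zhi = ($zhi >> 4) ^ $rem_4bit[$rem];
#		$zhi ^= $Htbl->[$nib][0];
#		$zlo ^= $Htbl->[$nib][1];
#	    };
#
#	    for (my $cnt=15;;) {		# two nibbles per input byte
#		$step->($nhi);
#		last if --$cnt < 0;
#		$nlo = $x[$cnt];
#		$nhi = $nlo >> 4;  $nlo &= 0xf;
#		$step->($nlo);
#	    }
#	    return pack("Q>2",$zhi,$zlo);
#	}
#
#	# usage with arbitrary example values (hypothetical, no claimed result):
#	my $H    = pack("H*","0123456789abcdeffedcba9876543210");
#	my $Xi   = pack("H*","000102030405060708090a0b0c0d0e0f");
#	my $Htbl = init_4bit(unpack("Q>2",$H));
#	printf "Xi*H = %s\n", unpack("H*",gmult_4bit($Xi,$Htbl));
#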
  53. # May 2010
  54. #
  55. # Add PCLMULQDQ version performing at 2.10 cycles per processed byte.
  56. # The question is: how close is it to the theoretical limit? The pclmulqdq
  57. # instruction latency appears to be 14 cycles and there can't be more
  58. # than 2 of them executing at any given time. This means that a single
  59. # Karatsuba multiplication would take 28 cycles *plus* a few cycles for
  60. # pre- and post-processing. Then the multiplication has to be followed by
  61. # modulo-reduction. Given that the aggregated reduction method [see
  62. # "Carry-less Multiplication and Its Usage for Computing the GCM Mode"
  63. # white paper by Intel] allows you to perform reduction only once in
  64. # a while, we can assume that asymptotic performance can be estimated
  65. # as (28+Tmod/Naggr)/16, where Tmod is the time to perform reduction
  66. # and Naggr is the aggregation factor.
  67. #
  68. # Before we proceed to this implementation, let's have a closer look at
  69. # the best-performing code suggested by Intel in their white paper.
  70. # By tracing inter-register dependencies, Tmod is estimated as ~19
  71. # cycles and the Naggr chosen by Intel is 4, resulting in 2.05 cycles per
  72. # processed byte. As implied, this is quite an optimistic estimate,
  73. # because it does not account for Karatsuba pre- and post-processing,
  74. # which for a single multiplication is ~5 cycles. Unfortunately Intel
  75. # does not provide performance data for GHASH alone. But benchmarking
  76. # AES_GCM_encrypt ripped out of Fig. 15 of the white paper with aadt
  77. # alone resulted in 2.46 cycles per byte out of a 16KB buffer. Note that
  78. # the result accounts even for pre-computing of the degrees of the hash
  79. # key H, but that portion is negligible at 16KB buffer size.
  80. #
  81. # Moving on to the implementation in question. Tmod is estimated as
  82. # ~13 cycles and Naggr is 2, giving asymptotic performance of ...
  83. # 2.16. How is it possible that measured performance is better than
  84. # optimistic theoretical estimate? There is one thing Intel failed
  85. # to recognize. By serializing GHASH with CTR in the same subroutine, the
  86. # former's performance is really limited by the above (Tmul + Tmod/Naggr)
  87. # equation. But if the GHASH procedure is detached, the modulo-reduction
  88. # can be interleaved with Naggr-1 multiplications at instruction level
  89. # and under ideal conditions even disappear from the equation. So the
  90. # optimistic theoretical estimate for this implementation is ...
  91. # 28/16=1.75, and not 2.16. Well, it's probably way too optimistic,
  92. # at least for such a small Naggr. I'd argue that (28+Tproc/Naggr)/16,
  93. # where Tproc is the time required for Karatsuba pre- and post-processing,
  94. # is a more realistic estimate. In this case it gives ... 1.91 cycles.
  95. # In other words, depending on how well we can interleave the reduction
  96. # with one of the two multiplications, the performance should be between
  97. # 1.91 and 2.16. As already mentioned, this implementation processes
  98. # one byte out of an 8KB buffer in 2.10 cycles, while the x86_64 counterpart
  99. # does it in 2.02. x86_64 performance is better because the larger register
  100. # bank allows reduction and multiplication to be interleaved more tightly.
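#
# [Editor's note] For reference, plugging the numbers quoted above into the
# (Tmul + Tmod/Naggr)/16 estimate, with Tmul=28 for one Karatsuba
# multiplication:
#
#	Intel's code (Tmod~19, Naggr=4):		(28 + 19/4)/16 ~= 2.05
#	this code, serialized (Tmod~13, Naggr=2):	(28 + 13/2)/16 ~= 2.16
#	reduction fully hidden:				 28/16          =  1.75
#	only Tproc~5 exposed:				(28 +  5/2)/16 ~= 1.91
#
# which is where the 1.91-2.16 bracket above comes from.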
  101. #
  102. # Does it make sense to increase Naggr? To start with, it's virtually
  103. # impossible in 32-bit mode, because of the limited register bank
  104. # capacity. Otherwise the improvement has to be weighed against slower
  105. # setup, as well as the increase in code size and complexity. As even the
  106. # optimistic estimate doesn't promise a 30% performance improvement,
  107. # there are currently no plans to increase Naggr.
  108. #
  109. # Special thanks to David Woodhouse for providing access to a
  110. # Westmere-based system on behalf of Intel Open Source Technology Centre.
  111. # January 2010
  112. #
  113. # Tweaked to optimize transitions between integer and FP operations
  114. # on the same XMM register. The PCLMULQDQ subroutine was measured to process
  115. # one byte in 2.07 cycles on Sandy Bridge, and in 2.12 on Westmere.
  116. # The minor regression on Westmere is outweighed by the ~15% improvement
  117. # on Sandy Bridge. Strangely enough, an attempt to modify the 64-bit code in
  118. # a similar manner resulted in almost 20% degradation on Sandy Bridge,
  119. # where the original 64-bit code processes one byte in 1.95 cycles.
  120. #####################################################################
  121. # For reference, AMD Bulldozer processes one byte in 1.98 cycles in
  122. # 32-bit mode and 1.89 in 64-bit.
  123. # February 2013
  124. #
  125. # Overhaul: aggregate Karatsuba post-processing, improve ILP in
  126. # reduction_alg9. Resulting performance is 1.96 cycles per byte on
  127. # Westmere, 1.95 - on Sandy/Ivy Bridge, 1.76 - on Bulldozer.
  128. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  129. push(@INC,"${dir}","${dir}../../perlasm");
  130. require "x86asm.pl";
  131. $output=pop;
  132. open STDOUT,">$output";
  133. &asm_init($ARGV[0],$x86only = $ARGV[$#ARGV] eq "386");
  134. $sse2=0;
  135. for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
  136. ($Zhh,$Zhl,$Zlh,$Zll) = ("ebp","edx","ecx","ebx");
  137. $inp = "edi";
  138. $Htbl = "esi";
  139. $unroll = 0; # Affects x86 loop. Folded loop performs ~7% worse
  140. # than unrolled, which has to be weighed against
  141. # 2.5x x86-specific code size reduction.
  142. sub x86_loop {
  143. my $off = shift;
  144. my $rem = "eax";
  145. &mov ($Zhh,&DWP(4,$Htbl,$Zll));
  146. &mov ($Zhl,&DWP(0,$Htbl,$Zll));
  147. &mov ($Zlh,&DWP(12,$Htbl,$Zll));
  148. &mov ($Zll,&DWP(8,$Htbl,$Zll));
  149. &xor ($rem,$rem); # avoid partial register stalls on PIII
  150. # shrd practically kills P4, 2.5x deterioration, but P4 has an
  151. # MMX code-path to execute. shrd runs a tad faster [than twice
  152. # the shifts, moves and ors] on pre-MMX Pentium (as well as on
  153. # PIII and Core2), *but* it minimizes code size, spares a register
  154. # and thus allows the loop to be folded...
  155. if (!$unroll) {
  156. my $cnt = $inp;
  157. &mov ($cnt,15);
  158. &jmp (&label("x86_loop"));
  159. &set_label("x86_loop",16);
  160. for($i=1;$i<=2;$i++) {
  161. &mov (&LB($rem),&LB($Zll));
  162. &shrd ($Zll,$Zlh,4);
  163. &and (&LB($rem),0xf);
  164. &shrd ($Zlh,$Zhl,4);
  165. &shrd ($Zhl,$Zhh,4);
  166. &shr ($Zhh,4);
  167. &xor ($Zhh,&DWP($off+16,"esp",$rem,4));
  168. &mov (&LB($rem),&BP($off,"esp",$cnt));
  169. if ($i&1) {
  170. &and (&LB($rem),0xf0);
  171. } else {
  172. &shl (&LB($rem),4);
  173. }
  174. &xor ($Zll,&DWP(8,$Htbl,$rem));
  175. &xor ($Zlh,&DWP(12,$Htbl,$rem));
  176. &xor ($Zhl,&DWP(0,$Htbl,$rem));
  177. &xor ($Zhh,&DWP(4,$Htbl,$rem));
  178. if ($i&1) {
  179. &dec ($cnt);
  180. &js (&label("x86_break"));
  181. } else {
  182. &jmp (&label("x86_loop"));
  183. }
  184. }
  185. &set_label("x86_break",16);
  186. } else {
  187. for($i=1;$i<32;$i++) {
  188. &comment($i);
  189. &mov (&LB($rem),&LB($Zll));
  190. &shrd ($Zll,$Zlh,4);
  191. &and (&LB($rem),0xf);
  192. &shrd ($Zlh,$Zhl,4);
  193. &shrd ($Zhl,$Zhh,4);
  194. &shr ($Zhh,4);
  195. &xor ($Zhh,&DWP($off+16,"esp",$rem,4));
  196. if ($i&1) {
  197. &mov (&LB($rem),&BP($off+15-($i>>1),"esp"));
  198. &and (&LB($rem),0xf0);
  199. } else {
  200. &mov (&LB($rem),&BP($off+15-($i>>1),"esp"));
  201. &shl (&LB($rem),4);
  202. }
  203. &xor ($Zll,&DWP(8,$Htbl,$rem));
  204. &xor ($Zlh,&DWP(12,$Htbl,$rem));
  205. &xor ($Zhl,&DWP(0,$Htbl,$rem));
  206. &xor ($Zhh,&DWP(4,$Htbl,$rem));
  207. }
  208. }
  209. &bswap ($Zll);
  210. &bswap ($Zlh);
  211. &bswap ($Zhl);
  212. if (!$x86only) {
  213. &bswap ($Zhh);
  214. } else {
  215. &mov ("eax",$Zhh);
  216. &bswap ("eax");
  217. &mov ($Zhh,"eax");
  218. }
  219. }
  220. if ($unroll) {
  221. &function_begin_B("_x86_gmult_4bit_inner");
  222. &x86_loop(4);
  223. &ret ();
  224. &function_end_B("_x86_gmult_4bit_inner");
  225. }
  226. sub deposit_rem_4bit {
  227. my $bias = shift;
  228. &mov (&DWP($bias+0, "esp"),0x0000<<16);
  229. &mov (&DWP($bias+4, "esp"),0x1C20<<16);
  230. &mov (&DWP($bias+8, "esp"),0x3840<<16);
  231. &mov (&DWP($bias+12,"esp"),0x2460<<16);
  232. &mov (&DWP($bias+16,"esp"),0x7080<<16);
  233. &mov (&DWP($bias+20,"esp"),0x6CA0<<16);
  234. &mov (&DWP($bias+24,"esp"),0x48C0<<16);
  235. &mov (&DWP($bias+28,"esp"),0x54E0<<16);
  236. &mov (&DWP($bias+32,"esp"),0xE100<<16);
  237. &mov (&DWP($bias+36,"esp"),0xFD20<<16);
  238. &mov (&DWP($bias+40,"esp"),0xD940<<16);
  239. &mov (&DWP($bias+44,"esp"),0xC560<<16);
  240. &mov (&DWP($bias+48,"esp"),0x9180<<16);
  241. &mov (&DWP($bias+52,"esp"),0x8DA0<<16);
  242. &mov (&DWP($bias+56,"esp"),0xA9C0<<16);
  243. &mov (&DWP($bias+60,"esp"),0xB5E0<<16);
  244. }
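#
# [Editor's note] The sixteen constants above (and the rem_4bit/rem_8bit data
# at the end of the file) form a GF(2)-linear table: entry i is the XOR of
# 0x1C20<<b over the bits b set in i, 0x1C20 being 0xE1 (the usual reflected
# GHASH reduction byte, cf. gcm128.c) shifted left by 5. They can be
# reproduced with a throwaway generator along these lines (illustrative only,
# not part of this module):
#
#	my @rem_4bit = (0) x 16;
#	for my $bit (0..3) {
#	    my $base = 0x1C20 << $bit;		# contribution of bit $bit of the index
#	    $rem_4bit[$_] ^= $base for grep { ($_ >> $bit) & 1 } 0..15;
#	}
#	printf("0x%04X\n",$_) for @rem_4bit;
#
# The different code paths then position the value differently: <<16 here,
# <<$S (12 or 16) in the rem_4bit data below, and <<48 when folded into a
# 64-bit high word (as in gcm128.c on 64-bit systems).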
  245. $suffix = $x86only ? "" : "_x86";
  246. &function_begin("gcm_gmult_4bit".$suffix);
  247. &stack_push(16+4+1); # +1 for stack alignment
  248. &mov ($inp,&wparam(0)); # load Xi
  249. &mov ($Htbl,&wparam(1)); # load Htable
  250. &mov ($Zhh,&DWP(0,$inp)); # load Xi[16]
  251. &mov ($Zhl,&DWP(4,$inp));
  252. &mov ($Zlh,&DWP(8,$inp));
  253. &mov ($Zll,&DWP(12,$inp));
  254. &deposit_rem_4bit(16);
  255. &mov (&DWP(0,"esp"),$Zhh); # copy Xi[16] on stack
  256. &mov (&DWP(4,"esp"),$Zhl);
  257. &mov (&DWP(8,"esp"),$Zlh);
  258. &mov (&DWP(12,"esp"),$Zll);
  259. &shr ($Zll,20);
  260. &and ($Zll,0xf0);
  261. if ($unroll) {
  262. &call ("_x86_gmult_4bit_inner");
  263. } else {
  264. &x86_loop(0);
  265. &mov ($inp,&wparam(0));
  266. }
  267. &mov (&DWP(12,$inp),$Zll);
  268. &mov (&DWP(8,$inp),$Zlh);
  269. &mov (&DWP(4,$inp),$Zhl);
  270. &mov (&DWP(0,$inp),$Zhh);
  271. &stack_pop(16+4+1);
  272. &function_end("gcm_gmult_4bit".$suffix);
  273. &function_begin("gcm_ghash_4bit".$suffix);
  274. &stack_push(16+4+1); # +1 for 64-bit alignment
  275. &mov ($Zll,&wparam(0)); # load Xi
  276. &mov ($Htbl,&wparam(1)); # load Htable
  277. &mov ($inp,&wparam(2)); # load in
  278. &mov ("ecx",&wparam(3)); # load len
  279. &add ("ecx",$inp);
  280. &mov (&wparam(3),"ecx");
  281. &mov ($Zhh,&DWP(0,$Zll)); # load Xi[16]
  282. &mov ($Zhl,&DWP(4,$Zll));
  283. &mov ($Zlh,&DWP(8,$Zll));
  284. &mov ($Zll,&DWP(12,$Zll));
  285. &deposit_rem_4bit(16);
  286. &set_label("x86_outer_loop",16);
  287. &xor ($Zll,&DWP(12,$inp)); # xor with input
  288. &xor ($Zlh,&DWP(8,$inp));
  289. &xor ($Zhl,&DWP(4,$inp));
  290. &xor ($Zhh,&DWP(0,$inp));
  291. &mov (&DWP(12,"esp"),$Zll); # dump it on stack
  292. &mov (&DWP(8,"esp"),$Zlh);
  293. &mov (&DWP(4,"esp"),$Zhl);
  294. &mov (&DWP(0,"esp"),$Zhh);
  295. &shr ($Zll,20);
  296. &and ($Zll,0xf0);
  297. if ($unroll) {
  298. &call ("_x86_gmult_4bit_inner");
  299. } else {
  300. &x86_loop(0);
  301. &mov ($inp,&wparam(2));
  302. }
  303. &lea ($inp,&DWP(16,$inp));
  304. &cmp ($inp,&wparam(3));
  305. &mov (&wparam(2),$inp) if (!$unroll);
  306. &jb (&label("x86_outer_loop"));
  307. &mov ($inp,&wparam(0)); # load Xi
  308. &mov (&DWP(12,$inp),$Zll);
  309. &mov (&DWP(8,$inp),$Zlh);
  310. &mov (&DWP(4,$inp),$Zhl);
  311. &mov (&DWP(0,$inp),$Zhh);
  312. &stack_pop(16+4+1);
  313. &function_end("gcm_ghash_4bit".$suffix);
  314. if (!$x86only) {{{
  315. &static_label("rem_4bit");
  316. if (!$sse2) {{ # pure-MMX "May" version...
  317. $S=12; # shift factor for rem_4bit
  318. &function_begin_B("_mmx_gmult_4bit_inner");
  319. # MMX version performs 3.5 times better on P4 (see comment in non-MMX
  320. # routine for further details), 100% better on Opteron, ~70% better
  321. # on Core2 and PIII... In other words effort is considered to be well
  322. # spent... Since the initial release the loop has been unrolled in order to
  323. # "liberate" the register previously used as the loop counter. Instead it's
  324. # used to optimize the critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
  325. # The path involves a move of Z.lo from an MMX to an integer register,
  326. # effective address calculation and finally a merge of the value into Z.hi.
  327. # The reference to rem_4bit is scheduled so late that I had to shift the
  328. # rem_4bit elements right by 4. This resulted in a 20-45% improvement
  329. # on contemporary µ-archs.
  330. {
  331. my $cnt;
  332. my $rem_4bit = "eax";
  333. my @rem = ($Zhh,$Zll);
  334. my $nhi = $Zhl;
  335. my $nlo = $Zlh;
  336. my ($Zlo,$Zhi) = ("mm0","mm1");
  337. my $tmp = "mm2";
  338. &xor ($nlo,$nlo); # avoid partial register stalls on PIII
  339. &mov ($nhi,$Zll);
  340. &mov (&LB($nlo),&LB($nhi));
  341. &shl (&LB($nlo),4);
  342. &and ($nhi,0xf0);
  343. &movq ($Zlo,&QWP(8,$Htbl,$nlo));
  344. &movq ($Zhi,&QWP(0,$Htbl,$nlo));
  345. &movd ($rem[0],$Zlo);
  346. for ($cnt=28;$cnt>=-2;$cnt--) {
  347. my $odd = $cnt&1;
  348. my $nix = $odd ? $nlo : $nhi;
  349. &shl (&LB($nlo),4) if ($odd);
  350. &psrlq ($Zlo,4);
  351. &movq ($tmp,$Zhi);
  352. &psrlq ($Zhi,4);
  353. &pxor ($Zlo,&QWP(8,$Htbl,$nix));
  354. &mov (&LB($nlo),&BP($cnt/2,$inp)) if (!$odd && $cnt>=0);
  355. &psllq ($tmp,60);
  356. &and ($nhi,0xf0) if ($odd);
  357. &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
  358. &and ($rem[0],0xf);
  359. &pxor ($Zhi,&QWP(0,$Htbl,$nix));
  360. &mov ($nhi,$nlo) if (!$odd && $cnt>=0);
  361. &movd ($rem[1],$Zlo);
  362. &pxor ($Zlo,$tmp);
  363. push (@rem,shift(@rem)); # "rotate" registers
  364. }
  365. &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8)); # last rem_4bit[rem]
  366. &psrlq ($Zlo,32); # lower part of Zlo is already there
  367. &movd ($Zhl,$Zhi);
  368. &psrlq ($Zhi,32);
  369. &movd ($Zlh,$Zlo);
  370. &movd ($Zhh,$Zhi);
  371. &shl ($inp,4); # compensate for rem_4bit[i] being >>4
  372. &bswap ($Zll);
  373. &bswap ($Zhl);
  374. &bswap ($Zlh);
  375. &xor ($Zhh,$inp);
  376. &bswap ($Zhh);
  377. &ret ();
  378. }
  379. &function_end_B("_mmx_gmult_4bit_inner");
  380. &function_begin("gcm_gmult_4bit_mmx");
  381. &mov ($inp,&wparam(0)); # load Xi
  382. &mov ($Htbl,&wparam(1)); # load Htable
  383. &call (&label("pic_point"));
  384. &set_label("pic_point");
  385. &blindpop("eax");
  386. &lea ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
  387. &movz ($Zll,&BP(15,$inp));
  388. &call ("_mmx_gmult_4bit_inner");
  389. &mov ($inp,&wparam(0)); # load Xi
  390. &emms ();
  391. &mov (&DWP(12,$inp),$Zll);
  392. &mov (&DWP(4,$inp),$Zhl);
  393. &mov (&DWP(8,$inp),$Zlh);
  394. &mov (&DWP(0,$inp),$Zhh);
  395. &function_end("gcm_gmult_4bit_mmx");
  396. # Streamed version performs 20% better on P4, 7% on Opteron,
  397. # 10% on Core2 and PIII...
  398. &function_begin("gcm_ghash_4bit_mmx");
  399. &mov ($Zhh,&wparam(0)); # load Xi
  400. &mov ($Htbl,&wparam(1)); # load Htable
  401. &mov ($inp,&wparam(2)); # load in
  402. &mov ($Zlh,&wparam(3)); # load len
  403. &call (&label("pic_point"));
  404. &set_label("pic_point");
  405. &blindpop("eax");
  406. &lea ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
  407. &add ($Zlh,$inp);
  408. &mov (&wparam(3),$Zlh); # len to point at the end of input
  409. &stack_push(4+1); # +1 for stack alignment
  410. &mov ($Zll,&DWP(12,$Zhh)); # load Xi[16]
  411. &mov ($Zhl,&DWP(4,$Zhh));
  412. &mov ($Zlh,&DWP(8,$Zhh));
  413. &mov ($Zhh,&DWP(0,$Zhh));
  414. &jmp (&label("mmx_outer_loop"));
  415. &set_label("mmx_outer_loop",16);
  416. &xor ($Zll,&DWP(12,$inp));
  417. &xor ($Zhl,&DWP(4,$inp));
  418. &xor ($Zlh,&DWP(8,$inp));
  419. &xor ($Zhh,&DWP(0,$inp));
  420. &mov (&wparam(2),$inp);
  421. &mov (&DWP(12,"esp"),$Zll);
  422. &mov (&DWP(4,"esp"),$Zhl);
  423. &mov (&DWP(8,"esp"),$Zlh);
  424. &mov (&DWP(0,"esp"),$Zhh);
  425. &mov ($inp,"esp");
  426. &shr ($Zll,24);
  427. &call ("_mmx_gmult_4bit_inner");
  428. &mov ($inp,&wparam(2));
  429. &lea ($inp,&DWP(16,$inp));
  430. &cmp ($inp,&wparam(3));
  431. &jb (&label("mmx_outer_loop"));
  432. &mov ($inp,&wparam(0)); # load Xi
  433. &emms ();
  434. &mov (&DWP(12,$inp),$Zll);
  435. &mov (&DWP(4,$inp),$Zhl);
  436. &mov (&DWP(8,$inp),$Zlh);
  437. &mov (&DWP(0,$inp),$Zhh);
  438. &stack_pop(4+1);
  439. &function_end("gcm_ghash_4bit_mmx");
  440. }} else {{ # "June" MMX version...
  441. # ... has slower "April" gcm_gmult_4bit_mmx with folded
  442. # loop. This is done to conserve code size...
  443. $S=16; # shift factor for rem_4bit
  444. sub mmx_loop() {
  445. # MMX version performs 2.8 times better on P4 (see comment in non-MMX
  446. # routine for further details), 40% better on Opteron and Core2, 50%
  447. # better on PIII... In other words effort is considered to be well
  448. # spent...
  449. my $inp = shift;
  450. my $rem_4bit = shift;
  451. my $cnt = $Zhh;
  452. my $nhi = $Zhl;
  453. my $nlo = $Zlh;
  454. my $rem = $Zll;
  455. my ($Zlo,$Zhi) = ("mm0","mm1");
  456. my $tmp = "mm2";
  457. &xor ($nlo,$nlo); # avoid partial register stalls on PIII
  458. &mov ($nhi,$Zll);
  459. &mov (&LB($nlo),&LB($nhi));
  460. &mov ($cnt,14);
  461. &shl (&LB($nlo),4);
  462. &and ($nhi,0xf0);
  463. &movq ($Zlo,&QWP(8,$Htbl,$nlo));
  464. &movq ($Zhi,&QWP(0,$Htbl,$nlo));
  465. &movd ($rem,$Zlo);
  466. &jmp (&label("mmx_loop"));
  467. &set_label("mmx_loop",16);
  468. &psrlq ($Zlo,4);
  469. &and ($rem,0xf);
  470. &movq ($tmp,$Zhi);
  471. &psrlq ($Zhi,4);
  472. &pxor ($Zlo,&QWP(8,$Htbl,$nhi));
  473. &mov (&LB($nlo),&BP(0,$inp,$cnt));
  474. &psllq ($tmp,60);
  475. &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
  476. &dec ($cnt);
  477. &movd ($rem,$Zlo);
  478. &pxor ($Zhi,&QWP(0,$Htbl,$nhi));
  479. &mov ($nhi,$nlo);
  480. &pxor ($Zlo,$tmp);
  481. &js (&label("mmx_break"));
  482. &shl (&LB($nlo),4);
  483. &and ($rem,0xf);
  484. &psrlq ($Zlo,4);
  485. &and ($nhi,0xf0);
  486. &movq ($tmp,$Zhi);
  487. &psrlq ($Zhi,4);
  488. &pxor ($Zlo,&QWP(8,$Htbl,$nlo));
  489. &psllq ($tmp,60);
  490. &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
  491. &movd ($rem,$Zlo);
  492. &pxor ($Zhi,&QWP(0,$Htbl,$nlo));
  493. &pxor ($Zlo,$tmp);
  494. &jmp (&label("mmx_loop"));
  495. &set_label("mmx_break",16);
  496. &shl (&LB($nlo),4);
  497. &and ($rem,0xf);
  498. &psrlq ($Zlo,4);
  499. &and ($nhi,0xf0);
  500. &movq ($tmp,$Zhi);
  501. &psrlq ($Zhi,4);
  502. &pxor ($Zlo,&QWP(8,$Htbl,$nlo));
  503. &psllq ($tmp,60);
  504. &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
  505. &movd ($rem,$Zlo);
  506. &pxor ($Zhi,&QWP(0,$Htbl,$nlo));
  507. &pxor ($Zlo,$tmp);
  508. &psrlq ($Zlo,4);
  509. &and ($rem,0xf);
  510. &movq ($tmp,$Zhi);
  511. &psrlq ($Zhi,4);
  512. &pxor ($Zlo,&QWP(8,$Htbl,$nhi));
  513. &psllq ($tmp,60);
  514. &pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
  515. &movd ($rem,$Zlo);
  516. &pxor ($Zhi,&QWP(0,$Htbl,$nhi));
  517. &pxor ($Zlo,$tmp);
  518. &psrlq ($Zlo,32); # lower part of Zlo is already there
  519. &movd ($Zhl,$Zhi);
  520. &psrlq ($Zhi,32);
  521. &movd ($Zlh,$Zlo);
  522. &movd ($Zhh,$Zhi);
  523. &bswap ($Zll);
  524. &bswap ($Zhl);
  525. &bswap ($Zlh);
  526. &bswap ($Zhh);
  527. }
  528. &function_begin("gcm_gmult_4bit_mmx");
  529. &mov ($inp,&wparam(0)); # load Xi
  530. &mov ($Htbl,&wparam(1)); # load Htable
  531. &call (&label("pic_point"));
  532. &set_label("pic_point");
  533. &blindpop("eax");
  534. &lea ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
  535. &movz ($Zll,&BP(15,$inp));
  536. &mmx_loop($inp,"eax");
  537. &emms ();
  538. &mov (&DWP(12,$inp),$Zll);
  539. &mov (&DWP(4,$inp),$Zhl);
  540. &mov (&DWP(8,$inp),$Zlh);
  541. &mov (&DWP(0,$inp),$Zhh);
  542. &function_end("gcm_gmult_4bit_mmx");
  543. ######################################################################
  544. # The subroutine below is the "528B" variant of the "4-bit" GCM GHASH function
  545. # (see gcm128.c for details). It provides a further 20-40% performance
  546. # improvement over the above-mentioned "May" version.
  547. &static_label("rem_8bit");
  548. &function_begin("gcm_ghash_4bit_mmx");
  549. { my ($Zlo,$Zhi) = ("mm7","mm6");
  550. my $rem_8bit = "esi";
  551. my $Htbl = "ebx";
  552. # parameter block
  553. &mov ("eax",&wparam(0)); # Xi
  554. &mov ("ebx",&wparam(1)); # Htable
  555. &mov ("ecx",&wparam(2)); # inp
  556. &mov ("edx",&wparam(3)); # len
  557. &mov ("ebp","esp"); # original %esp
  558. &call (&label("pic_point"));
  559. &set_label ("pic_point");
  560. &blindpop ($rem_8bit);
  561. &lea ($rem_8bit,&DWP(&label("rem_8bit")."-".&label("pic_point"),$rem_8bit));
  562. &sub ("esp",512+16+16); # allocate stack frame...
  563. &and ("esp",-64); # ...and align it
  564. &sub ("esp",16); # place for (u8)(H[]<<4)
  565. &add ("edx","ecx"); # pointer to the end of input
  566. &mov (&DWP(528+16+0,"esp"),"eax"); # save Xi
  567. &mov (&DWP(528+16+8,"esp"),"edx"); # save inp+len
  568. &mov (&DWP(528+16+12,"esp"),"ebp"); # save original %esp
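# [Editor's note] Stack frame used below, as read from the code (offsets from
# %esp after alignment; 528 = 16 + 4*128):
#
#	  0..15		(u8)(H[i]<<4), one byte per nibble value i
#	 16..143	Htable[i].lo		(16 qwords)
#	144..271	Htable[i].hi
#	272..399	(Htable[i]>>4).lo
#	400..527	(Htable[i]>>4).hi
#	528..543	Xi^inp scratch for the block being hashed
#	544..559	saved Xi pointer, inp, inp+len, original %esp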
  569. { my @lo = ("mm0","mm1","mm2");
  570. my @hi = ("mm3","mm4","mm5");
  571. my @tmp = ("mm6","mm7");
  572. my ($off1,$off2,$i) = (0,0,);
  573. &add ($Htbl,128); # optimize for size
  574. &lea ("edi",&DWP(16+128,"esp"));
  575. &lea ("ebp",&DWP(16+256+128,"esp"));
  576. # decompose Htable (low and high parts are kept separately),
  577. # generate Htable[]>>4, (u8)(Htable[]<<4), save to stack...
  578. for ($i=0;$i<18;$i++) {
  579. &mov ("edx",&DWP(16*$i+8-128,$Htbl)) if ($i<16);
  580. &movq ($lo[0],&QWP(16*$i+8-128,$Htbl)) if ($i<16);
  581. &psllq ($tmp[1],60) if ($i>1);
  582. &movq ($hi[0],&QWP(16*$i+0-128,$Htbl)) if ($i<16);
  583. &por ($lo[2],$tmp[1]) if ($i>1);
  584. &movq (&QWP($off1-128,"edi"),$lo[1]) if ($i>0 && $i<17);
  585. &psrlq ($lo[1],4) if ($i>0 && $i<17);
  586. &movq (&QWP($off1,"edi"),$hi[1]) if ($i>0 && $i<17);
  587. &movq ($tmp[0],$hi[1]) if ($i>0 && $i<17);
  588. &movq (&QWP($off2-128,"ebp"),$lo[2]) if ($i>1);
  589. &psrlq ($hi[1],4) if ($i>0 && $i<17);
  590. &movq (&QWP($off2,"ebp"),$hi[2]) if ($i>1);
  591. &shl ("edx",4) if ($i<16);
  592. &mov (&BP($i,"esp"),&LB("edx")) if ($i<16);
  593. unshift (@lo,pop(@lo)); # "rotate" registers
  594. unshift (@hi,pop(@hi));
  595. unshift (@tmp,pop(@tmp));
  596. $off1 += 8 if ($i>0);
  597. $off2 += 8 if ($i>1);
  598. }
  599. }
  600. &movq ($Zhi,&QWP(0,"eax"));
  601. &mov ("ebx",&DWP(8,"eax"));
  602. &mov ("edx",&DWP(12,"eax")); # load Xi
  603. &set_label("outer",16);
  604. { my $nlo = "eax";
  605. my $dat = "edx";
  606. my @nhi = ("edi","ebp");
  607. my @rem = ("ebx","ecx");
  608. my @red = ("mm0","mm1","mm2");
  609. my $tmp = "mm3";
  610. &xor ($dat,&DWP(12,"ecx")); # merge input data
  611. &xor ("ebx",&DWP(8,"ecx"));
  612. &pxor ($Zhi,&QWP(0,"ecx"));
  613. &lea ("ecx",&DWP(16,"ecx")); # inp+=16
  614. #&mov (&DWP(528+12,"esp"),$dat); # save inp^Xi
  615. &mov (&DWP(528+8,"esp"),"ebx");
  616. &movq (&QWP(528+0,"esp"),$Zhi);
  617. &mov (&DWP(528+16+4,"esp"),"ecx"); # save inp
  618. &xor ($nlo,$nlo);
  619. &rol ($dat,8);
  620. &mov (&LB($nlo),&LB($dat));
  621. &mov ($nhi[1],$nlo);
  622. &and (&LB($nlo),0x0f);
  623. &shr ($nhi[1],4);
  624. &pxor ($red[0],$red[0]);
  625. &rol ($dat,8); # next byte
  626. &pxor ($red[1],$red[1]);
  627. &pxor ($red[2],$red[2]);
  628. # Just as in the "May" version, modulo-scheduling is used for the critical path
  629. # in 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. The final 'pxor'
  630. # is scheduled so late that rem_8bit[] has to be shifted *right*
  631. # by 16, which is why the last argument to pinsrw is 2, which
  632. # corresponds to <<32=<<48>>16...
  633. for ($j=11,$i=0;$i<15;$i++) {
  634. if ($i>0) {
  635. &pxor ($Zlo,&QWP(16,"esp",$nlo,8)); # Z^=H[nlo]
  636. &rol ($dat,8); # next byte
  637. &pxor ($Zhi,&QWP(16+128,"esp",$nlo,8));
  638. &pxor ($Zlo,$tmp);
  639. &pxor ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8));
  640. &xor (&LB($rem[1]),&BP(0,"esp",$nhi[0])); # rem^(H[nhi]<<4)
  641. } else {
  642. &movq ($Zlo,&QWP(16,"esp",$nlo,8));
  643. &movq ($Zhi,&QWP(16+128,"esp",$nlo,8));
  644. }
  645. &mov (&LB($nlo),&LB($dat));
  646. &mov ($dat,&DWP(528+$j,"esp")) if (--$j%4==0);
  647. &movd ($rem[0],$Zlo);
  648. &movz ($rem[1],&LB($rem[1])) if ($i>0);
  649. &psrlq ($Zlo,8); # Z>>=8
  650. &movq ($tmp,$Zhi);
  651. &mov ($nhi[0],$nlo);
  652. &psrlq ($Zhi,8);
  653. &pxor ($Zlo,&QWP(16+256+0,"esp",$nhi[1],8)); # Z^=H[nhi]>>4
  654. &and (&LB($nlo),0x0f);
  655. &psllq ($tmp,56);
  656. &pxor ($Zhi,$red[1]) if ($i>1);
  657. &shr ($nhi[0],4);
  658. &pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2) if ($i>0);
  659. unshift (@red,pop(@red)); # "rotate" registers
  660. unshift (@rem,pop(@rem));
  661. unshift (@nhi,pop(@nhi));
  662. }
  663. &pxor ($Zlo,&QWP(16,"esp",$nlo,8)); # Z^=H[nlo]
  664. &pxor ($Zhi,&QWP(16+128,"esp",$nlo,8));
  665. &xor (&LB($rem[1]),&BP(0,"esp",$nhi[0])); # rem^(H[nhi]<<4)
  666. &pxor ($Zlo,$tmp);
  667. &pxor ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8));
  668. &movz ($rem[1],&LB($rem[1]));
  669. &pxor ($red[2],$red[2]); # clear 2nd word
  670. &psllq ($red[1],4);
  671. &movd ($rem[0],$Zlo);
  672. &psrlq ($Zlo,4); # Z>>=4
  673. &movq ($tmp,$Zhi);
  674. &psrlq ($Zhi,4);
  675. &shl ($rem[0],4); # rem<<4
  676. &pxor ($Zlo,&QWP(16,"esp",$nhi[1],8)); # Z^=H[nhi]
  677. &psllq ($tmp,60);
  678. &movz ($rem[0],&LB($rem[0]));
  679. &pxor ($Zlo,$tmp);
  680. &pxor ($Zhi,&QWP(16+128,"esp",$nhi[1],8));
  681. &pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2);
  682. &pxor ($Zhi,$red[1]);
  683. &movd ($dat,$Zlo);
  684. &pinsrw ($red[2],&WP(0,$rem_8bit,$rem[0],2),3); # last is <<48
  685. &psllq ($red[0],12); # correct by <<16>>4
  686. &pxor ($Zhi,$red[0]);
  687. &psrlq ($Zlo,32);
  688. &pxor ($Zhi,$red[2]);
  689. &mov ("ecx",&DWP(528+16+4,"esp")); # restore inp
  690. &movd ("ebx",$Zlo);
  691. &movq ($tmp,$Zhi); # 01234567
  692. &psllw ($Zhi,8); # 1.3.5.7.
  693. &psrlw ($tmp,8); # .0.2.4.6
  694. &por ($Zhi,$tmp); # 10325476
  695. &bswap ($dat);
  696. &pshufw ($Zhi,$Zhi,0b00011011); # 76543210
  697. &bswap ("ebx");
  698. &cmp ("ecx",&DWP(528+16+8,"esp")); # are we done?
  699. &jne (&label("outer"));
  700. }
  701. &mov ("eax",&DWP(528+16+0,"esp")); # restore Xi
  702. &mov (&DWP(12,"eax"),"edx");
  703. &mov (&DWP(8,"eax"),"ebx");
  704. &movq (&QWP(0,"eax"),$Zhi);
  705. &mov ("esp",&DWP(528+16+12,"esp")); # restore original %esp
  706. &emms ();
  707. }
  708. &function_end("gcm_ghash_4bit_mmx");
  709. }}
  710. if ($sse2) {{
  711. ######################################################################
  712. # PCLMULQDQ version.
  713. $Xip="eax";
  714. $Htbl="edx";
  715. $const="ecx";
  716. $inp="esi";
  717. $len="ebx";
  718. ($Xi,$Xhi)=("xmm0","xmm1"); $Hkey="xmm2";
  719. ($T1,$T2,$T3)=("xmm3","xmm4","xmm5");
  720. ($Xn,$Xhn)=("xmm6","xmm7");
  721. &static_label("bswap");
  722. sub clmul64x64_T2 { # minimal "register" pressure
  723. my ($Xhi,$Xi,$Hkey,$HK)=@_;
  724. &movdqa ($Xhi,$Xi); #
  725. &pshufd ($T1,$Xi,0b01001110);
  726. &pshufd ($T2,$Hkey,0b01001110) if (!defined($HK));
  727. &pxor ($T1,$Xi); #
  728. &pxor ($T2,$Hkey) if (!defined($HK));
  729. $HK=$T2 if (!defined($HK));
  730. &pclmulqdq ($Xi,$Hkey,0x00); #######
  731. &pclmulqdq ($Xhi,$Hkey,0x11); #######
  732. &pclmulqdq ($T1,$HK,0x00); #######
  733. &xorps ($T1,$Xi); #
  734. &xorps ($T1,$Xhi); #
  735. &movdqa ($T2,$T1); #
  736. &psrldq ($T1,8);
  737. &pslldq ($T2,8); #
  738. &pxor ($Xhi,$T1);
  739. &pxor ($Xi,$T2); #
  740. }
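# [Editor's note] The three pclmulqdq above implement a carry-less Karatsuba
# multiplication: with X = X1*2^64 ^ X0 and H = H1*2^64 ^ H0,
#
#	X*H = X1*H1*2^128 ^ X0*H0
#	    ^ [ (X1^X0)*(H1^H0) ^ X1*H1 ^ X0*H0 ]*2^64
#
# i.e. three 64x64 products (the 0x11 and 0x00 forms, plus 0x00 on the
# pre-xored halves in $T1/$HK) instead of four; the pslldq/psrldq pair then
# splits the middle term across ($Xhi,$Xi).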
  741. sub clmul64x64_T3 {
  742. # Even though this subroutine offers visually better ILP, it
  743. # was empirically found to be a tad slower than the version above,
  744. # at least in the gcm_ghash_clmul context. But that is just as well,
  745. # because loop modulo-scheduling is possible only thanks to the
  746. # minimized "register" pressure...
  747. my ($Xhi,$Xi,$Hkey)=@_;
  748. &movdqa ($T1,$Xi); #
  749. &movdqa ($Xhi,$Xi);
  750. &pclmulqdq ($Xi,$Hkey,0x00); #######
  751. &pclmulqdq ($Xhi,$Hkey,0x11); #######
  752. &pshufd ($T2,$T1,0b01001110); #
  753. &pshufd ($T3,$Hkey,0b01001110);
  754. &pxor ($T2,$T1); #
  755. &pxor ($T3,$Hkey);
  756. &pclmulqdq ($T2,$T3,0x00); #######
  757. &pxor ($T2,$Xi); #
  758. &pxor ($T2,$Xhi); #
  759. &movdqa ($T3,$T2); #
  760. &psrldq ($T2,8);
  761. &pslldq ($T3,8); #
  762. &pxor ($Xhi,$T2);
  763. &pxor ($Xi,$T3); #
  764. }
  765. if (1) { # Algorithm 9 with <<1 twist.
  766. # Reduction is shorter and uses only two
  767. # temporary registers, which makes it better
  768. # candidate for interleaving with 64x64
  769. # multiplication. Pre-modulo-scheduled loop
  770. # was found to be ~20% faster than Algorithm 5
  771. # below. Algorithm 9 was therefore chosen for
  772. # further optimization...
  773. sub reduction_alg9 { # 17/11 times faster than Intel version
  774. my ($Xhi,$Xi) = @_;
  775. # 1st phase
  776. &movdqa ($T2,$Xi); #
  777. &movdqa ($T1,$Xi);
  778. &psllq ($Xi,5);
  779. &pxor ($T1,$Xi); #
  780. &psllq ($Xi,1);
  781. &pxor ($Xi,$T1); #
  782. &psllq ($Xi,57); #
  783. &movdqa ($T1,$Xi); #
  784. &pslldq ($Xi,8);
  785. &psrldq ($T1,8); #
  786. &pxor ($Xi,$T2);
  787. &pxor ($Xhi,$T1); #
  788. # 2nd phase
  789. &movdqa ($T2,$Xi);
  790. &psrlq ($Xi,1);
  791. &pxor ($Xhi,$T2); #
  792. &pxor ($T2,$Xi);
  793. &psrlq ($Xi,5);
  794. &pxor ($Xi,$T2); #
  795. &psrlq ($Xi,1); #
  796. &pxor ($Xi,$Xhi) #
  797. }
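# [Editor's note] Written out, the first phase above computes, per 64-bit lane,
#
#	T = (Xi<<57) ^ (Xi<<62) ^ (Xi<<63)
#
# and folds T one lane up into ($Xhi,$Xi) while XORing the original $Xi back
# in; the second phase then leaves (again with per-lane shifts)
#
#	Xi = Xhi ^ Xi ^ (Xi>>1) ^ (Xi>>2) ^ (Xi>>7)
#
# The shift counts 1, 2, 7 (and 57=64-7, 62=64-2, 63=64-1) reflect the
# low-order terms x^7+x^2+x+1 of the GHASH polynomial x^128+x^7+x^2+x+1 in
# the bit-reflected representation used throughout.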
  798. &function_begin_B("gcm_init_clmul");
  799. &mov ($Htbl,&wparam(0));
  800. &mov ($Xip,&wparam(1));
  801. &call (&label("pic"));
  802. &set_label("pic");
  803. &blindpop ($const);
  804. &lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
  805. &movdqu ($Hkey,&QWP(0,$Xip));
  806. &pshufd ($Hkey,$Hkey,0b01001110);# dword swap
  807. # <<1 twist
  808. &pshufd ($T2,$Hkey,0b11111111); # broadcast uppermost dword
  809. &movdqa ($T1,$Hkey);
  810. &psllq ($Hkey,1);
  811. &pxor ($T3,$T3); #
  812. &psrlq ($T1,63);
  813. &pcmpgtd ($T3,$T2); # broadcast carry bit
  814. &pslldq ($T1,8);
  815. &por ($Hkey,$T1); # H<<=1
  816. # magic reduction
  817. &pand ($T3,&QWP(16,$const)); # 0x1c2_polynomial
  818. &pxor ($Hkey,$T3); # if(carry) H^=0x1c2_polynomial
  819. # calculate H^2
  820. &movdqa ($Xi,$Hkey);
  821. &clmul64x64_T2 ($Xhi,$Xi,$Hkey);
  822. &reduction_alg9 ($Xhi,$Xi);
  823. &pshufd ($T1,$Hkey,0b01001110);
  824. &pshufd ($T2,$Xi,0b01001110);
  825. &pxor ($T1,$Hkey); # Karatsuba pre-processing
  826. &movdqu (&QWP(0,$Htbl),$Hkey); # save H
  827. &pxor ($T2,$Xi); # Karatsuba pre-processing
  828. &movdqu (&QWP(16,$Htbl),$Xi); # save H^2
  829. &palignr ($T2,$T1,8); # low part is H.lo^H.hi
  830. &movdqu (&QWP(32,$Htbl),$T2); # save Karatsuba "salt"
  831. &ret ();
  832. &function_end_B("gcm_init_clmul");
  833. &function_begin_B("gcm_gmult_clmul");
  834. &mov ($Xip,&wparam(0));
  835. &mov ($Htbl,&wparam(1));
  836. &call (&label("pic"));
  837. &set_label("pic");
  838. &blindpop ($const);
  839. &lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
  840. &movdqu ($Xi,&QWP(0,$Xip));
  841. &movdqa ($T3,&QWP(0,$const));
  842. &movups ($Hkey,&QWP(0,$Htbl));
  843. &pshufb ($Xi,$T3);
  844. &movups ($T2,&QWP(32,$Htbl));
  845. &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$T2);
  846. &reduction_alg9 ($Xhi,$Xi);
  847. &pshufb ($Xi,$T3);
  848. &movdqu (&QWP(0,$Xip),$Xi);
  849. &ret ();
  850. &function_end_B("gcm_gmult_clmul");
  851. &function_begin("gcm_ghash_clmul");
  852. &mov ($Xip,&wparam(0));
  853. &mov ($Htbl,&wparam(1));
  854. &mov ($inp,&wparam(2));
  855. &mov ($len,&wparam(3));
  856. &call (&label("pic"));
  857. &set_label("pic");
  858. &blindpop ($const);
  859. &lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
  860. &movdqu ($Xi,&QWP(0,$Xip));
  861. &movdqa ($T3,&QWP(0,$const));
  862. &movdqu ($Hkey,&QWP(0,$Htbl));
  863. &pshufb ($Xi,$T3);
  864. &sub ($len,0x10);
  865. &jz (&label("odd_tail"));
  866. #######
  867. # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
  868. # [(H*Ii+1) + (H*Xi+1)] mod P =
  869. # [(H*Ii+1) + H^2*(Ii+Xi)] mod P
  870. #
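# [Editor's note] This is the Naggr=2 aggregation mentioned at the top of the
# file: the two fetched blocks are multiplied by H and H^2 respectively, the
# products are summed, and the modulo-reduction is performed only once per
# pair of blocks (interleaved with the multiplications inside mod_loop below).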
  871. &movdqu ($T1,&QWP(0,$inp)); # Ii
  872. &movdqu ($Xn,&QWP(16,$inp)); # Ii+1
  873. &pshufb ($T1,$T3);
  874. &pshufb ($Xn,$T3);
  875. &movdqu ($T3,&QWP(32,$Htbl));
  876. &pxor ($Xi,$T1); # Ii+Xi
  877. &pshufd ($T1,$Xn,0b01001110); # H*Ii+1
  878. &movdqa ($Xhn,$Xn);
  879. &pxor ($T1,$Xn); #
  880. &lea ($inp,&DWP(32,$inp)); # i+=2
  881. &pclmulqdq ($Xn,$Hkey,0x00); #######
  882. &pclmulqdq ($Xhn,$Hkey,0x11); #######
  883. &pclmulqdq ($T1,$T3,0x00); #######
  884. &movups ($Hkey,&QWP(16,$Htbl)); # load H^2
  885. &nop ();
  886. &sub ($len,0x20);
  887. &jbe (&label("even_tail"));
  888. &jmp (&label("mod_loop"));
  889. &set_label("mod_loop",32);
  890. &pshufd ($T2,$Xi,0b01001110); # H^2*(Ii+Xi)
  891. &movdqa ($Xhi,$Xi);
  892. &pxor ($T2,$Xi); #
  893. &nop ();
  894. &pclmulqdq ($Xi,$Hkey,0x00); #######
  895. &pclmulqdq ($Xhi,$Hkey,0x11); #######
  896. &pclmulqdq ($T2,$T3,0x10); #######
  897. &movups ($Hkey,&QWP(0,$Htbl)); # load H
  898. &xorps ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)
  899. &movdqa ($T3,&QWP(0,$const));
  900. &xorps ($Xhi,$Xhn);
  901. &movdqu ($Xhn,&QWP(0,$inp)); # Ii
  902. &pxor ($T1,$Xi); # aggregated Karatsuba post-processing
  903. &movdqu ($Xn,&QWP(16,$inp)); # Ii+1
  904. &pxor ($T1,$Xhi); #
  905. &pshufb ($Xhn,$T3);
  906. &pxor ($T2,$T1); #
  907. &movdqa ($T1,$T2); #
  908. &psrldq ($T2,8);
  909. &pslldq ($T1,8); #
  910. &pxor ($Xhi,$T2);
  911. &pxor ($Xi,$T1); #
  912. &pshufb ($Xn,$T3);
  913. &pxor ($Xhi,$Xhn); # "Ii+Xi", consume early
  914. &movdqa ($Xhn,$Xn); #&clmul64x64_TX ($Xhn,$Xn,$Hkey); H*Ii+1
  915. &movdqa ($T2,$Xi); #&reduction_alg9($Xhi,$Xi); 1st phase
  916. &movdqa ($T1,$Xi);
  917. &psllq ($Xi,5);
  918. &pxor ($T1,$Xi); #
  919. &psllq ($Xi,1);
  920. &pxor ($Xi,$T1); #
  921. &pclmulqdq ($Xn,$Hkey,0x00); #######
  922. &movups ($T3,&QWP(32,$Htbl));
  923. &psllq ($Xi,57); #
  924. &movdqa ($T1,$Xi); #
  925. &pslldq ($Xi,8);
  926. &psrldq ($T1,8); #
  927. &pxor ($Xi,$T2);
  928. &pxor ($Xhi,$T1); #
  929. &pshufd ($T1,$Xhn,0b01001110);
  930. &movdqa ($T2,$Xi); # 2nd phase
  931. &psrlq ($Xi,1);
  932. &pxor ($T1,$Xhn);
  933. &pxor ($Xhi,$T2); #
  934. &pclmulqdq ($Xhn,$Hkey,0x11); #######
  935. &movups ($Hkey,&QWP(16,$Htbl)); # load H^2
  936. &pxor ($T2,$Xi);
  937. &psrlq ($Xi,5);
  938. &pxor ($Xi,$T2); #
  939. &psrlq ($Xi,1); #
  940. &pxor ($Xi,$Xhi); #
  941. &pclmulqdq ($T1,$T3,0x00); #######
  942. &lea ($inp,&DWP(32,$inp));
  943. &sub ($len,0x20);
  944. &ja (&label("mod_loop"));
  945. &set_label("even_tail");
  946. &pshufd ($T2,$Xi,0b01001110); # H^2*(Ii+Xi)
  947. &movdqa ($Xhi,$Xi);
  948. &pxor ($T2,$Xi); #
  949. &pclmulqdq ($Xi,$Hkey,0x00); #######
  950. &pclmulqdq ($Xhi,$Hkey,0x11); #######
  951. &pclmulqdq ($T2,$T3,0x10); #######
  952. &movdqa ($T3,&QWP(0,$const));
  953. &xorps ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)
  954. &xorps ($Xhi,$Xhn);
  955. &pxor ($T1,$Xi); # aggregated Karatsuba post-processing
  956. &pxor ($T1,$Xhi); #
  957. &pxor ($T2,$T1); #
  958. &movdqa ($T1,$T2); #
  959. &psrldq ($T2,8);
  960. &pslldq ($T1,8); #
  961. &pxor ($Xhi,$T2);
  962. &pxor ($Xi,$T1); #
  963. &reduction_alg9 ($Xhi,$Xi);
  964. &test ($len,$len);
  965. &jnz (&label("done"));
  966. &movups ($Hkey,&QWP(0,$Htbl)); # load H
  967. &set_label("odd_tail");
  968. &movdqu ($T1,&QWP(0,$inp)); # Ii
  969. &pshufb ($T1,$T3);
  970. &pxor ($Xi,$T1); # Ii+Xi
  971. &clmul64x64_T2 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi)
  972. &reduction_alg9 ($Xhi,$Xi);
  973. &set_label("done");
  974. &pshufb ($Xi,$T3);
  975. &movdqu (&QWP(0,$Xip),$Xi);
  976. &function_end("gcm_ghash_clmul");
  977. } else { # Algorithm 5. Kept for reference purposes.
  978. sub reduction_alg5 { # 19/16 times faster than Intel version
  979. my ($Xhi,$Xi)=@_;
  980. # <<1
  981. &movdqa ($T1,$Xi); #
  982. &movdqa ($T2,$Xhi);
  983. &pslld ($Xi,1);
  984. &pslld ($Xhi,1); #
  985. &psrld ($T1,31);
  986. &psrld ($T2,31); #
  987. &movdqa ($T3,$T1);
  988. &pslldq ($T1,4);
  989. &psrldq ($T3,12); #
  990. &pslldq ($T2,4);
  991. &por ($Xhi,$T3); #
  992. &por ($Xi,$T1);
  993. &por ($Xhi,$T2); #
  994. # 1st phase
  995. &movdqa ($T1,$Xi);
  996. &movdqa ($T2,$Xi);
  997. &movdqa ($T3,$Xi); #
  998. &pslld ($T1,31);
  999. &pslld ($T2,30);
  1000. &pslld ($Xi,25); #
  1001. &pxor ($T1,$T2);
  1002. &pxor ($T1,$Xi); #
  1003. &movdqa ($T2,$T1); #
  1004. &pslldq ($T1,12);
  1005. &psrldq ($T2,4); #
  1006. &pxor ($T3,$T1);
  1007. # 2nd phase
  1008. &pxor ($Xhi,$T3); #
  1009. &movdqa ($Xi,$T3);
  1010. &movdqa ($T1,$T3);
  1011. &psrld ($Xi,1); #
  1012. &psrld ($T1,2);
  1013. &psrld ($T3,7); #
  1014. &pxor ($Xi,$T1);
  1015. &pxor ($Xhi,$T2);
  1016. &pxor ($Xi,$T3); #
  1017. &pxor ($Xi,$Xhi); #
  1018. }
  1019. &function_begin_B("gcm_init_clmul");
  1020. &mov ($Htbl,&wparam(0));
  1021. &mov ($Xip,&wparam(1));
  1022. &call (&label("pic"));
  1023. &set_label("pic");
  1024. &blindpop ($const);
  1025. &lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
  1026. &movdqu ($Hkey,&QWP(0,$Xip));
  1027. &pshufd ($Hkey,$Hkey,0b01001110);# dword swap
  1028. # calculate H^2
  1029. &movdqa ($Xi,$Hkey);
  1030. &clmul64x64_T3 ($Xhi,$Xi,$Hkey);
  1031. &reduction_alg5 ($Xhi,$Xi);
  1032. &movdqu (&QWP(0,$Htbl),$Hkey); # save H
  1033. &movdqu (&QWP(16,$Htbl),$Xi); # save H^2
  1034. &ret ();
  1035. &function_end_B("gcm_init_clmul");
  1036. &function_begin_B("gcm_gmult_clmul");
  1037. &mov ($Xip,&wparam(0));
  1038. &mov ($Htbl,&wparam(1));
  1039. &call (&label("pic"));
  1040. &set_label("pic");
  1041. &blindpop ($const);
  1042. &lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
  1043. &movdqu ($Xi,&QWP(0,$Xip));
  1044. &movdqa ($Xn,&QWP(0,$const));
  1045. &movdqu ($Hkey,&QWP(0,$Htbl));
  1046. &pshufb ($Xi,$Xn);
  1047. &clmul64x64_T3 ($Xhi,$Xi,$Hkey);
  1048. &reduction_alg5 ($Xhi,$Xi);
  1049. &pshufb ($Xi,$Xn);
  1050. &movdqu (&QWP(0,$Xip),$Xi);
  1051. &ret ();
  1052. &function_end_B("gcm_gmult_clmul");
  1053. &function_begin("gcm_ghash_clmul");
  1054. &mov ($Xip,&wparam(0));
  1055. &mov ($Htbl,&wparam(1));
  1056. &mov ($inp,&wparam(2));
  1057. &mov ($len,&wparam(3));
  1058. &call (&label("pic"));
  1059. &set_label("pic");
  1060. &blindpop ($const);
  1061. &lea ($const,&DWP(&label("bswap")."-".&label("pic"),$const));
  1062. &movdqu ($Xi,&QWP(0,$Xip));
  1063. &movdqa ($T3,&QWP(0,$const));
  1064. &movdqu ($Hkey,&QWP(0,$Htbl));
  1065. &pshufb ($Xi,$T3);
  1066. &sub ($len,0x10);
  1067. &jz (&label("odd_tail"));
  1068. #######
  1069. # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
  1070. # [(H*Ii+1) + (H*Xi+1)] mod P =
  1071. # [(H*Ii+1) + H^2*(Ii+Xi)] mod P
  1072. #
  1073. &movdqu ($T1,&QWP(0,$inp)); # Ii
  1074. &movdqu ($Xn,&QWP(16,$inp)); # Ii+1
  1075. &pshufb ($T1,$T3);
  1076. &pshufb ($Xn,$T3);
  1077. &pxor ($Xi,$T1); # Ii+Xi
  1078. &clmul64x64_T3 ($Xhn,$Xn,$Hkey); # H*Ii+1
  1079. &movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2
  1080. &sub ($len,0x20);
  1081. &lea ($inp,&DWP(32,$inp)); # i+=2
  1082. &jbe (&label("even_tail"));
  1083. &set_label("mod_loop");
  1084. &clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi)
  1085. &movdqu ($Hkey,&QWP(0,$Htbl)); # load H
  1086. &pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)
  1087. &pxor ($Xhi,$Xhn);
  1088. &reduction_alg5 ($Xhi,$Xi);
  1089. #######
  1090. &movdqa ($T3,&QWP(0,$const));
  1091. &movdqu ($T1,&QWP(0,$inp)); # Ii
  1092. &movdqu ($Xn,&QWP(16,$inp)); # Ii+1
  1093. &pshufb ($T1,$T3);
  1094. &pshufb ($Xn,$T3);
  1095. &pxor ($Xi,$T1); # Ii+Xi
  1096. &clmul64x64_T3 ($Xhn,$Xn,$Hkey); # H*Ii+1
  1097. &movdqu ($Hkey,&QWP(16,$Htbl)); # load H^2
  1098. &sub ($len,0x20);
  1099. &lea ($inp,&DWP(32,$inp));
  1100. &ja (&label("mod_loop"));
  1101. &set_label("even_tail");
  1102. &clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H^2*(Ii+Xi)
  1103. &pxor ($Xi,$Xn); # (H*Ii+1) + H^2*(Ii+Xi)
  1104. &pxor ($Xhi,$Xhn);
  1105. &reduction_alg5 ($Xhi,$Xi);
  1106. &movdqa ($T3,&QWP(0,$const));
  1107. &test ($len,$len);
  1108. &jnz (&label("done"));
  1109. &movdqu ($Hkey,&QWP(0,$Htbl)); # load H
  1110. &set_label("odd_tail");
  1111. &movdqu ($T1,&QWP(0,$inp)); # Ii
  1112. &pshufb ($T1,$T3);
  1113. &pxor ($Xi,$T1); # Ii+Xi
  1114. &clmul64x64_T3 ($Xhi,$Xi,$Hkey); # H*(Ii+Xi)
  1115. &reduction_alg5 ($Xhi,$Xi);
  1116. &movdqa ($T3,&QWP(0,$const));
  1117. &set_label("done");
  1118. &pshufb ($Xi,$T3);
  1119. &movdqu (&QWP(0,$Xip),$Xi);
  1120. &function_end("gcm_ghash_clmul");
  1121. }
  1122. &set_label("bswap",64);
  1123. &data_byte(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
  1124. &data_byte(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2); # 0x1c2_polynomial
  1125. &set_label("rem_8bit",64);
  1126. &data_short(0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E);
  1127. &data_short(0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E);
  1128. &data_short(0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E);
  1129. &data_short(0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E);
  1130. &data_short(0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E);
  1131. &data_short(0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E);
  1132. &data_short(0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E);
  1133. &data_short(0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E);
  1134. &data_short(0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE);
  1135. &data_short(0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE);
  1136. &data_short(0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE);
  1137. &data_short(0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE);
  1138. &data_short(0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E);
  1139. &data_short(0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E);
  1140. &data_short(0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE);
  1141. &data_short(0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE);
  1142. &data_short(0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E);
  1143. &data_short(0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E);
  1144. &data_short(0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E);
  1145. &data_short(0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E);
  1146. &data_short(0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E);
  1147. &data_short(0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E);
  1148. &data_short(0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E);
  1149. &data_short(0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E);
  1150. &data_short(0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE);
  1151. &data_short(0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE);
  1152. &data_short(0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE);
  1153. &data_short(0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE);
  1154. &data_short(0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E);
  1155. &data_short(0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E);
  1156. &data_short(0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE);
  1157. &data_short(0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE);
  1158. }} # $sse2
  1159. &set_label("rem_4bit",64);
  1160. &data_word(0,0x0000<<$S,0,0x1C20<<$S,0,0x3840<<$S,0,0x2460<<$S);
  1161. &data_word(0,0x7080<<$S,0,0x6CA0<<$S,0,0x48C0<<$S,0,0x54E0<<$S);
  1162. &data_word(0,0xE100<<$S,0,0xFD20<<$S,0,0xD940<<$S,0,0xC560<<$S);
  1163. &data_word(0,0x9180<<$S,0,0x8DA0<<$S,0,0xA9C0<<$S,0,0xB5E0<<$S);
  1164. }}} # !$x86only
  1165. &asciz("GHASH for x86, CRYPTOGAMS by <appro\@openssl.org>");
  1166. &asm_finish();
  1167. close STDOUT or die "error closing STDOUT: $!";
  1168. # A question was raised about the choice of vanilla MMX. Or rather, why wasn't
  1169. # SSE2 chosen instead? In addition to the fact that MMX runs on legacy
  1170. # CPUs such as PIII, the "4-bit" MMX version was observed to provide better
  1171. # performance than the *corresponding* SSE2 one even on contemporary CPUs.
  1172. # SSE2 results were provided by Peter-Michael Hager. He maintains an SSE2
  1173. # implementation featuring a full range of lookup-table sizes, but with
  1174. # per-invocation lookup table setup. The latter means that the table size is
  1175. # chosen depending on how much data is to be hashed in every given call:
  1176. # more data, larger table. The best reported result for Core2 is ~4 cycles
  1177. # per processed byte out of a 64KB block. This number accounts even for the
  1178. # 64KB table setup overhead. As discussed in gcm128.c, we choose to be
  1179. # more conservative in respect to lookup table sizes, but how do the
  1180. # results compare? The minimalistic "256B" MMX version delivers ~11 cycles
  1181. # on the same platform. As also discussed in gcm128.c, the next in line "8-bit
  1182. # Shoup's" or "4KB" method should deliver twice the performance of the
  1183. # "256B" one, in other words not worse than ~6 cycles per byte. It
  1184. # should also be noted that in the SSE2 case the improvement can be "super-
  1185. # linear," i.e. more than twice, mostly because >>8 maps to a single
  1186. # instruction on an SSE2 register. This is unlike the "4-bit" case, where >>4
  1187. # maps to the same number of instructions in both MMX and SSE2 cases.
  1188. # The bottom line is that a switch to SSE2 is considered justifiable
  1189. # only in case we choose to implement the "8-bit" method...