
#! /usr/bin/env perl
# Copyright 2013-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# The OpenSSL GCM implementation is organized in such a way that its
# performance is rather close to the sum of its streamed components,
# in this context parallelized AES-NI CTR and modulo-scheduled
# PCLMULQDQ-enabled GHASH. Unfortunately, as no stitched implementation
# was observed to perform significantly better than the sum of the
# components on contemporary CPUs, the effort was deemed impossible to
# justify. This module is based on a combination of Intel submissions,
# [1] and [2], with a MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp., who verified that it reduces shuffle
# pressure with notable relative improvement, achieving 1.0 cycle per
# byte processed with a 128-bit key on Haswell, 0.74 on Broadwell and
# 0.63 on Skylake... [Mentioned results are raw profiled measurements
# for a favourable packet size, one divisible by 96. Applications
# using the EVP interface will observe a few percent worse
# performance.]
#
# Knights Landing processes 1 byte in 1.25 cycles (measured with EVP).
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
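#
# As an editorial sketch of the stitch (not the exact instruction
# schedule): each .Loop6x iteration runs six AES-CTR streams through
# one set of rounds while folding the six ciphertext blocks of the
# previous iteration into the hash with the precomputed powers
# H^1..H^6; the MOVBE loads byte-swap those ciphertext words on their
# way to the GHASH staging area on the stack. In C-like pseudocode,
# with hypothetical helper names:
#
#	while (len >= 6*16) {
#		aes_ctr_6x(ctr, key, keystream);      /* six parallel streams */
#		Xi = ghash_6x(Xi, prev_blocks, Htbl); /* one iteration behind */
#		xor_blocks(out, inp, keystream, 6*16);
#		len -= 6*16;
#	}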
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.20) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
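# $avx==2 is required for the stitched code path below; per the probes
# above that roughly means gas >= 2.22, nasm >= 2.10, ml64 >= 11, or
# clang/LLVM newer than 3.0. Anything less falls through to the stub
# implementations at the end of this file.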
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

if ($avx>1) {{{

($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");

($Ii,$T1,$T2,$Hkey,
 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));

($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));

($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");
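# Register roles (editorial note): $Xi accumulates the GHASH state,
# $inout0..$inout5 carry the six parallel AES-CTR streams, $rndkey the
# current round key, while $Ii/$T1/$T2/$Hkey/$Z0..$Z3 are scratch and
# freely re-borrowed (see the "borrow" comments below). $counter caches
# the last 32 bits of the counter block so carries can be detected
# without touching the XMM copy.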
$code=<<___;
.text

.type	_aesni_ctr32_ghash_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_ghash_6x:
.cfi_startproc
	vmovdqu 0x20($const),$T2	# borrow $T2, .Lone_msb
	sub \$6,$len
	vpxor $Z0,$Z0,$Z0		# $Z0 = 0
	vmovdqu 0x00-0x80($key),$rndkey
	vpaddb $T2,$T1,$inout1
	vpaddb $T2,$inout1,$inout2
	vpaddb $T2,$inout2,$inout3
	vpaddb $T2,$inout3,$inout4
	vpaddb $T2,$inout4,$inout5
	vpxor $rndkey,$T1,$inout0
	vmovdqu $Z0,16+8(%rsp)		# "$Z3" = 0
	jmp .Loop6x

.align	32
.Loop6x:
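	# The last 32 bits of the counter block sit byte-swapped in
	# $counter, so the add below bumps what is logically the
	# counter's least significant byte; a carry out of bit 31 means
	# the single-byte vpaddb increments would wrap, and
	# .Lhandle_ctr32 redoes them as full 32-bit additions.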
	add \$`6<<24`,$counter
	jc .Lhandle_ctr32		# discard $inout[1-5]?
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddb $T2,$inout5,$T1		# next counter value
	vpxor $rndkey,$inout1,$inout1
	vpxor $rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu $T1,($ivp)		# save next counter value
	vpclmulqdq \$0x10,$Hkey,$Z3,$Z1
	vpxor $rndkey,$inout3,$inout3
	vmovups 0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq \$0x01,$Hkey,$Z3,$Z2
	xor %r12,%r12
	cmp $in0,$end0

	vaesenc $T2,$inout0,$inout0
	vmovdqu 0x30+8(%rsp),$Ii	# I[4]
	vpxor $rndkey,$inout4,$inout4
	vpclmulqdq \$0x00,$Hkey,$Z3,$T1
	vaesenc $T2,$inout1,$inout1
	vpxor $rndkey,$inout5,$inout5
	setnc %r12b
	vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
	vaesenc $T2,$inout2,$inout2
	vmovdqu 0x10-0x20($Xip),$Hkey	# $Hkey^2
	neg %r12
	vaesenc $T2,$inout3,$inout3
	vpxor $Z1,$Z2,$Z2
	vpclmulqdq \$0x00,$Hkey,$Ii,$Z1
	vpxor $Z0,$Xi,$Xi		# modulo-scheduled
	vaesenc $T2,$inout4,$inout4
	vpxor $Z1,$T1,$Z0
	and \$0x60,%r12
	vmovups 0x20-0x80($key),$rndkey
	vpclmulqdq \$0x10,$Hkey,$Ii,$T1
	vaesenc $T2,$inout5,$inout5

	vpclmulqdq \$0x01,$Hkey,$Ii,$T2
	lea ($in0,%r12),$in0
	vaesenc $rndkey,$inout0,$inout0
	vpxor 16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq \$0x11,$Hkey,$Ii,$Hkey
	vmovdqu 0x40+8(%rsp),$Ii	# I[3]
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x58($in0),%r13
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x50($in0),%r12
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x20+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x28+8(%rsp)
	vmovdqu 0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x30-0x80($key),$rndkey
	vpxor $T1,$Z2,$Z2
	vpclmulqdq \$0x00,$Z1,$Ii,$T1
	vaesenc $rndkey,$inout0,$inout0
	vpxor $T2,$Z2,$Z2
	vpclmulqdq \$0x10,$Z1,$Ii,$T2
	vaesenc $rndkey,$inout1,$inout1
	vpxor $Hkey,$Z3,$Z3
	vpclmulqdq \$0x01,$Z1,$Ii,$Hkey
	vaesenc $rndkey,$inout2,$inout2
	vpclmulqdq \$0x11,$Z1,$Ii,$Z1
	vmovdqu 0x50+8(%rsp),$Ii	# I[2]
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vpxor $T1,$Z0,$Z0
	vmovdqu 0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x40-0x80($key),$rndkey
	vpxor $T2,$Z2,$Z2
	vpclmulqdq \$0x00,$T1,$Ii,$T2
	vaesenc $rndkey,$inout0,$inout0
	vpxor $Hkey,$Z2,$Z2
	vpclmulqdq \$0x10,$T1,$Ii,$Hkey
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x48($in0),%r13
	vpxor $Z1,$Z3,$Z3
	vpclmulqdq \$0x01,$T1,$Ii,$Z1
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x40($in0),%r12
	vpclmulqdq \$0x11,$T1,$Ii,$T1
	vmovdqu 0x60+8(%rsp),$Ii	# I[1]
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x30+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x38+8(%rsp)
	vpxor $T2,$Z0,$Z0
	vmovdqu 0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x50-0x80($key),$rndkey
	vpxor $Hkey,$Z2,$Z2
	vpclmulqdq \$0x00,$T2,$Ii,$Hkey
	vaesenc $rndkey,$inout0,$inout0
	vpxor $Z1,$Z2,$Z2
	vpclmulqdq \$0x10,$T2,$Ii,$Z1
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x38($in0),%r13
	vpxor $T1,$Z3,$Z3
	vpclmulqdq \$0x01,$T2,$Ii,$T1
	vpxor 0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x30($in0),%r12
	vpclmulqdq \$0x11,$T2,$Ii,$T2
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x40+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x48+8(%rsp)
	vpxor $Hkey,$Z0,$Z0
	vmovdqu 0x70-0x20($Xip),$Hkey	# $Hkey^6
	vaesenc $rndkey,$inout5,$inout5

	vmovups 0x60-0x80($key),$rndkey
	vpxor $Z1,$Z2,$Z2
	vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
	vaesenc $rndkey,$inout0,$inout0
	vpxor $T1,$Z2,$Z2
	vpclmulqdq \$0x01,$Hkey,$Xi,$T1
	vaesenc $rndkey,$inout1,$inout1
	movbe 0x28($in0),%r13
	vpxor $T2,$Z3,$Z3
	vpclmulqdq \$0x00,$Hkey,$Xi,$T2
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x20($in0),%r12
	vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
	vaesenc $rndkey,$inout3,$inout3
	mov %r13,0x50+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	mov %r12,0x58+8(%rsp)
	vpxor $Z1,$Z2,$Z2
	vaesenc $rndkey,$inout5,$inout5
	vpxor $T1,$Z2,$Z2

	vmovups 0x70-0x80($key),$rndkey
	vpslldq \$8,$Z2,$Z1
	vpxor $T2,$Z0,$Z0
	vmovdqu 0x10($const),$Hkey	# .Lpoly
	vaesenc $rndkey,$inout0,$inout0
	vpxor $Xi,$Z3,$Z3
	vaesenc $rndkey,$inout1,$inout1
	vpxor $Z1,$Z0,$Z0
	movbe 0x18($in0),%r13
	vaesenc $rndkey,$inout2,$inout2
	movbe 0x10($in0),%r12
	vpalignr \$8,$Z0,$Z0,$Ii	# 1st phase
	vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
	mov %r13,0x60+8(%rsp)
	vaesenc $rndkey,$inout3,$inout3
	mov %r12,0x68+8(%rsp)
	vaesenc $rndkey,$inout4,$inout4
	vmovups 0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	vaesenc $rndkey,$inout5,$inout5

	vaesenc $T1,$inout0,$inout0
	vmovups 0x90-0x80($key),$rndkey
	vaesenc $T1,$inout1,$inout1
	vpsrldq \$8,$Z2,$Z2
	vaesenc $T1,$inout2,$inout2
	vpxor $Z2,$Z3,$Z3
	vaesenc $T1,$inout3,$inout3
	vpxor $Ii,$Z0,$Z0
	movbe 0x08($in0),%r13
	vaesenc $T1,$inout4,$inout4
	movbe 0x00($in0),%r12
	vaesenc $T1,$inout5,$inout5
	vmovups 0xa0-0x80($key),$T1
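	# $rounds holds the key schedule's round count (offset 240 of
	# AES_KEY): 10 for AES-128, 12 for AES-192, 14 for AES-256. The
	# two extra round pairs below run only for the longer keys.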
	cmp \$11,$rounds
	jb .Lenc_tail			# 128-bit key
	vaesenc $rndkey,$inout0,$inout0
	vaesenc $rndkey,$inout1,$inout1
	vaesenc $rndkey,$inout2,$inout2
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vaesenc $rndkey,$inout5,$inout5

	vaesenc $T1,$inout0,$inout0
	vaesenc $T1,$inout1,$inout1
	vaesenc $T1,$inout2,$inout2
	vaesenc $T1,$inout3,$inout3
	vaesenc $T1,$inout4,$inout4
	vmovups 0xb0-0x80($key),$rndkey
	vaesenc $T1,$inout5,$inout5
	vmovups 0xc0-0x80($key),$T1
	je .Lenc_tail			# 192-bit key

	vaesenc $rndkey,$inout0,$inout0
	vaesenc $rndkey,$inout1,$inout1
	vaesenc $rndkey,$inout2,$inout2
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vaesenc $rndkey,$inout5,$inout5

	vaesenc $T1,$inout0,$inout0
	vaesenc $T1,$inout1,$inout1
	vaesenc $T1,$inout2,$inout2
	vaesenc $T1,$inout3,$inout3
	vaesenc $T1,$inout4,$inout4
	vmovups 0xd0-0x80($key),$rndkey
	vaesenc $T1,$inout5,$inout5
	vmovups 0xe0-0x80($key),$T1
	jmp .Lenc_tail			# 256-bit key

.align	32
.Lhandle_ctr32:
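	# Slow path: byte-swap the counter, perform all six increments
	# as full 32-bit vpaddd additions so carries propagate, then
	# swap back before feeding the blocks to the AES rounds.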
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	vpshufb $Ii,$T1,$Z2		# byte-swap counter
	vmovdqu 0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd 0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd $Z1,$Z2,$inout2
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddd $Z1,$inout1,$inout3
	vpshufb $Ii,$inout1,$inout1
	vpaddd $Z1,$inout2,$inout4
	vpshufb $Ii,$inout2,$inout2
	vpxor $rndkey,$inout1,$inout1
	vpaddd $Z1,$inout3,$inout5
	vpshufb $Ii,$inout3,$inout3
	vpxor $rndkey,$inout2,$inout2
	vpaddd $Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb $Ii,$inout4,$inout4
	vpshufb $Ii,$inout5,$inout5
	vpshufb $Ii,$T1,$T1		# next counter value
	jmp .Lresume_ctr32

.align	32
.Lenc_tail:
	vaesenc $rndkey,$inout0,$inout0
	vmovdqu $Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr \$8,$Z0,$Z0,$Xi	# 2nd phase
	vaesenc $rndkey,$inout1,$inout1
	vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
	vpxor 0x00($inp),$T1,$T2
	vaesenc $rndkey,$inout2,$inout2
	vpxor 0x10($inp),$T1,$Ii
	vaesenc $rndkey,$inout3,$inout3
	vpxor 0x20($inp),$T1,$Z1
	vaesenc $rndkey,$inout4,$inout4
	vpxor 0x30($inp),$T1,$Z2
	vaesenc $rndkey,$inout5,$inout5
	vpxor 0x40($inp),$T1,$Z3
	vpxor 0x50($inp),$T1,$Hkey
	vmovdqu ($ivp),$T1		# load next counter value

	vaesenclast $T2,$inout0,$inout0
	vmovdqu 0x20($const),$T2	# borrow $T2, .Lone_msb
	vaesenclast $Ii,$inout1,$inout1
	vpaddb $T2,$T1,$Ii
	mov %r13,0x70+8(%rsp)
	lea 0x60($inp),$inp
	vaesenclast $Z1,$inout2,$inout2
	vpaddb $T2,$Ii,$Z1
	mov %r12,0x78+8(%rsp)
	lea 0x60($out),$out
	vmovdqu 0x00-0x80($key),$rndkey
	vaesenclast $Z2,$inout3,$inout3
	vpaddb $T2,$Z1,$Z2
	vaesenclast $Z3,$inout4,$inout4
	vpaddb $T2,$Z2,$Z3
	vaesenclast $Hkey,$inout5,$inout5
	vpaddb $T2,$Z3,$Hkey

	add \$0x60,$ret
	sub \$0x6,$len
	jc .L6x_done

	vmovups $inout0,-0x60($out)	# save output
	vpxor $rndkey,$T1,$inout0
	vmovups $inout1,-0x50($out)
	vmovdqa $Ii,$inout1		# 0 latency
	vmovups $inout2,-0x40($out)
	vmovdqa $Z1,$inout2		# 0 latency
	vmovups $inout3,-0x30($out)
	vmovdqa $Z2,$inout3		# 0 latency
	vmovups $inout4,-0x20($out)
	vmovdqa $Z3,$inout4		# 0 latency
	vmovups $inout5,-0x10($out)
	vmovdqa $Hkey,$inout5		# 0 latency
	vmovdqu 0x20+8(%rsp),$Z3	# I[5]
	jmp .Loop6x

.L6x_done:
	vpxor 16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor $Z0,$Xi,$Xi		# modulo-scheduled
	ret
.cfi_endproc
.size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#		const AES_KEY *key, unsigned char iv[16],
#		struct { u128 Xi,H,Htbl[9]; } *Xip);
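#
# Both entry points return the number of bytes actually processed, a
# multiple of 0x60; inputs shorter than 0x60 (decrypt) or 3*0x60
# (encrypt) bytes are rejected with a return value of 0, and the caller
# finishes any tail itself. A hypothetical caller sketch (illustrative
# only, not the internal OpenSSL usage):
#
#	size_t done = aesni_gcm_encrypt(inp, out, len, key, iv, Xip);
#	/* en/decrypt and hash the remaining len-done bytes elsewhere */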
$code.=<<___;
.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@function,6
.align	32
aesni_gcm_decrypt:
.cfi_startproc
	xor $ret,$ret
	cmp \$0x60,$len			# minimal accepted length
	jb .Lgcm_dec_abort

	lea (%rsp),%rax			# save stack pointer
.cfi_def_cfa_register %rax
	push %rbx
.cfi_push %rbx
	push %rbp
.cfi_push %rbp
	push %r12
.cfi_push %r12
	push %r13
.cfi_push %r13
	push %r14
.cfi_push %r14
	push %r15
.cfi_push %r15
___
$code.=<<___ if ($win64);
	lea -0xa8(%rsp),%rsp
	movaps %xmm6,-0xd8(%rax)
	movaps %xmm7,-0xc8(%rax)
	movaps %xmm8,-0xb8(%rax)
	movaps %xmm9,-0xa8(%rax)
	movaps %xmm10,-0x98(%rax)
	movaps %xmm11,-0x88(%rax)
	movaps %xmm12,-0x78(%rax)
	movaps %xmm13,-0x68(%rax)
	movaps %xmm14,-0x58(%rax)
	movaps %xmm15,-0x48(%rax)
.Lgcm_dec_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu ($ivp),$T1		# input counter value
	add \$-128,%rsp
	mov 12($ivp),$counter
	lea .Lbswap_mask(%rip),$const
	lea -0x80($key),$in0		# borrow $in0
	mov \$0xf80,$end0		# borrow $end0
	vmovdqu ($Xip),$Xi		# load Xi
	and \$-128,%rsp			# ensure stack alignment
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	lea 0x80($key),$key		# size optimization
	lea 0x20+0x20($Xip),$Xip	# size optimization
	mov 0xf0-0x80($key),$rounds
	vpshufb $Ii,$Xi,$Xi
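	# Compare the key schedule's and the stack frame's addresses
	# under the 0xf80 mask; if they are within 768 bytes of
	# aliasing, move %rsp further down, presumably so round-key
	# loads and stack spills do not land in the same cache sets.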
	and $end0,$in0
	and %rsp,$end0
	sub $in0,$end0
	jc .Ldec_no_key_aliasing
	cmp \$768,$end0
	jnc .Ldec_no_key_aliasing
	sub $end0,%rsp			# avoid aliasing with key
.Ldec_no_key_aliasing:

	vmovdqu 0x50($inp),$Z3		# I[5]
	lea ($inp),$in0
	vmovdqu 0x40($inp),$Z0
	lea -0xc0($inp,$len),$end0
	vmovdqu 0x30($inp),$Z1
	shr \$4,$len
	xor $ret,$ret
	vmovdqu 0x20($inp),$Z2
	vpshufb $Ii,$Z3,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu 0x10($inp),$T2
	vpshufb $Ii,$Z0,$Z0
	vmovdqu ($inp),$Hkey
	vpshufb $Ii,$Z1,$Z1
	vmovdqu $Z0,0x30(%rsp)
	vpshufb $Ii,$Z2,$Z2
	vmovdqu $Z1,0x40(%rsp)
	vpshufb $Ii,$T2,$T2
	vmovdqu $Z2,0x50(%rsp)
	vpshufb $Ii,$Hkey,$Hkey
	vmovdqu $T2,0x60(%rsp)
	vmovdqu $Hkey,0x70(%rsp)

	call _aesni_ctr32_ghash_6x

	vmovups $inout0,-0x60($out)	# save output
	vmovups $inout1,-0x50($out)
	vmovups $inout2,-0x40($out)
	vmovups $inout3,-0x30($out)
	vmovups $inout4,-0x20($out)
	vmovups $inout5,-0x10($out)

	vpshufb ($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu $Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps -0xd8(%rax),%xmm6
	movaps -0xc8(%rax),%xmm7
	movaps -0xb8(%rax),%xmm8
	movaps -0xa8(%rax),%xmm9
	movaps -0x98(%rax),%xmm10
	movaps -0x88(%rax),%xmm11
	movaps -0x78(%rax),%xmm12
	movaps -0x68(%rax),%xmm13
	movaps -0x58(%rax),%xmm14
	movaps -0x48(%rax),%xmm15
___
$code.=<<___;
	mov -48(%rax),%r15
.cfi_restore %r15
	mov -40(%rax),%r14
.cfi_restore %r14
	mov -32(%rax),%r13
.cfi_restore %r13
	mov -24(%rax),%r12
.cfi_restore %r12
	mov -16(%rax),%rbp
.cfi_restore %rbp
	mov -8(%rax),%rbx
.cfi_restore %rbx
	lea (%rax),%rsp			# restore %rsp
.cfi_def_cfa_register %rsp
.Lgcm_dec_abort:
	mov $ret,%rax			# return value
	ret
.cfi_endproc
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
___

$code.=<<___;
.type	_aesni_ctr32_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_6x:
.cfi_startproc
	vmovdqu 0x00-0x80($key),$Z0	# borrow $Z0 for $rndkey
	vmovdqu 0x20($const),$T2	# borrow $T2, .Lone_msb
	lea -1($rounds),%r13
	vmovups 0x10-0x80($key),$rndkey
	lea 0x20-0x80($key),%r12
	vpxor $Z0,$T1,$inout0
	add \$`6<<24`,$counter
	jc .Lhandle_ctr32_2
	vpaddb $T2,$T1,$inout1
	vpaddb $T2,$inout1,$inout2
	vpxor $Z0,$inout1,$inout1
	vpaddb $T2,$inout2,$inout3
	vpxor $Z0,$inout2,$inout2
	vpaddb $T2,$inout3,$inout4
	vpxor $Z0,$inout3,$inout3
	vpaddb $T2,$inout4,$inout5
	vpxor $Z0,$inout4,$inout4
	vpaddb $T2,$inout5,$T1
	vpxor $Z0,$inout5,$inout5
	jmp .Loop_ctr32

.align	16
.Loop_ctr32:
	vaesenc $rndkey,$inout0,$inout0
	vaesenc $rndkey,$inout1,$inout1
	vaesenc $rndkey,$inout2,$inout2
	vaesenc $rndkey,$inout3,$inout3
	vaesenc $rndkey,$inout4,$inout4
	vaesenc $rndkey,$inout5,$inout5
	vmovups (%r12),$rndkey
	lea 0x10(%r12),%r12
	dec %r13d
	jnz .Loop_ctr32

	vmovdqu (%r12),$Hkey		# last round key
	vaesenc $rndkey,$inout0,$inout0
	vpxor 0x00($inp),$Hkey,$Z0
	vaesenc $rndkey,$inout1,$inout1
	vpxor 0x10($inp),$Hkey,$Z1
	vaesenc $rndkey,$inout2,$inout2
	vpxor 0x20($inp),$Hkey,$Z2
	vaesenc $rndkey,$inout3,$inout3
	vpxor 0x30($inp),$Hkey,$Xi
	vaesenc $rndkey,$inout4,$inout4
	vpxor 0x40($inp),$Hkey,$T2
	vaesenc $rndkey,$inout5,$inout5
	vpxor 0x50($inp),$Hkey,$Hkey
	lea 0x60($inp),$inp

	vaesenclast $Z0,$inout0,$inout0
	vaesenclast $Z1,$inout1,$inout1
	vaesenclast $Z2,$inout2,$inout2
	vaesenclast $Xi,$inout3,$inout3
	vaesenclast $T2,$inout4,$inout4
	vaesenclast $Hkey,$inout5,$inout5
	vmovups $inout0,0x00($out)
	vmovups $inout1,0x10($out)
	vmovups $inout2,0x20($out)
	vmovups $inout3,0x30($out)
	vmovups $inout4,0x40($out)
	vmovups $inout5,0x50($out)
	lea 0x60($out),$out
	ret

.align	32
.Lhandle_ctr32_2:
	vpshufb $Ii,$T1,$Z2		# byte-swap counter
	vmovdqu 0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd 0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd $Z1,$Z2,$inout2
	vpaddd $Z1,$inout1,$inout3
	vpshufb $Ii,$inout1,$inout1
	vpaddd $Z1,$inout2,$inout4
	vpshufb $Ii,$inout2,$inout2
	vpxor $Z0,$inout1,$inout1
	vpaddd $Z1,$inout3,$inout5
	vpshufb $Ii,$inout3,$inout3
	vpxor $Z0,$inout2,$inout2
	vpaddd $Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb $Ii,$inout4,$inout4
	vpxor $Z0,$inout3,$inout3
	vpshufb $Ii,$inout5,$inout5
	vpxor $Z0,$inout4,$inout4
	vpshufb $Ii,$T1,$T1		# next counter value
	vpxor $Z0,$inout5,$inout5
	jmp .Loop_ctr32
.cfi_endproc
.size _aesni_ctr32_6x,.-_aesni_ctr32_6x
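# Encryption must produce ciphertext before it can be hashed, so the
# entry point below primes the pipeline with two CTR-only passes over
# the first twelve blocks via _aesni_ctr32_6x, then runs the stitched
# loop with GHASH trailing the cipher; the last twelve blocks are
# hashed by the straight-line code after the final call.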
.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@function,6
.align	32
aesni_gcm_encrypt:
.cfi_startproc
	xor $ret,$ret
	cmp \$0x60*3,$len		# minimal accepted length
	jb .Lgcm_enc_abort

	lea (%rsp),%rax			# save stack pointer
.cfi_def_cfa_register %rax
	push %rbx
.cfi_push %rbx
	push %rbp
.cfi_push %rbp
	push %r12
.cfi_push %r12
	push %r13
.cfi_push %r13
	push %r14
.cfi_push %r14
	push %r15
.cfi_push %r15
___
$code.=<<___ if ($win64);
	lea -0xa8(%rsp),%rsp
	movaps %xmm6,-0xd8(%rax)
	movaps %xmm7,-0xc8(%rax)
	movaps %xmm8,-0xb8(%rax)
	movaps %xmm9,-0xa8(%rax)
	movaps %xmm10,-0x98(%rax)
	movaps %xmm11,-0x88(%rax)
	movaps %xmm12,-0x78(%rax)
	movaps %xmm13,-0x68(%rax)
	movaps %xmm14,-0x58(%rax)
	movaps %xmm15,-0x48(%rax)
.Lgcm_enc_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu ($ivp),$T1		# input counter value
	add \$-128,%rsp
	mov 12($ivp),$counter
	lea .Lbswap_mask(%rip),$const
	lea -0x80($key),$in0		# borrow $in0
	mov \$0xf80,$end0		# borrow $end0
	lea 0x80($key),$key		# size optimization
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	and \$-128,%rsp			# ensure stack alignment
	mov 0xf0-0x80($key),$rounds

	and $end0,$in0
	and %rsp,$end0
	sub $in0,$end0
	jc .Lenc_no_key_aliasing
	cmp \$768,$end0
	jnc .Lenc_no_key_aliasing
	sub $end0,%rsp			# avoid aliasing with key
.Lenc_no_key_aliasing:

	lea ($out),$in0
	lea -0xc0($out,$len),$end0
	shr \$4,$len

	call _aesni_ctr32_6x
	vpshufb $Ii,$inout0,$Xi		# save bswapped output on stack
	vpshufb $Ii,$inout1,$T2
	vmovdqu $Xi,0x70(%rsp)
	vpshufb $Ii,$inout2,$Z0
	vmovdqu $T2,0x60(%rsp)
	vpshufb $Ii,$inout3,$Z1
	vmovdqu $Z0,0x50(%rsp)
	vpshufb $Ii,$inout4,$Z2
	vmovdqu $Z1,0x40(%rsp)
	vpshufb $Ii,$inout5,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu $Z2,0x30(%rsp)
	call _aesni_ctr32_6x

	vmovdqu ($Xip),$Xi		# load Xi
	lea 0x20+0x20($Xip),$Xip	# size optimization
	sub \$12,$len
	mov \$0x60*2,$ret
	vpshufb $Ii,$Xi,$Xi

	call _aesni_ctr32_ghash_6x
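	# At this point GHASH is twelve blocks behind: six byte-swapped
	# ciphertext blocks are parked on the stack and six more sit in
	# $inout0..$inout5. The straight-line Karatsuba code below
	# folds all twelve into $Xi and performs the final reduction.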
	vmovdqu 0x20(%rsp),$Z3		# I[5]
	vmovdqu ($const),$Ii		# borrow $Ii for .Lbswap_mask
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq $Z3,$Z3,$T1
	vmovdqu 0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
	vmovups $inout0,-0x60($out)	# save output
	vpshufb $Ii,$inout0,$inout0	# but keep bswapped copy
	vpxor $Z3,$T1,$T1
	vmovups $inout1,-0x50($out)
	vpshufb $Ii,$inout1,$inout1
	vmovups $inout2,-0x40($out)
	vpshufb $Ii,$inout2,$inout2
	vmovups $inout3,-0x30($out)
	vpshufb $Ii,$inout3,$inout3
	vmovups $inout4,-0x20($out)
	vpshufb $Ii,$inout4,$inout4
	vmovups $inout5,-0x10($out)
	vpshufb $Ii,$inout5,$inout5
	vmovdqu $inout0,0x10(%rsp)	# free $inout0
___
{ my ($HK,$T3)=($rndkey,$inout0);

$code.=<<___;
	vmovdqu 0x30(%rsp),$Z2		# I[4]
	vmovdqu 0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpunpckhqdq $Z2,$Z2,$T2
	vpclmulqdq \$0x00,$Hkey,$Z3,$Z1
	vpxor $Z2,$T2,$T2
	vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
	vpclmulqdq \$0x00,$HK,$T1,$T1

	vmovdqu 0x40(%rsp),$T3		# I[3]
	vpclmulqdq \$0x00,$Ii,$Z2,$Z0
	vmovdqu 0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor $Z1,$Z0,$Z0
	vpunpckhqdq $T3,$T3,$Z1
	vpclmulqdq \$0x11,$Ii,$Z2,$Z2
	vpxor $T3,$Z1,$Z1
	vpxor $Z3,$Z2,$Z2
	vpclmulqdq \$0x10,$HK,$T2,$T2
	vmovdqu 0x50-0x20($Xip),$HK
	vpxor $T1,$T2,$T2

	vmovdqu 0x50(%rsp),$T1		# I[2]
	vpclmulqdq \$0x00,$Hkey,$T3,$Z3
	vmovdqu 0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor $Z0,$Z3,$Z3
	vpunpckhqdq $T1,$T1,$Z0
	vpclmulqdq \$0x11,$Hkey,$T3,$T3
	vpxor $T1,$Z0,$Z0
	vpxor $Z2,$T3,$T3
	vpclmulqdq \$0x00,$HK,$Z1,$Z1
	vpxor $T2,$Z1,$Z1

	vmovdqu 0x60(%rsp),$T2		# I[1]
	vpclmulqdq \$0x00,$Ii,$T1,$Z2
	vmovdqu 0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor $Z3,$Z2,$Z2
	vpunpckhqdq $T2,$T2,$Z3
	vpclmulqdq \$0x11,$Ii,$T1,$T1
	vpxor $T2,$Z3,$Z3
	vpxor $T3,$T1,$T1
	vpclmulqdq \$0x10,$HK,$Z0,$Z0
	vmovdqu 0x80-0x20($Xip),$HK
	vpxor $Z1,$Z0,$Z0

	vpxor 0x70(%rsp),$Xi,$Xi	# accumulate I[0]
	vpclmulqdq \$0x00,$Hkey,$T2,$Z1
	vmovdqu 0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpunpckhqdq $Xi,$Xi,$T3
	vpxor $Z2,$Z1,$Z1
	vpclmulqdq \$0x11,$Hkey,$T2,$T2
	vpxor $Xi,$T3,$T3
	vpxor $T1,$T2,$T2
	vpclmulqdq \$0x00,$HK,$Z3,$Z3
	vpxor $Z0,$Z3,$Z0

	vpclmulqdq \$0x00,$Ii,$Xi,$Z2
	vmovdqu 0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq $inout5,$inout5,$T1
	vpclmulqdq \$0x11,$Ii,$Xi,$Xi
	vpxor $inout5,$T1,$T1
	vpxor $Z1,$Z2,$Z1
	vpclmulqdq \$0x10,$HK,$T3,$T3
	vmovdqu 0x20-0x20($Xip),$HK
	vpxor $T2,$Xi,$Z3
	vpxor $Z0,$T3,$Z2

	vmovdqu 0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpxor $Z1,$Z3,$T3		# aggregated Karatsuba post-processing
	vpclmulqdq \$0x00,$Hkey,$inout5,$Z0
	vpxor $T3,$Z2,$Z2
	vpunpckhqdq $inout4,$inout4,$T2
	vpclmulqdq \$0x11,$Hkey,$inout5,$inout5
	vpxor $inout4,$T2,$T2
	vpslldq \$8,$Z2,$T3
	vpclmulqdq \$0x00,$HK,$T1,$T1
	vpxor $T3,$Z1,$Xi
	vpsrldq \$8,$Z2,$Z2
	vpxor $Z2,$Z3,$Z3

	vpclmulqdq \$0x00,$Ii,$inout4,$Z1
	vmovdqu 0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor $Z0,$Z1,$Z1
	vpunpckhqdq $inout3,$inout3,$T3
	vpclmulqdq \$0x11,$Ii,$inout4,$inout4
	vpxor $inout3,$T3,$T3
	vpxor $inout5,$inout4,$inout4
	vpalignr \$8,$Xi,$Xi,$inout5	# 1st phase
	vpclmulqdq \$0x10,$HK,$T2,$T2
	vmovdqu 0x50-0x20($Xip),$HK
	vpxor $T1,$T2,$T2

	vpclmulqdq \$0x00,$Hkey,$inout3,$Z0
	vmovdqu 0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor $Z1,$Z0,$Z0
	vpunpckhqdq $inout2,$inout2,$T1
	vpclmulqdq \$0x11,$Hkey,$inout3,$inout3
	vpxor $inout2,$T1,$T1
	vpxor $inout4,$inout3,$inout3
	vxorps 0x10(%rsp),$Z3,$Z3	# accumulate $inout0
	vpclmulqdq \$0x00,$HK,$T3,$T3
	vpxor $T2,$T3,$T3

	vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
	vxorps $inout5,$Xi,$Xi

	vpclmulqdq \$0x00,$Ii,$inout2,$Z1
	vmovdqu 0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor $Z0,$Z1,$Z1
	vpunpckhqdq $inout1,$inout1,$T2
	vpclmulqdq \$0x11,$Ii,$inout2,$inout2
	vpxor $inout1,$T2,$T2
	vpalignr \$8,$Xi,$Xi,$inout5	# 2nd phase
	vpxor $inout3,$inout2,$inout2
	vpclmulqdq \$0x10,$HK,$T1,$T1
	vmovdqu 0x80-0x20($Xip),$HK
	vpxor $T3,$T1,$T1

	vxorps $Z3,$inout5,$inout5
	vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
	vxorps $inout5,$Xi,$Xi

	vpclmulqdq \$0x00,$Hkey,$inout1,$Z0
	vmovdqu 0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpxor $Z1,$Z0,$Z0
	vpunpckhqdq $Xi,$Xi,$T3
	vpclmulqdq \$0x11,$Hkey,$inout1,$inout1
	vpxor $Xi,$T3,$T3
	vpxor $inout2,$inout1,$inout1
	vpclmulqdq \$0x00,$HK,$T2,$T2
	vpxor $T1,$T2,$T2

	vpclmulqdq \$0x00,$Ii,$Xi,$Z1
	vpclmulqdq \$0x11,$Ii,$Xi,$Z3
	vpxor $Z0,$Z1,$Z1
	vpclmulqdq \$0x10,$HK,$T3,$Z2
	vpxor $inout1,$Z3,$Z3
	vpxor $T2,$Z2,$Z2

	vpxor $Z1,$Z3,$Z0		# aggregated Karatsuba post-processing
	vpxor $Z0,$Z2,$Z2
	vpslldq \$8,$Z2,$T1
	vmovdqu 0x10($const),$Hkey	# .Lpoly
	vpsrldq \$8,$Z2,$Z2
	vpxor $T1,$Z1,$Xi
	vpxor $Z2,$Z3,$Z3

	vpalignr \$8,$Xi,$Xi,$T2	# 1st phase
	vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
	vpxor $T2,$Xi,$Xi

	vpalignr \$8,$Xi,$Xi,$T2	# 2nd phase
	vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
	vpxor $Z3,$T2,$T2
	vpxor $T2,$Xi,$Xi
___
}
$code.=<<___;
	vpshufb ($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu $Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps -0xd8(%rax),%xmm6
	movaps -0xc8(%rax),%xmm7
	movaps -0xb8(%rax),%xmm8
	movaps -0xa8(%rax),%xmm9
	movaps -0x98(%rax),%xmm10
	movaps -0x88(%rax),%xmm11
	movaps -0x78(%rax),%xmm12
	movaps -0x68(%rax),%xmm13
	movaps -0x58(%rax),%xmm14
	movaps -0x48(%rax),%xmm15
___
$code.=<<___;
	mov -48(%rax),%r15
.cfi_restore %r15
	mov -40(%rax),%r14
.cfi_restore %r14
	mov -32(%rax),%r13
.cfi_restore %r13
	mov -24(%rax),%r12
.cfi_restore %r12
	mov -16(%rax),%rbp
.cfi_restore %rbp
	mov -8(%rax),%rbx
.cfi_restore %rbx
	lea (%rax),%rsp			# restore %rsp
.cfi_def_cfa_register %rsp
.Lgcm_enc_abort:
	mov $ret,%rax			# return value
	ret
.cfi_endproc
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
___

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
	.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
	.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
	.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
	.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
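# .Lbswap_mask reverses a 16-byte lane; .Lpoly carries the GCM
# reduction constant (0xc2 in the last byte, the bit-reflected form of
# the polynomial x^128+x^7+x^2+x+1); .Lone_msb, .Ltwo_lsb and .Lone_lsb
# are the counter increments used by the fast and slow paths above.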
	.asciz "AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___
.extern	__imp_RtlVirtualUnwind
.type	gcm_se_handler,\@abi-omnipotent
.align	16
gcm_se_handler:
	push %rsi
	push %rdi
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
	pushfq
	sub \$64,%rsp

	mov 120($context),%rax		# pull context->Rax
	mov 248($context),%rbx		# pull context->Rip

	mov 8($disp),%rsi		# disp->ImageBase
	mov 56($disp),%r11		# disp->HandlerData

	mov 0(%r11),%r10d		# HandlerData[0]
	lea (%rsi,%r10),%r10		# prologue label
	cmp %r10,%rbx			# context->Rip<prologue label
	jb .Lcommon_seh_tail

	mov 152($context),%rax		# pull context->Rsp

	mov 4(%r11),%r10d		# HandlerData[1]
	lea (%rsi,%r10),%r10		# epilogue label
	cmp %r10,%rbx			# context->Rip>=epilogue label
	jae .Lcommon_seh_tail

	mov 120($context),%rax		# pull context->Rax

	mov -48(%rax),%r15
	mov -40(%rax),%r14
	mov -32(%rax),%r13
	mov -24(%rax),%r12
	mov -16(%rax),%rbp
	mov -8(%rax),%rbx
	mov %r15,240($context)
	mov %r14,232($context)
	mov %r13,224($context)
	mov %r12,216($context)
	mov %rbp,160($context)
	mov %rbx,144($context)

	lea -0xd8(%rax),%rsi		# %xmm save area
	lea 512($context),%rdi		# & context.Xmm6
	mov \$20,%ecx			# 10*sizeof(%xmm0)/sizeof(%rax)
	.long 0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov 8(%rax),%rdi
	mov 16(%rax),%rsi
	mov %rax,152($context)		# restore context->Rsp
	mov %rsi,168($context)		# restore context->Rsi
	mov %rdi,176($context)		# restore context->Rdi

	mov 40($disp),%rdi		# disp->ContextRecord
	mov $context,%rsi		# context
	mov \$154,%ecx			# sizeof(CONTEXT)
	.long 0xa548f3fc		# cld; rep movsq

	mov $disp,%rsi
	xor %rcx,%rcx			# arg1, UNW_FLAG_NHANDLER
	mov 8(%rsi),%rdx		# arg2, disp->ImageBase
	mov 0(%rsi),%r8			# arg3, disp->ControlPc
	mov 16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov 40(%rsi),%r10		# disp->ContextRecord
	lea 56(%rsi),%r11		# &disp->HandlerData
	lea 24(%rsi),%r12		# &disp->EstablisherFrame
	mov %r10,32(%rsp)		# arg5
	mov %r11,40(%rsp)		# arg6
	mov %r12,48(%rsp)		# arg7
	mov %rcx,56(%rsp)		# arg8, (NULL)
	call *__imp_RtlVirtualUnwind(%rip)

	mov \$1,%eax			# ExceptionContinueSearch
	add \$64,%rsp
	popfq
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbp
	pop %rbx
	pop %rdi
	pop %rsi
	ret
.size gcm_se_handler,.-gcm_se_handler

.section .pdata
.align	4
	.rva .LSEH_begin_aesni_gcm_decrypt
	.rva .LSEH_end_aesni_gcm_decrypt
	.rva .LSEH_gcm_dec_info

	.rva .LSEH_begin_aesni_gcm_encrypt
	.rva .LSEH_end_aesni_gcm_encrypt
	.rva .LSEH_gcm_enc_info
.section .xdata
.align	8
.LSEH_gcm_dec_info:
	.byte 9,0,0,0
	.rva gcm_se_handler
	.rva .Lgcm_dec_body,.Lgcm_dec_abort
.LSEH_gcm_enc_info:
	.byte 9,0,0,0
	.rva gcm_se_handler
	.rva .Lgcm_enc_body,.Lgcm_enc_abort
___
}
}}} else {{{
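# With a pre-AVX assembler the stitched code cannot be emitted, so
# provide stubs that report 0 bytes processed and let the caller fall
# back to its generic CTR and GHASH routines.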
$code=<<___;	# assembler is too old
.text

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
.cfi_startproc
	xor %eax,%eax
	ret
.cfi_endproc
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt

.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
.cfi_startproc
	xor %eax,%eax
	ret
.cfi_endproc
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
}}}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT or die "error closing STDOUT: $!";