#! /usr/bin/env perl
# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for PowerISA v2.07.
#
# July 2014
#
# Accurate performance measurements are problematic, because it's
# always a virtualized setup with a possibly throttled processor.
# Relative comparison is therefore more informative. This initial
# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
# faster than "4-bit" integer-only compiler-generated 64-bit code.
# "Initial version" means that there is room for further improvement.
#
# May 2016
#
# 2x aggregated reduction improves performance by 50% (resulting
# performance on POWER8 is 1 cycle per processed byte), and 4x
# aggregated reduction - by 170% or 2.7x (resulting in 0.55 cpb).
# POWER9 delivers 0.51 cpb.
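#
# GHASH background, for reference: each 16-byte block is folded as
# Xi = (Xi xor block)*H in GF(2^128) modulo x^128+x^7+x^2+x+1, in the
# bit-reflected convention of the GCM spec - hence the 0xc2...01
# reduction constant constructed below rather than 0x87. "Nx
# aggregated reduction" defers the modular reduction across N blocks,
# e.g. for N=4:
#
#   Xi+4 = ((((Xi ^ in0)*H ^ in1)*H ^ in2)*H ^ in3)*H
#        = (Xi ^ in0)*H^4 ^ in1*H^3 ^ in2*H^2 ^ in3*H
#
# so one reduction is amortized over four multiplications, at the
# cost of pre-computing the powers of H in gcm_init_p8.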

$flavour = shift;
$output  = shift;

if ($flavour =~ /64/) {
	$SIZE_T=8;
	$LRSAVE=2*$SIZE_T;
	$STU="stdu";
	$POP="ld";
	$PUSH="std";
	$UCMP="cmpld";
	$SHRI="srdi";
} elsif ($flavour =~ /32/) {
	$SIZE_T=4;
	$LRSAVE=$SIZE_T;
	$STU="stwu";
	$POP="lwz";
	$PUSH="stw";
	$UCMP="cmplw";
	$SHRI="srwi";
} else { die "nonsense $flavour"; }
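
# Conventional perlasm invocation (an assumption based on standard
# CRYPTOGAMS usage; flavour strings are the targets understood by
# ppc-xlate.pl, e.g. "linux32", "linux64", "linux64le"):
#
#	perl ghashp8-ppc.pl linux64le ghashp8-ppc.s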

$sp="r1";
$FRAME=6*$SIZE_T+13*16;	# 13*16 is for v20-v31 offload

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";

my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6));	# argument block

my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
my $vrsave="r12";
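
# Each entry point below saves the vrsave SPR (number 256), which by
# ABI convention marks the set of live vector registers, enables the
# ones it is about to use with mtspr, and restores the caller's mask
# before returning.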

$code=<<___;
.machine	"any"

.text

.globl	.gcm_init_p8
.align	5
.gcm_init_p8:
	li		r0,-4096
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$H,0,r4			# load H

	vspltisb	$xC2,-16		# 0xf0
	vspltisb	$t0,1			# one
	vaddubm		$xC2,$xC2,$xC2		# 0xe0
	vxor		$zero,$zero,$zero
	vor		$xC2,$xC2,$t0		# 0xe1
	vsldoi		$xC2,$xC2,$zero,15	# 0xe1...
	vsldoi		$t1,$zero,$t0,1		# ...1
	vaddubm		$xC2,$xC2,$xC2		# 0xc2...
	vspltisb	$t2,7
	vor		$xC2,$xC2,$t1		# 0xc2....01
	vspltb		$t1,$H,0		# most significant byte
	vsl		$H,$H,$t0		# H<<=1
	vsrab		$t1,$t1,$t2		# broadcast carry bit
	vand		$t1,$t1,$xC2
	vxor		$IN,$H,$t1		# twisted H

	vsldoi		$H,$IN,$IN,8		# twist even more ...
	vsldoi		$xC2,$zero,$xC2,8	# 0xc2.0
	vsldoi		$Hl,$zero,$H,8		# ... and split
	vsldoi		$Hh,$H,$zero,8
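
	# The twist above multiplies H by x modulo the pentanomial and
	# swaps the doubleword halves, so that vpmsumd products line up
	# with the two-phase 0xc2 reduction used throughout. Hl and Hh
	# hold the low and high doublewords, each zero-padded to 128
	# bits, so lo*lo, hi*hi and the cross terms take three vpmsumd
	# per multiplication.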

	stvx_u		$xC2,0,r3		# save pre-computed table
	stvx_u		$Hl,r8,r3
	li		r8,0x40
	stvx_u		$H, r9,r3
	li		r9,0x50
	stvx_u		$Hh,r10,r3
	li		r10,0x60
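
	# Square the twisted H: three vpmsumd produce the 256-bit
	# product in (Xh,Xm,Xl), and the two multiplications by the
	# 0xc2 constant below fold it back to 128 bits, yielding H^2.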
	vpmsumd		$Xl,$IN,$Hl		# H.lo·H.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·H.lo+H.lo·H.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·H.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$IN1,$Xl,$t1

	vsldoi		$H2,$IN1,$IN1,8
	vsldoi		$H2l,$zero,$H2,8
	vsldoi		$H2h,$H2,$zero,8

	stvx_u		$H2l,r8,r3		# save H^2
	li		r8,0x70
	stvx_u		$H2,r9,r3
	li		r9,0x80
	stvx_u		$H2h,r10,r3
	li		r10,0x90
___
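
# One more pass of the same multiply-and-reduce pattern, run on two
# operands in parallel (IN holds twisted H, IN1 twisted H^2), computes
# H^3 = H*H^2 and H^4 = H^2*H^2 and completes the table.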
{
my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
$code.=<<___;
	vpmsumd		$Xl,$IN,$H2l		# H.lo·H^2.lo
	vpmsumd		$Xl1,$IN1,$H2l		# H^2.lo·H^2.lo
	vpmsumd		$Xm,$IN,$H2		# H.hi·H^2.lo+H.lo·H^2.hi
	vpmsumd		$Xm1,$IN1,$H2		# H^2.hi·H^2.lo+H^2.lo·H^2.hi
	vpmsumd		$Xh,$IN,$H2h		# H.hi·H^2.hi
	vpmsumd		$Xh1,$IN1,$H2h		# H^2.hi·H^2.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase
	vpmsumd		$t6,$Xl1,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vsldoi		$t4,$Xm1,$zero,8
	vsldoi		$t5,$zero,$Xm1,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1
	vxor		$Xl1,$Xl1,$t4
	vxor		$Xh1,$Xh1,$t5

	vsldoi		$Xl,$Xl,$Xl,8
	vsldoi		$Xl1,$Xl1,$Xl1,8
	vxor		$Xl,$Xl,$t2
	vxor		$Xl1,$Xl1,$t6

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vsldoi		$t5,$Xl1,$Xl1,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vpmsumd		$Xl1,$Xl1,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$t5,$t5,$Xh1
	vxor		$Xl,$Xl,$t1
	vxor		$Xl1,$Xl1,$t5

	vsldoi		$H,$Xl,$Xl,8
	vsldoi		$H2,$Xl1,$Xl1,8
	vsldoi		$Hl,$zero,$H,8
	vsldoi		$Hh,$H,$zero,8
	vsldoi		$H2l,$zero,$H2,8
	vsldoi		$H2h,$H2,$zero,8

	stvx_u		$Hl,r8,r3		# save H^3
	li		r8,0xa0
	stvx_u		$H,r9,r3
	li		r9,0xb0
	stvx_u		$Hh,r10,r3
	li		r10,0xc0
	stvx_u		$H2l,r8,r3		# save H^4
	stvx_u		$H2,r9,r3
	stvx_u		$H2h,r10,r3

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_init_p8,.-.gcm_init_p8
___
}
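
# Resulting table layout at Htbl (16-byte slots): 0x00 xC2, 0x10 H.lo,
# 0x20 H, 0x30 H.hi, 0x40 H^2.lo, 0x50 H^2, 0x60 H^2.hi, 0x70 H^3.lo,
# 0x80 H^3, 0x90 H^3.hi, 0xa0 H^4.lo, 0xb0 H^4, 0xc0 H^4.hi.
# gcm_gmult_p8 needs only the first four slots for a single Xi*H.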

$code.=<<___;
.globl	.gcm_gmult_p8
.align	5
.gcm_gmult_p8:
	lis		r0,0xfff8
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$IN,0,$Xip		# load Xi

	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
	le?lvsl		$lemask,r0,r0
	lvx_u		$H, r9,$Htbl
	le?vspltisb	$t0,0x07
	lvx_u		$Hh,r10,$Htbl
	le?vxor		$lemask,$lemask,$t0
	lvx_u		$xC2,0,$Htbl
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$zero,$zero,$zero

	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$Xl,$Xl,$t1

	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_gmult_p8,.-.gcm_gmult_p8

.globl	.gcm_ghash_p8
.align	5
.gcm_ghash_p8:
	li		r0,-4096
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$Xl,0,$Xip		# load Xi

	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
	li		r8,0x40
	le?lvsl		$lemask,r0,r0
	lvx_u		$H, r9,$Htbl
	li		r9,0x50
	le?vspltisb	$t0,0x07
	lvx_u		$Hh,r10,$Htbl
	li		r10,0x60
	le?vxor		$lemask,$lemask,$t0
	lvx_u		$xC2,0,$Htbl
	le?vperm	$Xl,$Xl,$Xl,$lemask
	vxor		$zero,$zero,$zero

	${UCMP}i	$len,64
	bge		Lgcm_ghash_p8_4x

	lvx_u		$IN,0,$inp
	addi		$inp,$inp,16
	subic.		$len,$len,16
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$IN,$IN,$Xl
	beq		Lshort

	lvx_u		$H2l,r8,$Htbl		# load H^2
	li		r8,16
	lvx_u		$H2, r9,$Htbl
	add		r9,$inp,$len		# end of input
	lvx_u		$H2h,r10,$Htbl
	be?b		Loop_2x

.align	5
Loop_2x:
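	# 2x aggregation: each pass folds two blocks, multiplying the
	# running sum by H^2 and the newer block by H, so one reduction
	# covers 32 bytes. The subic/subfe/and/add sequence is a
	# branchless tail adjustment: once fewer than 32 bytes remain,
	# r0 picks up the negative remainder and rewinds inp, so the
	# trailing (discarded) load stays within the input buffer.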
	lvx_u		$IN1,0,$inp
	le?vperm	$IN1,$IN1,$IN1,$lemask

	subic		$len,$len,32
	vpmsumd		$Xl,$IN,$H2l		# H^2.lo·Xi.lo
	vpmsumd		$Xl1,$IN1,$Hl		# H.lo·Xi+1.lo
	subfe		r0,r0,r0		# borrow?-1:0
	vpmsumd		$Xm,$IN,$H2		# H^2.hi·Xi.lo+H^2.lo·Xi.hi
	vpmsumd		$Xm1,$IN1,$H		# H.hi·Xi+1.lo+H.lo·Xi+1.hi
	and		r0,r0,$len
	vpmsumd		$Xh,$IN,$H2h		# H^2.hi·Xi.hi
	vpmsumd		$Xh1,$IN1,$Hh		# H.hi·Xi+1.hi
	add		$inp,$inp,r0

	vxor		$Xl,$Xl,$Xl1
	vxor		$Xm,$Xm,$Xm1

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xh,$Xh,$Xh1
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2
	lvx_u		$IN,r8,$inp
	addi		$inp,$inp,32

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$t1,$t1,$Xh
	vxor		$IN,$IN,$t1
	vxor		$IN,$IN,$Xl
	$UCMP		r9,$inp
	bgt		Loop_2x			# done yet?

	cmplwi		$len,0
	bne		Leven

Lshort:
	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh

Leven:
	vxor		$Xl,$Xl,$t1
	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,4,0
	.long		0
___
{
my ($Xl3,$Xm2,$IN2,$H3l,$H3,$H3h,
    $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
my $IN0=$IN;
my ($H21l,$H21h,$loperm,$hiperm) = ($Hl,$Hh,$H2l,$H2h);

$code.=<<___;
.align	5
.gcm_ghash_p8_4x:
Lgcm_ghash_p8_4x:
	$STU		$sp,-$FRAME($sp)
	li		r10,`15+6*$SIZE_T`
	li		r11,`31+6*$SIZE_T`
	stvx		v20,r10,$sp
	addi		r10,r10,32
	stvx		v21,r11,$sp
	addi		r11,r11,32
	stvx		v22,r10,$sp
	addi		r10,r10,32
	stvx		v23,r11,$sp
	addi		r11,r11,32
	stvx		v24,r10,$sp
	addi		r10,r10,32
	stvx		v25,r11,$sp
	addi		r11,r11,32
	stvx		v26,r10,$sp
	addi		r10,r10,32
	stvx		v27,r11,$sp
	addi		r11,r11,32
	stvx		v28,r10,$sp
	addi		r10,r10,32
	stvx		v29,r11,$sp
	addi		r11,r11,32
	stvx		v30,r10,$sp
	li		r10,0x60
	stvx		v31,r11,$sp
	li		r0,-1
	stw		$vrsave,`$FRAME-4`($sp)	# save vrsave
	mtspr		256,r0			# preserve all AltiVec registers

	lvsl		$t0,0,r8		# 0x0001..0e0f
	#lvx_u		$H2l,r8,$Htbl		# load H^2
	li		r8,0x70
	lvx_u		$H2, r9,$Htbl
	li		r9,0x80
	vspltisb	$t1,8			# 0x0808..0808
	#lvx_u		$H2h,r10,$Htbl
	li		r10,0x90
	lvx_u		$H3l,r8,$Htbl		# load H^3
	li		r8,0xa0
	lvx_u		$H3, r9,$Htbl
	li		r9,0xb0
	lvx_u		$H3h,r10,$Htbl
	li		r10,0xc0
	lvx_u		$H4l,r8,$Htbl		# load H^4
	li		r8,0x10
	lvx_u		$H4, r9,$Htbl
	li		r9,0x20
	lvx_u		$H4h,r10,$Htbl
	li		r10,0x30

	vsldoi		$t2,$zero,$t1,8		# 0x0000..0808
	vaddubm		$hiperm,$t0,$t2		# 0x0001..1617
	vaddubm		$loperm,$t1,$hiperm	# 0x0809..1e1f
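
	# hiperm gathers the two high doublewords of its two sources,
	# loperm the two low ones. vpmsumd multiplies both doubleword
	# lanes and xors the two products together, so packing H^2|H
	# against Xi+2|Xi+3 folds two blocks with a single instruction.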

	$SHRI		$len,$len,4		# this allows us to use the
						# sign bit as carry
	lvx_u		$IN0,0,$inp		# load input
	lvx_u		$IN1,r8,$inp
	subic.		$len,$len,8
	lvx_u		$IN2,r9,$inp
	lvx_u		$IN3,r10,$inp
	addi		$inp,$inp,0x40
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask
	le?vperm	$IN3,$IN3,$IN3,$lemask

	vxor		$Xh,$IN0,$Xl

	vpmsumd		$Xl1,$IN1,$H3l
	vpmsumd		$Xm1,$IN1,$H3
	vpmsumd		$Xh1,$IN1,$H3h

	vperm		$H21l,$H2,$H,$hiperm
	vperm		$t0,$IN2,$IN3,$loperm
	vperm		$H21h,$H2,$H,$loperm
	vperm		$t1,$IN2,$IN3,$hiperm
	vpmsumd		$Xm2,$IN2,$H2		# H^2.lo·Xi+2.hi+H^2.hi·Xi+2.lo
	vpmsumd		$Xl3,$t0,$H21l		# H^2.lo·Xi+2.lo+H.lo·Xi+3.lo
	vpmsumd		$Xm3,$IN3,$H		# H.hi·Xi+3.lo  +H.lo·Xi+3.hi
	vpmsumd		$Xh3,$t1,$H21h		# H^2.hi·Xi+2.hi+H.hi·Xi+3.hi

	vxor		$Xm2,$Xm2,$Xm1
	vxor		$Xl3,$Xl3,$Xl1
	vxor		$Xm3,$Xm3,$Xm2
	vxor		$Xh3,$Xh3,$Xh1

	blt		Ltail_4x
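
	# Main 4x loop: the running sum travels in Xh and meets H^4,
	# while the three younger blocks are multiplied by H^3 and by
	# the packed H^2|H pair prepared above; all products are xored
	# together and one two-phase reduction closes the iteration.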
Loop_4x:
	lvx_u		$IN0,0,$inp
	lvx_u		$IN1,r8,$inp
	subic.		$len,$len,4
	lvx_u		$IN2,r9,$inp
	lvx_u		$IN3,r10,$inp
	addi		$inp,$inp,0x40
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask
	le?vperm	$IN3,$IN3,$IN3,$lemask
	le?vperm	$IN0,$IN0,$IN0,$lemask

	vpmsumd		$Xl,$Xh,$H4l		# H^4.lo·Xi.lo
	vpmsumd		$Xm,$Xh,$H4		# H^4.hi·Xi.lo+H^4.lo·Xi.hi
	vpmsumd		$Xh,$Xh,$H4h		# H^4.hi·Xi.hi
	vpmsumd		$Xl1,$IN1,$H3l
	vpmsumd		$Xm1,$IN1,$H3
	vpmsumd		$Xh1,$IN1,$H3h

	vxor		$Xl,$Xl,$Xl3
	vxor		$Xm,$Xm,$Xm3
	vxor		$Xh,$Xh,$Xh3
	vperm		$t0,$IN2,$IN3,$loperm
	vperm		$t1,$IN2,$IN3,$hiperm

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase
	vpmsumd		$Xl3,$t0,$H21l		# H.lo·Xi+3.lo  +H^2.lo·Xi+2.lo
	vpmsumd		$Xh3,$t1,$H21h		# H.hi·Xi+3.hi  +H^2.hi·Xi+2.hi

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xm2,$IN2,$H2		# H^2.hi·Xi+2.lo+H^2.lo·Xi+2.hi
	vpmsumd		$Xm3,$IN3,$H		# H.hi·Xi+3.lo  +H.lo·Xi+3.hi
	vpmsumd		$Xl,$Xl,$xC2

	vxor		$Xl3,$Xl3,$Xl1
	vxor		$Xh3,$Xh3,$Xh1
	vxor		$Xh,$Xh,$IN0
	vxor		$Xm2,$Xm2,$Xm1
	vxor		$Xh,$Xh,$t1
	vxor		$Xm3,$Xm3,$Xm2
	vxor		$Xh,$Xh,$Xl
	bge		Loop_4x
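
	# Ltail_4x folds the last batch of products and performs the
	# final reduction; Lone/Ltwo/Lthree handle 1-3 leftover blocks
	# by substituting the matching lower power of H for H^4 and
	# re-entering Ltail_4x.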

Ltail_4x:
	vpmsumd		$Xl,$Xh,$H4l		# H^4.lo·Xi.lo
	vpmsumd		$Xm,$Xh,$H4		# H^4.hi·Xi.lo+H^4.lo·Xi.hi
	vpmsumd		$Xh,$Xh,$H4h		# H^4.hi·Xi.hi

	vxor		$Xl,$Xl,$Xl3
	vxor		$Xm,$Xm,$Xm3

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xh,$Xh,$Xh3
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$Xl,$Xl,$t1

	addic.		$len,$len,4
	beq		Ldone_4x

	lvx_u		$IN0,0,$inp
	${UCMP}i	$len,2
	li		$len,-4
	blt		Lone
	lvx_u		$IN1,r8,$inp
	beq		Ltwo

Lthree:
	lvx_u		$IN2,r9,$inp
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask

	vxor		$Xh,$IN0,$Xl
	vmr		$H4l,$H3l
	vmr		$H4, $H3
	vmr		$H4h,$H3h

	vperm		$t0,$IN1,$IN2,$loperm
	vperm		$t1,$IN1,$IN2,$hiperm
	vpmsumd		$Xm2,$IN1,$H2		# H^2.lo·Xi+1.hi+H^2.hi·Xi+1.lo
	vpmsumd		$Xm3,$IN2,$H		# H.hi·Xi+2.lo  +H.lo·Xi+2.hi
	vpmsumd		$Xl3,$t0,$H21l		# H^2.lo·Xi+1.lo+H.lo·Xi+2.lo
	vpmsumd		$Xh3,$t1,$H21h		# H^2.hi·Xi+1.hi+H.hi·Xi+2.hi

	vxor		$Xm3,$Xm3,$Xm2
	b		Ltail_4x

.align	4
Ltwo:
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask

	vxor		$Xh,$IN0,$Xl
	vperm		$t0,$zero,$IN1,$loperm
	vperm		$t1,$zero,$IN1,$hiperm

	vsldoi		$H4l,$zero,$H2,8
	vmr		$H4, $H2
	vsldoi		$H4h,$H2,$zero,8

	vpmsumd		$Xl3,$t0, $H21l		# H.lo·Xi+1.lo
	vpmsumd		$Xm3,$IN1,$H		# H.hi·Xi+1.lo+H.lo·Xi+1.hi
	vpmsumd		$Xh3,$t1, $H21h		# H.hi·Xi+1.hi

	b		Ltail_4x

.align	4
Lone:
	le?vperm	$IN0,$IN0,$IN0,$lemask

	vsldoi		$H4l,$zero,$H,8
	vmr		$H4, $H
	vsldoi		$H4h,$H,$zero,8

	vxor		$Xh,$IN0,$Xl
	vxor		$Xl3,$Xl3,$Xl3
	vxor		$Xm3,$Xm3,$Xm3
	vxor		$Xh3,$Xh3,$Xh3

	b		Ltail_4x

Ldone_4x:
	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	li		r10,`15+6*$SIZE_T`
	li		r11,`31+6*$SIZE_T`
	mtspr		256,$vrsave
	lvx		v20,r10,$sp
	addi		r10,r10,32
	lvx		v21,r11,$sp
	addi		r11,r11,32
	lvx		v22,r10,$sp
	addi		r10,r10,32
	lvx		v23,r11,$sp
	addi		r11,r11,32
	lvx		v24,r10,$sp
	addi		r10,r10,32
	lvx		v25,r11,$sp
	addi		r11,r11,32
	lvx		v26,r10,$sp
	addi		r10,r10,32
	lvx		v27,r11,$sp
	addi		r11,r11,32
	lvx		v28,r10,$sp
	addi		r10,r10,32
	lvx		v29,r11,$sp
	addi		r11,r11,32
	lvx		v30,r10,$sp
	lvx		v31,r11,$sp
	addi		$sp,$sp,$FRAME
	blr
	.long		0
	.byte		0,12,0x04,0,0x80,0,4,0
	.long		0
___
}

$code.=<<___;
.size	.gcm_ghash_p8,.-.gcm_ghash_p8

.asciz	"GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
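
# Output filter: evaluate constant expressions enclosed in backticks
# and resolve the le?/be? line prefixes, emitting lines that match the
# configured endianness and commenting out the rest.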

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	if ($flavour =~ /le$/o) {	# little-endian
		s/le\?//o	or
		s/be\?/#be#/o;
	} else {
		s/le\?/#le#/o	or
		s/be\?//o;
	}
	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";	# enforce flush