#! /usr/bin/env perl
# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# January 2015
#
# ChaCha20 for x86.
#
# Performance in cycles per byte out of large buffer.
#
#		1xIALU/gcc	4xSSSE3
# Pentium	17.5/+80%
# PIII		14.2/+60%
# P4		18.6/+84%
# Core2		9.56/+89%	4.83
# Westmere	9.50/+45%	3.35
# Sandy Bridge	10.5/+47%	3.20
# Haswell	8.15/+50%	2.83
# Skylake	7.53/+22%	2.75
# Silvermont	17.4/+36%	8.35
# Goldmont	13.4/+40%	4.36
# Sledgehammer	10.2/+54%
# Bulldozer	13.4/+50%	4.38(*)
#
# (*) Bulldozer actually executes 4xXOP code path that delivers 3.55;
  37. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  38. push(@INC,"${dir}","${dir}../../perlasm");
  39. require "x86asm.pl";
  40. $output=pop;
  41. open STDOUT,">$output";
  42. &asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");
  43. $xmm=$ymm=0;
  44. for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
  45. $ymm=1 if ($xmm &&
  46. `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  47. =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
  48. ($gasver=$1)>=2.19); # first version supporting AVX
  49. $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
  50. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
  51. $1>=2.03); # first version supporting AVX
  52. $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
  53. `ml 2>&1` =~ /Version ([0-9]+)\./ &&
  54. $1>=10); # first version supporting AVX
  55. $ymm=1 if ($xmm && !$ymm &&
  56. `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/ &&
  57. $2>=3.0); # first version supporting AVX
# Scalar-path register assignment. Only 7 general registers are usable,
# so $a is pinned to eax while each of the b/c/d state columns rotates
# between a pair of registers ($x holds the "current" lane, $x_ the one
# being paged in/out); QUARTERROUND swaps each pair after every call.
$a="eax";
($b,$b_)=("ebx","ebp");
($c,$c_)=("ecx","esi");
($d,$d_)=("edx","edi");
# Emit one ChaCha quarter-round on 32-bit integer registers, interleaved
# with the spills/reloads that page the 16-dword state through the seven
# available registers. ($ai,$bi,$ci,$di) are state-word indices for this
# quarter-round; $i (0..7) is its position within a double round and
# steers the software pipelining (what to prefetch for the NEXT call and
# what to store from the PREVIOUS one). The trailing list swaps rotate
# the b/c/d register pairs so the caller's $b/$c/$d always name the live
# lane. NOTE(review): statement order is the instruction schedule — do
# not reorder.
sub QUARTERROUND {
my ($ai,$bi,$ci,$di,$i)=@_;
my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous

	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14

	# At the even/odd round boundaries ($i==0,3,4,7) the simple +1/-1
	# neighbour formula above is wrong, because the b/c/d columns
	# shift diagonally between rounds; patch next/previous indices.
	if ($i==0) {
	    my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==3) {
	    my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
	} elsif ($i==4) {
	    my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==7) {
	    my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
	}

	# a += b was hoisted into the previous quarter-round ("elsewhere")
	# to hide latency; indented moves are the interleaved spill/reload
	# traffic, not part of the arithmetic.
	#&add	($a,$b);			# see elsewhere
	&xor	($d,$a);
	 &mov	(&DWP(4*$cp,"esp"),$c_)		if ($ai>0 && $ai<3);
	&rol	($d,16);
	 &mov	(&DWP(4*$bp,"esp"),$b_)		if ($i!=0);
	&add	($c,$d);
	 &mov	($c_,&DWP(4*$cn,"esp"))		if ($ai>0 && $ai<3);
	&xor	($b,$c);
	 &mov	($d_,&DWP(4*$dn,"esp"))		if ($di!=$dn);
	&rol	($b,12);
	 &mov	($b_,&DWP(4*$bn,"esp"))		if ($i<7);
	 &mov	($b_,&DWP(128,"esp"))		if ($i==7);	# loop counter
	&add	($a,$b);
	&xor	($d,$a);
	&mov	(&DWP(4*$ai,"esp"),$a);
	&rol	($d,8);
	&mov	($a,&DWP(4*$an,"esp"));
	&add	($c,$d);
	&mov	(&DWP(4*$di,"esp"),$d)		if ($di!=$dn);
	&mov	($d_,$d)			if ($di==$dn);
	&xor	($b,$c);
	 &add	($a,$b_)			if ($i<7);	# elsewhere
	&rol	($b,7);

	# Rotate register pairs: the lane just processed becomes the
	# "shadow" register for the next quarter-round.
	($b,$b_)=($b_,$b);
	($c,$c_)=($c_,$c);
	($d,$d_)=($d_,$d);
}
# Forward declarations for labels referenced across function boundaries:
# the scalar entry point jumps into the SSSE3/XOP paths, and both SIMD
# paths address the shared constant table PIC-relative via "pic_point".
&static_label("ssse3_shortcut");
&static_label("xop_shortcut");
&static_label("ssse3_data");
&static_label("pic_point");
# void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#                     size_t len, const unsigned int key[8],
#                     const unsigned int counter[4]);
#
# Scalar (IALU) code path. Stack frame layout (33 dwords pushed):
#   0..63(%esp)   - 16-dword working state for the current block
#   64..127(%esp) - copy of key material + counter/nonce
#   128(%esp)     - round-loop counter spill slot
# When SSE2 paths were compiled in, control may divert to the SSSE3/XOP
# implementations after a CPUID capability check.
&function_begin("ChaCha20_ctr32");
	&xor	("eax","eax");
	&cmp	("eax",&wparam(2));		# len==0?
	&je	(&label("no_data"));
if ($xmm) {
	# Runtime dispatch: require both FXSR and SSSE3 before taking the
	# SIMD shortcut; otherwise fall through to plain x86 code.
	&call	(&label("pic_point"));
&set_label("pic_point");
	&blindpop("eax");
	&picmeup("ebp","OPENSSL_ia32cap_P","eax",&label("pic_point"));
	&test	(&DWP(0,"ebp"),1<<24);		# test FXSR bit
	&jz	(&label("x86"));
	&test	(&DWP(4,"ebp"),1<<9);		# test SSSE3 bit
	&jz	(&label("x86"));
	&jmp	(&label("ssse3_shortcut"));
&set_label("x86");
}
	&mov	("esi",&wparam(3));		# key
	&mov	("edi",&wparam(4));		# counter and nonce
	&stack_push(33);

	&mov	("eax",&DWP(4*0,"esi"));	# copy key
	&mov	("ebx",&DWP(4*1,"esi"));
	&mov	("ecx",&DWP(4*2,"esi"));
	&mov	("edx",&DWP(4*3,"esi"));
	&mov	(&DWP(64+4*4,"esp"),"eax");
	&mov	(&DWP(64+4*5,"esp"),"ebx");
	&mov	(&DWP(64+4*6,"esp"),"ecx");
	&mov	(&DWP(64+4*7,"esp"),"edx");
	&mov	("eax",&DWP(4*4,"esi"));
	&mov	("ebx",&DWP(4*5,"esi"));
	&mov	("ecx",&DWP(4*6,"esi"));
	&mov	("edx",&DWP(4*7,"esi"));
	&mov	(&DWP(64+4*8,"esp"),"eax");
	&mov	(&DWP(64+4*9,"esp"),"ebx");
	&mov	(&DWP(64+4*10,"esp"),"ecx");
	&mov	(&DWP(64+4*11,"esp"),"edx");
	&mov	("eax",&DWP(4*0,"edi"));	# copy counter and nonce
	&mov	("ebx",&DWP(4*1,"edi"));
	&mov	("ecx",&DWP(4*2,"edi"));
	&mov	("edx",&DWP(4*3,"edi"));
	&sub	("eax",1);			# pre-decrement: per-block code re-increments
	&mov	(&DWP(64+4*12,"esp"),"eax");
	&mov	(&DWP(64+4*13,"esp"),"ebx");
	&mov	(&DWP(64+4*14,"esp"),"ecx");
	&mov	(&DWP(64+4*15,"esp"),"edx");
	&jmp	(&label("entry"));

&set_label("outer_loop",16);
	&mov	(&wparam(1),$b);		# save input
	&mov	(&wparam(0),$a);		# save output
	&mov	(&wparam(2),$c);		# save len
&set_label("entry");
	# Initialize working state: sigma word 0 stays in $a, words 1..3
	# go straight to the stack, key/counter/nonce copied from 64(%esp).
	&mov	($a,0x61707865);
	&mov	(&DWP(4*1,"esp"),0x3320646e);
	&mov	(&DWP(4*2,"esp"),0x79622d32);
	&mov	(&DWP(4*3,"esp"),0x6b206574);

	&mov	($b, &DWP(64+4*5,"esp"));	# copy key material
	&mov	($b_,&DWP(64+4*6,"esp"));
	&mov	($c, &DWP(64+4*10,"esp"));
	&mov	($c_,&DWP(64+4*11,"esp"));
	&mov	($d, &DWP(64+4*13,"esp"));
	&mov	($d_,&DWP(64+4*14,"esp"));
	&mov	(&DWP(4*5,"esp"),$b);
	&mov	(&DWP(4*6,"esp"),$b_);
	&mov	(&DWP(4*10,"esp"),$c);
	&mov	(&DWP(4*11,"esp"),$c_);
	&mov	(&DWP(4*13,"esp"),$d);
	&mov	(&DWP(4*14,"esp"),$d_);
	&mov	($b, &DWP(64+4*7,"esp"));
	&mov	($d_,&DWP(64+4*15,"esp"));
	&mov	($d, &DWP(64+4*12,"esp"));
	&mov	($b_,&DWP(64+4*4,"esp"));
	&mov	($c, &DWP(64+4*8,"esp"));
	&mov	($c_,&DWP(64+4*9,"esp"));
	&add	($d,1);				# counter value
	&mov	(&DWP(4*7,"esp"),$b);
	&mov	(&DWP(4*15,"esp"),$d_);
	&mov	(&DWP(64+4*12,"esp"),$d);	# save counter value

	&mov	($b,10);			# loop counter (10 double rounds)
	&jmp	(&label("loop"));

&set_label("loop",16);
	&add	($a,$b_);			# elsewhere
	&mov	(&DWP(128,"esp"),$b);		# save loop counter
	&mov	($b,$b_);
	&QUARTERROUND(0, 4, 8, 12, 0);
	&QUARTERROUND(1, 5, 9, 13, 1);
	&QUARTERROUND(2, 6,10, 14, 2);
	&QUARTERROUND(3, 7,11, 15, 3);
	&QUARTERROUND(0, 5,10, 15, 4);
	&QUARTERROUND(1, 6,11, 12, 5);
	&QUARTERROUND(2, 7, 8, 13, 6);
	&QUARTERROUND(3, 4, 9, 14, 7);
	&dec	($b);
	&jnz	(&label("loop"));

	&mov	($b,&wparam(2));		# load len

	&add	($a,0x61707865);		# accumulate key material
	&add	($b_,&DWP(64+4*4,"esp"));
	&add	($c, &DWP(64+4*8,"esp"));
	&add	($c_,&DWP(64+4*9,"esp"));

	&cmp	($b,64);
	&jb	(&label("tail"));		# partial final block

	# Full-block path: add remaining key material, XOR keystream with
	# input, and write 64 bytes of output, a few words at a time.
	&mov	($b,&wparam(1));		# load input pointer
	&add	($d, &DWP(64+4*12,"esp"));
	&add	($d_,&DWP(64+4*14,"esp"));

	&xor	($a, &DWP(4*0,$b));		# xor with input
	&xor	($b_,&DWP(4*4,$b));
	&mov	(&DWP(4*0,"esp"),$a);
	&mov	($a,&wparam(0));		# load output pointer
	&xor	($c, &DWP(4*8,$b));
	&xor	($c_,&DWP(4*9,$b));
	&xor	($d, &DWP(4*12,$b));
	&xor	($d_,&DWP(4*14,$b));
	&mov	(&DWP(4*4,$a),$b_);		# write output
	&mov	(&DWP(4*8,$a),$c);
	&mov	(&DWP(4*9,$a),$c_);
	&mov	(&DWP(4*12,$a),$d);
	&mov	(&DWP(4*14,$a),$d_);

	&mov	($b_,&DWP(4*1,"esp"));
	&mov	($c, &DWP(4*2,"esp"));
	&mov	($c_,&DWP(4*3,"esp"));
	&mov	($d, &DWP(4*5,"esp"));
	&mov	($d_,&DWP(4*6,"esp"));
	&add	($b_,0x3320646e);		# accumulate key material
	&add	($c, 0x79622d32);
	&add	($c_,0x6b206574);
	&add	($d, &DWP(64+4*5,"esp"));
	&add	($d_,&DWP(64+4*6,"esp"));
	&xor	($b_,&DWP(4*1,$b));
	&xor	($c, &DWP(4*2,$b));
	&xor	($c_,&DWP(4*3,$b));
	&xor	($d, &DWP(4*5,$b));
	&xor	($d_,&DWP(4*6,$b));
	&mov	(&DWP(4*1,$a),$b_);
	&mov	(&DWP(4*2,$a),$c);
	&mov	(&DWP(4*3,$a),$c_);
	&mov	(&DWP(4*5,$a),$d);
	&mov	(&DWP(4*6,$a),$d_);

	&mov	($b_,&DWP(4*7,"esp"));
	&mov	($c, &DWP(4*10,"esp"));
	&mov	($c_,&DWP(4*11,"esp"));
	&mov	($d, &DWP(4*13,"esp"));
	&mov	($d_,&DWP(4*15,"esp"));
	&add	($b_,&DWP(64+4*7,"esp"));
	&add	($c, &DWP(64+4*10,"esp"));
	&add	($c_,&DWP(64+4*11,"esp"));
	&add	($d, &DWP(64+4*13,"esp"));
	&add	($d_,&DWP(64+4*15,"esp"));
	&xor	($b_,&DWP(4*7,$b));
	&xor	($c, &DWP(4*10,$b));
	&xor	($c_,&DWP(4*11,$b));
	&xor	($d, &DWP(4*13,$b));
	&xor	($d_,&DWP(4*15,$b));
	&lea	($b,&DWP(4*16,$b));		# inp += 64
	&mov	(&DWP(4*7,$a),$b_);
	&mov	($b_,&DWP(4*0,"esp"));
	&mov	(&DWP(4*10,$a),$c);
	&mov	($c,&wparam(2));		# len
	&mov	(&DWP(4*11,$a),$c_);
	&mov	(&DWP(4*13,$a),$d);
	&mov	(&DWP(4*15,$a),$d_);
	&mov	(&DWP(4*0,$a),$b_);
	&lea	($a,&DWP(4*16,$a));		# out += 64
	&sub	($c,64);
	&jnz	(&label("outer_loop"));

	&jmp	(&label("done"));

&set_label("tail");
	# Partial final block: materialize the whole keystream block on
	# the stack, then XOR it with input byte by byte.
	&add	($d, &DWP(64+4*12,"esp"));
	&add	($d_,&DWP(64+4*14,"esp"));
	&mov	(&DWP(4*0,"esp"),$a);
	&mov	(&DWP(4*4,"esp"),$b_);
	&mov	(&DWP(4*8,"esp"),$c);
	&mov	(&DWP(4*9,"esp"),$c_);
	&mov	(&DWP(4*12,"esp"),$d);
	&mov	(&DWP(4*14,"esp"),$d_);

	&mov	($b_,&DWP(4*1,"esp"));
	&mov	($c, &DWP(4*2,"esp"));
	&mov	($c_,&DWP(4*3,"esp"));
	&mov	($d, &DWP(4*5,"esp"));
	&mov	($d_,&DWP(4*6,"esp"));
	&add	($b_,0x3320646e);		# accumulate key material
	&add	($c, 0x79622d32);
	&add	($c_,0x6b206574);
	&add	($d, &DWP(64+4*5,"esp"));
	&add	($d_,&DWP(64+4*6,"esp"));
	&mov	(&DWP(4*1,"esp"),$b_);
	&mov	(&DWP(4*2,"esp"),$c);
	&mov	(&DWP(4*3,"esp"),$c_);
	&mov	(&DWP(4*5,"esp"),$d);
	&mov	(&DWP(4*6,"esp"),$d_);

	&mov	($b_,&DWP(4*7,"esp"));
	&mov	($c, &DWP(4*10,"esp"));
	&mov	($c_,&DWP(4*11,"esp"));
	&mov	($d, &DWP(4*13,"esp"));
	&mov	($d_,&DWP(4*15,"esp"));
	&add	($b_,&DWP(64+4*7,"esp"));
	&add	($c, &DWP(64+4*10,"esp"));
	&add	($c_,&DWP(64+4*11,"esp"));
	&add	($d, &DWP(64+4*13,"esp"));
	&add	($d_,&DWP(64+4*15,"esp"));
	&mov	(&DWP(4*7,"esp"),$b_);
	&mov	($b_,&wparam(1));		# load input
	&mov	(&DWP(4*10,"esp"),$c);
	&mov	($c,&wparam(0));		# load output
	&mov	(&DWP(4*11,"esp"),$c_);
	&xor	($c_,$c_);			# byte index
	&mov	(&DWP(4*13,"esp"),$d);
	&mov	(&DWP(4*15,"esp"),$d_);

	&xor	("eax","eax");
	&xor	("edx","edx");
&set_label("tail_loop");
	&movb	("al",&BP(0,$c_,$b_));		# input byte
	&movb	("dl",&BP(0,"esp",$c_));	# keystream byte
	&lea	($c_,&DWP(1,$c_));
	&xor	("al","dl");
	&mov	(&BP(-1,$c,$c_),"al");		# output byte
	&dec	($b);
	&jnz	(&label("tail_loop"));

&set_label("done");
	&stack_pop(33);
&set_label("no_data");
&function_end("ChaCha20_ctr32");
if ($xmm) {
# 4-way SSSE3 path: only 8 XMM registers are available, so — exactly as
# in the scalar path — each state column rotates through a register pair
# while the full 16x16-byte state is paged through -128("ebx").
my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
my ($out,$inp,$len)=("edi","esi","ecx");

# Emit one quarter-round on four interleaved blocks, with the same
# next/previous software-pipelining scheme as the scalar QUARTERROUND.
# Rotations by 16 and 8 use pshufb byte-shuffle masks at 0/16("eax");
# rotations by 12 and 7 are synthesized with pslld/psrld/por.
# NOTE(review): statement order is the instruction schedule — do not
# reorder.
sub QUARTERROUND_SSSE3 {
my ($ai,$bi,$ci,$di,$i)=@_;
my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous

	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14

	# Patch next/previous indices at even/odd round boundaries,
	# mirroring the scalar QUARTERROUND.
	if ($i==0) {
	    my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==3) {
	    my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
	} elsif ($i==4) {
	    my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==7) {
	    my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
	}

	#&paddd	($xa,$xb);			# see elsewhere
	#&pxor	($xd,$xa);			# see elsewhere
	 &movdqa(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
	&pshufb	($xd,&QWP(0,"eax"));		# rot16
	 &movdqa(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
	&paddd	($xc,$xd);
	 &movdqa($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
	&pxor	($xb,$xc);
	 &movdqa($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
	&movdqa	($xa_,$xb);			# borrow as temporary
	&pslld	($xb,12);
	&psrld	($xa_,20);
	&por	($xb,$xa_);
	 &movdqa($xa_,&QWP(16*$an-128,"ebx"));
	&paddd	($xa,$xb);
	 &movdqa($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
	&pxor	($xd,$xa);
	&movdqa	(&QWP(16*$ai-128,"ebx"),$xa);
	&pshufb	($xd,&QWP(16,"eax"));		# rot8
	&paddd	($xc,$xd);
	&movdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
	&movdqa	($xd_,$xd)			if ($di==$dn);
	&pxor	($xb,$xc);
	 &paddd	($xa_,$xb_)			if ($i<7);	# elsewhere
	&movdqa	($xa,$xb);			# borrow as temporary
	&pslld	($xb,7);
	&psrld	($xa,25);
	 &pxor	($xd_,$xa_)			if ($i<7);	# elsewhere
	&por	($xb,$xa);

	# Rotate register pairs for the next quarter-round.
	($xa,$xa_)=($xa_,$xa);
	($xb,$xb_)=($xb_,$xb);
	($xc,$xc_)=($xc_,$xc);
	($xd,$xd_)=($xd_,$xd);
}
# SSSE3 entry point (also reachable via "ssse3_shortcut" from
# ChaCha20_ctr32). Processes four 64-byte blocks per iteration when the
# assembler supports pshufb with memory operands; otherwise only the 1x
# path below is emitted. Frame: 131 dwords pushed, esp aligned to 64;
# original esp saved at 512(%esp), offloaded pointers at 512+4/512+8.
&function_begin("ChaCha20_ssse3");
&set_label("ssse3_shortcut");
if ($ymm) {
	&test	(&DWP(4,"ebp"),1<<11);		# test XOP bit
	&jnz	(&label("xop_shortcut"));
}
	&mov	($out,&wparam(0));
	&mov	($inp,&wparam(1));
	&mov	($len,&wparam(2));
	&mov	("edx",&wparam(3));		# key
	&mov	("ebx",&wparam(4));		# counter and nonce

	&mov	("ebp","esp");
	&stack_push	(131);
	&and	("esp",-64);			# 64-byte align the frame
	&mov	(&DWP(512,"esp"),"ebp");	# save original esp

	# eax -> shared constant table, PIC-relative (eax was set by the
	# blindpop at "pic_point" in ChaCha20_ctr32).
	&lea	("eax",&DWP(&label("ssse3_data")."-".
			    &label("pic_point"),"eax"));
	&movdqu	("xmm3",&QWP(0,"ebx"));		# counter and nonce

if (defined($gasver) && $gasver>=2.17) {	# even though we encode
						# pshufb manually, we
						# handle only register
						# operands, while this
						# segment uses memory
						# operand...
	&cmp	($len,64*4);
	&jb	(&label("1x"));

	&mov	(&DWP(512+4,"esp"),"edx");	# offload pointers
	&mov	(&DWP(512+8,"esp"),"ebx");
	&sub	($len,64*4);			# bias len
	&lea	("ebp",&DWP(256+128,"esp"));	# size optimization

	# Broadcast each state word across a register and store the
	# "transposed" 4-way key material at -128("ebp").
	&movdqu	("xmm7",&QWP(0,"edx"));		# key
	&pshufd	("xmm0","xmm3",0x00);
	&pshufd	("xmm1","xmm3",0x55);
	&pshufd	("xmm2","xmm3",0xaa);
	&pshufd	("xmm3","xmm3",0xff);
	&paddd	("xmm0",&QWP(16*3,"eax"));	# fix counters
	&pshufd	("xmm4","xmm7",0x00);
	&pshufd	("xmm5","xmm7",0x55);
	&psubd	("xmm0",&QWP(16*4,"eax"));
	&pshufd	("xmm6","xmm7",0xaa);
	&pshufd	("xmm7","xmm7",0xff);
	&movdqa	(&QWP(16*12-128,"ebp"),"xmm0");
	&movdqa	(&QWP(16*13-128,"ebp"),"xmm1");
	&movdqa	(&QWP(16*14-128,"ebp"),"xmm2");
	&movdqa	(&QWP(16*15-128,"ebp"),"xmm3");
	&movdqu	("xmm3",&QWP(16,"edx"));	# key
	&movdqa	(&QWP(16*4-128,"ebp"),"xmm4");
	&movdqa	(&QWP(16*5-128,"ebp"),"xmm5");
	&movdqa	(&QWP(16*6-128,"ebp"),"xmm6");
	&movdqa	(&QWP(16*7-128,"ebp"),"xmm7");
	&movdqa	("xmm7",&QWP(16*2,"eax"));	# sigma
	&lea	("ebx",&DWP(128,"esp"));	# size optimization
	&pshufd	("xmm0","xmm3",0x00);
	&pshufd	("xmm1","xmm3",0x55);
	&pshufd	("xmm2","xmm3",0xaa);
	&pshufd	("xmm3","xmm3",0xff);
	&pshufd	("xmm4","xmm7",0x00);
	&pshufd	("xmm5","xmm7",0x55);
	&pshufd	("xmm6","xmm7",0xaa);
	&pshufd	("xmm7","xmm7",0xff);
	&movdqa	(&QWP(16*8-128,"ebp"),"xmm0");
	&movdqa	(&QWP(16*9-128,"ebp"),"xmm1");
	&movdqa	(&QWP(16*10-128,"ebp"),"xmm2");
	&movdqa	(&QWP(16*11-128,"ebp"),"xmm3");
	&movdqa	(&QWP(16*0-128,"ebp"),"xmm4");
	&movdqa	(&QWP(16*1-128,"ebp"),"xmm5");
	&movdqa	(&QWP(16*2-128,"ebp"),"xmm6");
	&movdqa	(&QWP(16*3-128,"ebp"),"xmm7");
	&lea	($inp,&DWP(128,$inp));		# size optimization
	&lea	($out,&DWP(128,$out));		# size optimization
	&jmp	(&label("outer_loop"));

&set_label("outer_loop",16);
	# Refresh the working state at -128("ebx") from the key-material
	# master copy at -128("ebp"); commented-out rows are lines whose
	# values remain live in registers from the previous iteration.
	#&movdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
	&movdqa	("xmm1",&QWP(16*1-128,"ebp"));
	&movdqa	("xmm2",&QWP(16*2-128,"ebp"));
	&movdqa	("xmm3",&QWP(16*3-128,"ebp"));
	#&movdqa	("xmm4",&QWP(16*4-128,"ebp"));
	&movdqa	("xmm5",&QWP(16*5-128,"ebp"));
	&movdqa	("xmm6",&QWP(16*6-128,"ebp"));
	&movdqa	("xmm7",&QWP(16*7-128,"ebp"));
	#&movdqa	(&QWP(16*0-128,"ebx"),"xmm0");
	&movdqa	(&QWP(16*1-128,"ebx"),"xmm1");
	&movdqa	(&QWP(16*2-128,"ebx"),"xmm2");
	&movdqa	(&QWP(16*3-128,"ebx"),"xmm3");
	#&movdqa	(&QWP(16*4-128,"ebx"),"xmm4");
	&movdqa	(&QWP(16*5-128,"ebx"),"xmm5");
	&movdqa	(&QWP(16*6-128,"ebx"),"xmm6");
	&movdqa	(&QWP(16*7-128,"ebx"),"xmm7");
	#&movdqa	("xmm0",&QWP(16*8-128,"ebp"));
	#&movdqa	("xmm1",&QWP(16*9-128,"ebp"));
	&movdqa	("xmm2",&QWP(16*10-128,"ebp"));
	&movdqa	("xmm3",&QWP(16*11-128,"ebp"));
	&movdqa	("xmm4",&QWP(16*12-128,"ebp"));
	&movdqa	("xmm5",&QWP(16*13-128,"ebp"));
	&movdqa	("xmm6",&QWP(16*14-128,"ebp"));
	&movdqa	("xmm7",&QWP(16*15-128,"ebp"));
	&paddd	("xmm4",&QWP(16*4,"eax"));	# counter value
	#&movdqa	(&QWP(16*8-128,"ebx"),"xmm0");
	#&movdqa	(&QWP(16*9-128,"ebx"),"xmm1");
	&movdqa	(&QWP(16*10-128,"ebx"),"xmm2");
	&movdqa	(&QWP(16*11-128,"ebx"),"xmm3");
	&movdqa	(&QWP(16*12-128,"ebx"),"xmm4");
	&movdqa	(&QWP(16*13-128,"ebx"),"xmm5");
	&movdqa	(&QWP(16*14-128,"ebx"),"xmm6");
	&movdqa	(&QWP(16*15-128,"ebx"),"xmm7");
	&movdqa	(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value

	&movdqa	($xa, &QWP(16*0-128,"ebp"));
	&movdqa	($xd, "xmm4");
	&movdqa	($xb_,&QWP(16*4-128,"ebp"));
	&movdqa	($xc, &QWP(16*8-128,"ebp"));
	&movdqa	($xc_,&QWP(16*9-128,"ebp"));

	&mov	("edx",10);			# loop counter
	&nop	();

&set_label("loop",16);
	&paddd	($xa,$xb_);			# elsewhere
	&movdqa	($xb,$xb_);
	&pxor	($xd,$xa);			# elsewhere
	&QUARTERROUND_SSSE3(0, 4, 8, 12, 0);
	&QUARTERROUND_SSSE3(1, 5, 9, 13, 1);
	&QUARTERROUND_SSSE3(2, 6,10, 14, 2);
	&QUARTERROUND_SSSE3(3, 7,11, 15, 3);
	&QUARTERROUND_SSSE3(0, 5,10, 15, 4);
	&QUARTERROUND_SSSE3(1, 6,11, 12, 5);
	&QUARTERROUND_SSSE3(2, 7, 8, 13, 6);
	&QUARTERROUND_SSSE3(3, 4, 9, 14, 7);
	&dec	("edx");
	&jnz	(&label("loop"));

	# Flush the lanes still held in registers back to the state area.
	&movdqa	(&QWP(16*4-128,"ebx"),$xb_);
	&movdqa	(&QWP(16*8-128,"ebx"),$xc);
	&movdqa	(&QWP(16*9-128,"ebx"),$xc_);
	&movdqa	(&QWP(16*12-128,"ebx"),$xd);
	&movdqa	(&QWP(16*14-128,"ebx"),$xd_);

my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));

	#&movdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
	&movdqa	($xa1,&QWP(16*1-128,"ebx"));
	&movdqa	($xa2,&QWP(16*2-128,"ebx"));
	&movdqa	($xa3,&QWP(16*3-128,"ebx"));

	# Add key material, transpose the 4x4 dword tile back into
	# per-block order, then XOR with input — 64 bytes per pass.
    for($i=0;$i<256;$i+=64) {
	&paddd	($xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
	&paddd	($xa1,&QWP($i+16*1-128,"ebp"));
	&paddd	($xa2,&QWP($i+16*2-128,"ebp"));
	&paddd	($xa3,&QWP($i+16*3-128,"ebp"));

	&movdqa	($xt2,$xa0);			# "de-interlace" data
	&punpckldq ($xa0,$xa1);
	&movdqa	($xt3,$xa2);
	&punpckldq ($xa2,$xa3);
	&punpckhdq ($xt2,$xa1);
	&punpckhdq ($xt3,$xa3);
	&movdqa	($xa1,$xa0);
	&punpcklqdq ($xa0,$xa2);		# "a0"
	&movdqa	($xa3,$xt2);
	&punpcklqdq ($xt2,$xt3);		# "a2"
	&punpckhqdq ($xa1,$xa2);		# "a1"
	&punpckhqdq ($xa3,$xt3);		# "a3"

	#($xa2,$xt2)=($xt2,$xa2);

	&movdqu	($xt0,&QWP(64*0-128,$inp));	# load input
	&movdqu	($xt1,&QWP(64*1-128,$inp));
	&movdqu	($xa2,&QWP(64*2-128,$inp));
	&movdqu	($xt3,&QWP(64*3-128,$inp));
	&lea	($inp,&QWP($i<192?16:(64*4-16*3),$inp));
	&pxor	($xt0,$xa0);
	&movdqa	($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);
	&pxor	($xt1,$xa1);
	&movdqa	($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
	&pxor	($xt2,$xa2);
	&movdqa	($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
	&pxor	($xt3,$xa3);
	&movdqa	($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
	&movdqu	(&QWP(64*0-128,$out),$xt0);	# store output
	&movdqu	(&QWP(64*1-128,$out),$xt1);
	&movdqu	(&QWP(64*2-128,$out),$xt2);
	&movdqu	(&QWP(64*3-128,$out),$xt3);
	&lea	($out,&QWP($i<192?16:(64*4-16*3),$out));
    }

	&sub	($len,64*4);
	&jnc	(&label("outer_loop"));

	&add	($len,64*4);			# undo len bias
	&jz	(&label("done"));

	# <256 bytes remain: fall through to the 1x path with restored
	# pointers and the advanced counter value merged into xmm3.
	&mov	("ebx",&DWP(512+8,"esp"));	# restore pointers
	&lea	($inp,&DWP(-128,$inp));
	&mov	("edx",&DWP(512+4,"esp"));
	&lea	($out,&DWP(-128,$out));

	&movd	("xmm2",&DWP(16*12-128,"ebp"));	# counter value
	&movdqu	("xmm3",&QWP(0,"ebx"));
	&paddd	("xmm2",&QWP(16*6,"eax"));	# +four
	&pand	("xmm3",&QWP(16*7,"eax"));
	&por	("xmm3","xmm2");		# counter value
}
{
# 1x SSSE3 path: one 64-byte block at a time, the whole state held in
# four XMM registers, one state row per register.
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));

# Emit four parallel quarter-rounds on the row-per-register layout.
# Rotations by 16/8 use the pshufb masks; 12/7 via shift-and-or.
sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot16);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,20);
	&pslld	($t,12);
	&por	($b,$t);

	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot24);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,25);
	&pslld	($t,7);
	&por	($b,$t);
}

&set_label("1x");
	&movdqa	($a,&QWP(16*2,"eax"));		# sigma
	&movdqu	($b,&QWP(0,"edx"));
	&movdqu	($c,&QWP(16,"edx"));
	#&movdqu	($d,&QWP(0,"ebx"));	# already loaded
	&movdqa	($rot16,&QWP(0,"eax"));
	&movdqa	($rot24,&QWP(16,"eax"));

	&mov	(&DWP(16*3,"esp"),"ebp");
	&movdqa	(&QWP(16*0,"esp"),$a);		# keep initial state for the
	&movdqa	(&QWP(16*1,"esp"),$b);		# final accumulation
	&movdqa	(&QWP(16*2,"esp"),$c);
	&movdqa	(&QWP(16*3,"esp"),$d);
	&mov	("edx",10);			# 10 double rounds
	&jmp	(&label("loop1x"));

&set_label("outer1x",16);
	&movdqa	($d,&QWP(16*5,"eax"));		# one
	&movdqa	($a,&QWP(16*0,"esp"));
	&movdqa	($b,&QWP(16*1,"esp"));
	&movdqa	($c,&QWP(16*2,"esp"));
	&paddd	($d,&QWP(16*3,"esp"));		# increment block counter
	&mov	("edx",10);
	&movdqa	(&QWP(16*3,"esp"),$d);
	&jmp	(&label("loop1x"));

&set_label("loop1x",16);
	# Even round, then rotate rows into diagonal position, odd round,
	# rotate back — the standard "shuffled" double round.
	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b00111001);
	&pshufd	($d,$d,0b10010011);
	&nop	();

	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b10010011);
	&pshufd	($d,$d,0b00111001);

	&dec	("edx");
	&jnz	(&label("loop1x"));

	&paddd	($a,&QWP(16*0,"esp"));		# accumulate key material
	&paddd	($b,&QWP(16*1,"esp"));
	&paddd	($c,&QWP(16*2,"esp"));
	&paddd	($d,&QWP(16*3,"esp"));

	&cmp	($len,64);
	&jb	(&label("tail"));

	&movdqu	($t,&QWP(16*0,$inp));
	&movdqu	($t1,&QWP(16*1,$inp));
	&pxor	($a,$t);			# xor with input
	&movdqu	($t,&QWP(16*2,$inp));
	&pxor	($b,$t1);
	&movdqu	($t1,&QWP(16*3,$inp));
	&pxor	($c,$t);
	&pxor	($d,$t1);
	&lea	($inp,&DWP(16*4,$inp));		# inp+=64

	&movdqu	(&QWP(16*0,$out),$a);		# write output
	&movdqu	(&QWP(16*1,$out),$b);
	&movdqu	(&QWP(16*2,$out),$c);
	&movdqu	(&QWP(16*3,$out),$d);
	&lea	($out,&DWP(16*4,$out));		# out+=64
	&sub	($len,64);
	&jnz	(&label("outer1x"));

	&jmp	(&label("done"));

&set_label("tail");
	# Partial final block: park the keystream on the stack and XOR
	# byte by byte.
	&movdqa	(&QWP(16*0,"esp"),$a);
	&movdqa	(&QWP(16*1,"esp"),$b);
	&movdqa	(&QWP(16*2,"esp"),$c);
	&movdqa	(&QWP(16*3,"esp"),$d);

	&xor	("eax","eax");
	&xor	("edx","edx");
	&xor	("ebp","ebp");			# byte index

&set_label("tail_loop");
	&movb	("al",&BP(0,"esp","ebp"));	# keystream byte
	&movb	("dl",&BP(0,$inp,"ebp"));	# input byte
	&lea	("ebp",&DWP(1,"ebp"));
	&xor	("al","dl");
	&movb	(&BP(-1,$out,"ebp"),"al");	# output byte
	&dec	($len);
	&jnz	(&label("tail_loop"));
}
&set_label("done");
	&mov	("esp",&DWP(512,"esp"));	# restore pre-alignment esp
&function_end("ChaCha20_ssse3");

# Constant table shared by the SIMD paths, addressed PIC-relative
# through eax (offsets 16*0 .. 16*7).
&align	(64);
&set_label("ssse3_data");
&data_byte(0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd);	# 16*0: pshufb mask, rotate dwords left 16
&data_byte(0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe);	# 16*1: pshufb mask, rotate dwords left 8
&data_word(0x61707865,0x3320646e,0x79622d32,0x6b206574);	# 16*2: sigma ("expand 32-byte k")
&data_word(0,1,2,3);			# 16*3: per-lane counter offsets
&data_word(4,4,4,4);			# 16*4: counter bias for 4x path
&data_word(1,0,0,0);			# 16*5: "one" — 1x counter increment
&data_word(4,0,0,0);			# 16*6: "four" — counter advance after 4x
&data_word(0,-1,-1,-1);			# 16*7: mask keeping nonce, dropping counter word
&align	(64);
}
&asciz	("ChaCha20 for x86, CRYPTOGAMS by <appro\@openssl.org>");
if ($ymm) {
# 4-way XOP path (AMD Bulldozer-class): same pipelined structure as the
# SSSE3 path, but all four rotate amounts use the single-instruction
# vprotd, and 3-operand AVX forms fold some moves away.
my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
my ($out,$inp,$len)=("edi","esi","ecx");

# Emit one quarter-round on four interleaved blocks; see QUARTERROUND
# for the next/previous paging scheme. NOTE(review): statement order is
# the instruction schedule — do not reorder.
sub QUARTERROUND_XOP {
my ($ai,$bi,$ci,$di,$i)=@_;
my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous

	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14

	# Patch next/previous indices at even/odd round boundaries.
	if ($i==0) {
	    my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==3) {
	    my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
	} elsif ($i==4) {
	    my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==7) {
	    my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
	}

	#&vpaddd	($xa,$xa,$xb);		# see elsewhere
	#&vpxor	($xd,$xd,$xa);			# see elsewhere
	 &vmovdqa	(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
	&vprotd	($xd,$xd,16);
	 &vmovdqa	(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
	&vpaddd	($xc,$xc,$xd);
	 &vmovdqa	($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
	&vpxor	($xb,$i!=0?$xb:$xb_,$xc);
	 &vmovdqa	($xa_,&QWP(16*$an-128,"ebx"));
	&vprotd	($xb,$xb,12);
	 &vmovdqa	($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
	&vpaddd	($xa,$xa,$xb);
	 &vmovdqa	($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
	&vpxor	($xd,$xd,$xa);
	 &vpaddd	($xa_,$xa_,$xb_)		if ($i<7);	# elsewhere
	&vprotd	($xd,$xd,8);
	&vmovdqa	(&QWP(16*$ai-128,"ebx"),$xa);
	&vpaddd	($xc,$xc,$xd);
	&vmovdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
	&vpxor	($xb,$xb,$xc);
	 &vpxor	($xd_,$di==$dn?$xd:$xd_,$xa_)	if ($i<7);	# elsewhere
	&vprotd	($xb,$xb,7);

	# Rotate register pairs for the next quarter-round.
	($xa,$xa_)=($xa_,$xa);
	($xb,$xb_)=($xb_,$xb);
	($xc,$xc_)=($xc_,$xc);
	($xd,$xd_)=($xd_,$xd);
}
  # void ChaCha20_xop(out, inp, len, key, counter) — AMD XOP code path.
  # Args arrive via wparam (cdecl stack slots, set up by function_begin).
  761. &function_begin("ChaCha20_xop");
  762. &set_label("xop_shortcut");
  763. &mov ($out,&wparam(0));
  764. &mov ($inp,&wparam(1));
  765. &mov ($len,&wparam(2));
  766. &mov ("edx",&wparam(3)); # key
  767. &mov ("ebx",&wparam(4)); # counter and nonce
  768. &vzeroupper ();
  # Build a 64-byte-aligned stack frame; original esp is stashed at [esp+512]
  # and restored in the epilogue.
  769. &mov ("ebp","esp");
  770. &stack_push (131);
  771. &and ("esp",-64);
  772. &mov (&DWP(512,"esp"),"ebp");
  # PIC: eax is assumed to already hold the address of "pic_point"
  # (established before this chunk — TODO confirm); rebase it to the
  # ssse3_data constant table.
  773. &lea ("eax",&DWP(&label("ssse3_data")."-".
  774. &label("pic_point"),"eax"));
  775. &vmovdqu ("xmm3",&QWP(0,"ebx")); # counter and nonce
  # Less than 4 blocks (256 bytes)? Take the single-block path.
  776. &cmp ($len,64*4);
  777. &jb (&label("1x"));
  # 4-way (4 parallel blocks) setup: broadcast each 32-bit state word into its
  # own xmm register (one lane per block) and park all 16 of them at
  # ebp-relative slots 16*0..16*15 (-128 bias is a size optimization:
  # disp8 instead of disp32 in the emitted addressing).
  778. &mov (&DWP(512+4,"esp"),"edx"); # offload pointers
  779. &mov (&DWP(512+8,"esp"),"ebx");
  780. &sub ($len,64*4); # bias len
  781. &lea ("ebp",&DWP(256+128,"esp")); # size optimization
  782. &vmovdqu ("xmm7",&QWP(0,"edx")); # key
  # Splat counter/nonce words 12..15.
  783. &vpshufd ("xmm0","xmm3",0x00);
  784. &vpshufd ("xmm1","xmm3",0x55);
  785. &vpshufd ("xmm2","xmm3",0xaa);
  786. &vpshufd ("xmm3","xmm3",0xff);
  # Per-lane counter adjustment: add the 0..3 pattern from the table, then
  # subtract the bias so the outer loop can keep adding "four".
  787. &vpaddd ("xmm0","xmm0",&QWP(16*3,"eax")); # fix counters
  # Splat key words 4..7.
  788. &vpshufd ("xmm4","xmm7",0x00);
  789. &vpshufd ("xmm5","xmm7",0x55);
  790. &vpsubd ("xmm0","xmm0",&QWP(16*4,"eax"));
  791. &vpshufd ("xmm6","xmm7",0xaa);
  792. &vpshufd ("xmm7","xmm7",0xff);
  793. &vmovdqa (&QWP(16*12-128,"ebp"),"xmm0");
  794. &vmovdqa (&QWP(16*13-128,"ebp"),"xmm1");
  795. &vmovdqa (&QWP(16*14-128,"ebp"),"xmm2");
  796. &vmovdqa (&QWP(16*15-128,"ebp"),"xmm3");
  797. &vmovdqu ("xmm3",&QWP(16,"edx")); # key
  798. &vmovdqa (&QWP(16*4-128,"ebp"),"xmm4");
  799. &vmovdqa (&QWP(16*5-128,"ebp"),"xmm5");
  800. &vmovdqa (&QWP(16*6-128,"ebp"),"xmm6");
  801. &vmovdqa (&QWP(16*7-128,"ebp"),"xmm7");
  802. &vmovdqa ("xmm7",&QWP(16*2,"eax")); # sigma
  803. &lea ("ebx",&DWP(128,"esp")); # size optimization
  # Splat key words 8..11 and the four sigma constant words 0..3.
  804. &vpshufd ("xmm0","xmm3",0x00);
  805. &vpshufd ("xmm1","xmm3",0x55);
  806. &vpshufd ("xmm2","xmm3",0xaa);
  807. &vpshufd ("xmm3","xmm3",0xff);
  808. &vpshufd ("xmm4","xmm7",0x00);
  809. &vpshufd ("xmm5","xmm7",0x55);
  810. &vpshufd ("xmm6","xmm7",0xaa);
  811. &vpshufd ("xmm7","xmm7",0xff);
  812. &vmovdqa (&QWP(16*8-128,"ebp"),"xmm0");
  813. &vmovdqa (&QWP(16*9-128,"ebp"),"xmm1");
  814. &vmovdqa (&QWP(16*10-128,"ebp"),"xmm2");
  815. &vmovdqa (&QWP(16*11-128,"ebp"),"xmm3");
  816. &vmovdqa (&QWP(16*0-128,"ebp"),"xmm4");
  817. &vmovdqa (&QWP(16*1-128,"ebp"),"xmm5");
  818. &vmovdqa (&QWP(16*2-128,"ebp"),"xmm6");
  819. &vmovdqa (&QWP(16*3-128,"ebp"),"xmm7");
  # Pre-bias inp/out by 128 so the store loop's -128 offsets land on 0.
  820. &lea ($inp,&DWP(128,$inp)); # size optimization
  821. &lea ($out,&DWP(128,$out)); # size optimization
  822. &jmp (&label("outer_loop"));
  # Outer loop: one iteration produces 4 ChaCha blocks (256 bytes).
  # Copy the master key material at ebp-128.. into the working state at
  # ebx-128.. . Slots commented out are either still live in registers or
  # loaded directly below.
  823. &set_label("outer_loop",32);
  824. #&vmovdqa ("xmm0",&QWP(16*0-128,"ebp")); # copy key material
  825. &vmovdqa ("xmm1",&QWP(16*1-128,"ebp"));
  826. &vmovdqa ("xmm2",&QWP(16*2-128,"ebp"));
  827. &vmovdqa ("xmm3",&QWP(16*3-128,"ebp"));
  828. #&vmovdqa ("xmm4",&QWP(16*4-128,"ebp"));
  829. &vmovdqa ("xmm5",&QWP(16*5-128,"ebp"));
  830. &vmovdqa ("xmm6",&QWP(16*6-128,"ebp"));
  831. &vmovdqa ("xmm7",&QWP(16*7-128,"ebp"));
  832. #&vmovdqa (&QWP(16*0-128,"ebx"),"xmm0");
  833. &vmovdqa (&QWP(16*1-128,"ebx"),"xmm1");
  834. &vmovdqa (&QWP(16*2-128,"ebx"),"xmm2");
  835. &vmovdqa (&QWP(16*3-128,"ebx"),"xmm3");
  836. #&vmovdqa (&QWP(16*4-128,"ebx"),"xmm4");
  837. &vmovdqa (&QWP(16*5-128,"ebx"),"xmm5");
  838. &vmovdqa (&QWP(16*6-128,"ebx"),"xmm6");
  839. &vmovdqa (&QWP(16*7-128,"ebx"),"xmm7");
  840. #&vmovdqa ("xmm0",&QWP(16*8-128,"ebp"));
  841. #&vmovdqa ("xmm1",&QWP(16*9-128,"ebp"));
  842. &vmovdqa ("xmm2",&QWP(16*10-128,"ebp"));
  843. &vmovdqa ("xmm3",&QWP(16*11-128,"ebp"));
  844. &vmovdqa ("xmm4",&QWP(16*12-128,"ebp"));
  845. &vmovdqa ("xmm5",&QWP(16*13-128,"ebp"));
  846. &vmovdqa ("xmm6",&QWP(16*14-128,"ebp"));
  847. &vmovdqa ("xmm7",&QWP(16*15-128,"ebp"));
  # Advance the 4 per-lane block counters by "four" (table entry 16*4).
  848. &vpaddd ("xmm4","xmm4",&QWP(16*4,"eax")); # counter value
  849. #&vmovdqa (&QWP(16*8-128,"ebx"),"xmm0");
  850. #&vmovdqa (&QWP(16*9-128,"ebx"),"xmm1");
  851. &vmovdqa (&QWP(16*10-128,"ebx"),"xmm2");
  852. &vmovdqa (&QWP(16*11-128,"ebx"),"xmm3");
  853. &vmovdqa (&QWP(16*12-128,"ebx"),"xmm4");
  854. &vmovdqa (&QWP(16*13-128,"ebx"),"xmm5");
  855. &vmovdqa (&QWP(16*14-128,"ebx"),"xmm6");
  856. &vmovdqa (&QWP(16*15-128,"ebx"),"xmm7");
  857. &vmovdqa (&QWP(16*12-128,"ebp"),"xmm4"); # save counter value
  # Seed the registers QUARTERROUND_XOP expects live on loop entry.
  858. &vmovdqa ($xa, &QWP(16*0-128,"ebp"));
  859. &vmovdqa ($xd, "xmm4");
  860. &vmovdqa ($xb_,&QWP(16*4-128,"ebp"));
  861. &vmovdqa ($xc, &QWP(16*8-128,"ebp"));
  862. &vmovdqa ($xc_,&QWP(16*9-128,"ebp"));
  # edx counts 10 double rounds = 20 ChaCha rounds.
  863. &mov ("edx",10); # loop counter
  864. &nop ();
  # Round loop: each iteration is one double round — 4 column quarter-rounds
  # (0,4,8,12)… then 4 diagonal quarter-rounds (0,5,10,15)… — run 10 times
  # for ChaCha20. The leading a+=b / d^=a of quarter-round 0 is hoisted here
  # (the "elsewhere" pair commented out inside QUARTERROUND_XOP).
  865. &set_label("loop",32);
  866. &vpaddd ($xa,$xa,$xb_); # elsewhere
  867. &vpxor ($xd,$xd,$xa); # elsewhere
  868. &QUARTERROUND_XOP(0, 4, 8, 12, 0);
  869. &QUARTERROUND_XOP(1, 5, 9, 13, 1);
  870. &QUARTERROUND_XOP(2, 6,10, 14, 2);
  871. &QUARTERROUND_XOP(3, 7,11, 15, 3);
  872. &QUARTERROUND_XOP(0, 5,10, 15, 4);
  873. &QUARTERROUND_XOP(1, 6,11, 12, 5);
  874. &QUARTERROUND_XOP(2, 7, 8, 13, 6);
  875. &QUARTERROUND_XOP(3, 4, 9, 14, 7);
  876. &dec ("edx");
  877. &jnz (&label("loop"));
  # Flush the state words still held in registers back to the ebx frame so
  # the output stage below can read all 16 from memory.
  878. &vmovdqa (&QWP(16*4-128,"ebx"),$xb_);
  879. &vmovdqa (&QWP(16*8-128,"ebx"),$xc);
  880. &vmovdqa (&QWP(16*9-128,"ebx"),$xc_);
  881. &vmovdqa (&QWP(16*12-128,"ebx"),$xd);
  882. &vmovdqa (&QWP(16*14-128,"ebx"),$xd_);
  # Output stage, unrolled 4x at Perl level ($i = 0,64,128,192): for each group
  # of 4 state rows, add back the key material (ChaCha feed-forward), transpose
  # the 4x4 dword matrix so each register holds one contiguous block's data,
  # XOR with plaintext and store ciphertext.
  883. my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
  884. #&vmovdqa ($xa0,&QWP(16*0-128,"ebx")); # it's there
  885. &vmovdqa ($xa1,&QWP(16*1-128,"ebx"));
  886. &vmovdqa ($xa2,&QWP(16*2-128,"ebx"));
  887. &vmovdqa ($xa3,&QWP(16*3-128,"ebx"));
  888. for($i=0;$i<256;$i+=64) {
  889. &vpaddd ($xa0,$xa0,&QWP($i+16*0-128,"ebp")); # accumulate key material
  890. &vpaddd ($xa1,$xa1,&QWP($i+16*1-128,"ebp"));
  891. &vpaddd ($xa2,$xa2,&QWP($i+16*2-128,"ebp"));
  892. &vpaddd ($xa3,$xa3,&QWP($i+16*3-128,"ebp"));
  # 4x4 dword transpose via unpack low/high + qword unpack.
  893. &vpunpckldq ($xt2,$xa0,$xa1); # "de-interlace" data
  894. &vpunpckldq ($xt3,$xa2,$xa3);
  895. &vpunpckhdq ($xa0,$xa0,$xa1);
  896. &vpunpckhdq ($xa2,$xa2,$xa3);
  897. &vpunpcklqdq ($xa1,$xt2,$xt3); # "a0"
  898. &vpunpckhqdq ($xt2,$xt2,$xt3); # "a1"
  899. &vpunpcklqdq ($xt3,$xa0,$xa2); # "a2"
  900. &vpunpckhqdq ($xa3,$xa0,$xa2); # "a3"
  # XOR keystream with input; the four 16-byte pieces of one iteration are
  # 64 bytes apart because each register belongs to a different block.
  901. &vpxor ($xt0,$xa1,&QWP(64*0-128,$inp));
  902. &vpxor ($xt1,$xt2,&QWP(64*1-128,$inp));
  903. &vpxor ($xt2,$xt3,&QWP(64*2-128,$inp));
  904. &vpxor ($xt3,$xa3,&QWP(64*3-128,$inp));
  # Advance by 16 between iterations, and by the remaining 256-48 after the
  # last one so inp/out end up past the full 256 bytes.
  905. &lea ($inp,&QWP($i<192?16:(64*4-16*3),$inp));
  # Preload next group's state rows (skipped on the final iteration).
  906. &vmovdqa ($xa0,&QWP($i+16*4-128,"ebx")) if ($i<192);
  907. &vmovdqa ($xa1,&QWP($i+16*5-128,"ebx")) if ($i<192);
  908. &vmovdqa ($xa2,&QWP($i+16*6-128,"ebx")) if ($i<192);
  909. &vmovdqa ($xa3,&QWP($i+16*7-128,"ebx")) if ($i<192);
  910. &vmovdqu (&QWP(64*0-128,$out),$xt0); # store output
  911. &vmovdqu (&QWP(64*1-128,$out),$xt1);
  912. &vmovdqu (&QWP(64*2-128,$out),$xt2);
  913. &vmovdqu (&QWP(64*3-128,$out),$xt3);
  914. &lea ($out,&QWP($i<192?16:(64*4-16*3),$out));
  915. }
  # len was biased by -256 earlier, so "no carry" means at least 4 more
  # blocks remain; un-bias and fall through to the 1x path for a partial
  # tail (or finish if nothing is left).
  916. &sub ($len,64*4);
  917. &jnc (&label("outer_loop"));
  918. &add ($len,64*4);
  919. &jz (&label("done"));
  920. &mov ("ebx",&DWP(512+8,"esp")); # restore pointers
  921. &lea ($inp,&DWP(-128,$inp));
  922. &mov ("edx",&DWP(512+4,"esp"));
  923. &lea ($out,&DWP(-128,$out));
  # Rebuild the scalar counter/nonce row for the 1x path: take the saved
  # per-lane counter, add "four" (16*6 table entry), and splice it into the
  # original nonce using the mask at 16*7.
  924. &vmovd ("xmm2",&DWP(16*12-128,"ebp")); # counter value
  925. &vmovdqu ("xmm3",&QWP(0,"ebx"));
  926. &vpaddd ("xmm2","xmm2",&QWP(16*6,"eax"));# +four
  927. &vpand ("xmm3","xmm3",&QWP(16*7,"eax"));
  928. &vpor ("xmm3","xmm3","xmm2"); # counter value
  # Single-block register assignment: a/b/c/d are the four 4x32 state rows;
  # rot16/rot24 mirror the SSSE3 path's naming (XOP rotates via vprotd
  # instead, so they serve the shared register map — TODO confirm intent).
  929. {
  930. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
  # Emits one full ChaCha column round on whole rows:
  # a+=b; d^=a; d<<<=16; c+=d; b^=c; b<<<=12; a+=b; d^=a; d<<<=8;
  # c+=d; b^=c; b<<<=7. Diagonalization is done by the caller via vpshufd.
  931. sub XOPROUND {
  932. &vpaddd ($a,$a,$b);
  933. &vpxor ($d,$d,$a);
  934. &vprotd ($d,$d,16);
  935. &vpaddd ($c,$c,$d);
  936. &vpxor ($b,$b,$c);
  937. &vprotd ($b,$b,12);
  938. &vpaddd ($a,$a,$b);
  939. &vpxor ($d,$d,$a);
  940. &vprotd ($d,$d,8);
  941. &vpaddd ($c,$c,$d);
  942. &vpxor ($b,$b,$c);
  943. &vprotd ($b,$b,7);
  944. }
  # Single-block path: state lives in 4 registers (sigma | key0 | key1 |
  # counter+nonce); a copy is kept at esp+0..48 for the feed-forward add.
  945. &set_label("1x");
  946. &vmovdqa ($a,&QWP(16*2,"eax")); # sigma
  947. &vmovdqu ($b,&QWP(0,"edx"));
  948. &vmovdqu ($c,&QWP(16,"edx"));
  949. #&vmovdqu ($d,&QWP(0,"ebx")); # already loaded
  950. &vmovdqa ($rot16,&QWP(0,"eax"));
  951. &vmovdqa ($rot24,&QWP(16,"eax"));
  952. &mov (&DWP(16*3,"esp"),"ebp");
  953. &vmovdqa (&QWP(16*0,"esp"),$a);
  954. &vmovdqa (&QWP(16*1,"esp"),$b);
  955. &vmovdqa (&QWP(16*2,"esp"),$c);
  956. &vmovdqa (&QWP(16*3,"esp"),$d);
  957. &mov ("edx",10);
  958. &jmp (&label("loop1x"));
  # Subsequent blocks: reload saved state and bump the counter by "one"
  # (table entry 16*5).
  959. &set_label("outer1x",16);
  960. &vmovdqa ($d,&QWP(16*5,"eax")); # one
  961. &vmovdqa ($a,&QWP(16*0,"esp"));
  962. &vmovdqa ($b,&QWP(16*1,"esp"));
  963. &vmovdqa ($c,&QWP(16*2,"esp"));
  964. &vpaddd ($d,$d,&QWP(16*3,"esp"));
  965. &mov ("edx",10);
  966. &vmovdqa (&QWP(16*3,"esp"),$d);
  967. &jmp (&label("loop1x"));
  # 10 x double round: column round, shuffle rows into diagonal form,
  # diagonal round, shuffle back.
  968. &set_label("loop1x",16);
  969. &XOPROUND();
  970. &vpshufd ($c,$c,0b01001110);
  971. &vpshufd ($b,$b,0b00111001);
  972. &vpshufd ($d,$d,0b10010011);
  973. &XOPROUND();
  974. &vpshufd ($c,$c,0b01001110);
  975. &vpshufd ($b,$b,0b10010011);
  976. &vpshufd ($d,$d,0b00111001);
  977. &dec ("edx");
  978. &jnz (&label("loop1x"));
  # Feed-forward: add the saved input state to the permuted state.
  979. &vpaddd ($a,$a,&QWP(16*0,"esp"));
  980. &vpaddd ($b,$b,&QWP(16*1,"esp"));
  981. &vpaddd ($c,$c,&QWP(16*2,"esp"));
  982. &vpaddd ($d,$d,&QWP(16*3,"esp"));
  # Fewer than 64 bytes left? Go do the byte-wise tail.
  983. &cmp ($len,64);
  984. &jb (&label("tail"));
  985. &vpxor ($a,$a,&QWP(16*0,$inp)); # xor with input
  986. &vpxor ($b,$b,&QWP(16*1,$inp));
  987. &vpxor ($c,$c,&QWP(16*2,$inp));
  988. &vpxor ($d,$d,&QWP(16*3,$inp));
  989. &lea ($inp,&DWP(16*4,$inp)); # inp+=64
  990. &vmovdqu (&QWP(16*0,$out),$a); # write output
  991. &vmovdqu (&QWP(16*1,$out),$b);
  992. &vmovdqu (&QWP(16*2,$out),$c);
  993. &vmovdqu (&QWP(16*3,$out),$d);
  994. &lea ($out,&DWP(16*4,$out)); # out+=64
  995. &sub ($len,64);
  996. &jnz (&label("outer1x"));
  997. &jmp (&label("done"));
  # Partial final block (< 64 bytes): spill the keystream to the stack, then
  # XOR it with the input one byte at a time. ebp is the byte index; len was
  # verified < 64 above so the stack copy covers it.
  998. &set_label("tail");
  999. &vmovdqa (&QWP(16*0,"esp"),$a);
  1000. &vmovdqa (&QWP(16*1,"esp"),$b);
  1001. &vmovdqa (&QWP(16*2,"esp"),$c);
  1002. &vmovdqa (&QWP(16*3,"esp"),$d);
  1003. &xor ("eax","eax");
  1004. &xor ("edx","edx");
  1005. &xor ("ebp","ebp");
  1006. &set_label("tail_loop");
  1007. &movb ("al",&BP(0,"esp","ebp"));
  1008. &movb ("dl",&BP(0,$inp,"ebp"));
  1009. &lea ("ebp",&DWP(1,"ebp"));
  1010. &xor ("al","dl");
  # ebp was already incremented, hence the -1 displacement on the store.
  1011. &movb (&BP(-1,$out,"ebp"),"al");
  1012. &dec ($len);
  1013. &jnz (&label("tail_loop"));
  1014. }
  # Epilogue: clear AVX upper state before returning to SSE/caller code,
  # restore the pre-alignment esp saved at [esp+512].
  1015. &set_label("done");
  1016. &vzeroupper ();
  1017. &mov ("esp",&DWP(512,"esp"));
  1018. &function_end("ChaCha20_xop");
  # Closes a guard block opened before this chunk (presumably the
  # XOP-capability conditional — TODO confirm against full file).
  1019. }
  # Flush the generated assembly and terminate the generator script.
  1020. &asm_finish();
  1021. close STDOUT or die "error closing STDOUT: $!";