#! /usr/bin/env perl
# Copyright 1998-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# At some point it became apparent that the original SSLeay RC4
# assembler implementation performs suboptimally on latest IA-32
# microarchitectures. After re-tuning, performance has changed as
# follows:
#
# Pentium      -10%
# Pentium III  +12%
# AMD          +50%(*)
# P4           +250%(**)
#
# (*)  This number is actually a trade-off:-) It's possible to
#      achieve +72%, but at the cost of -48% off PIII performance.
#      In other words code performing further 13% faster on AMD
#      would perform almost 2 times slower on Intel PIII...
#      For reference! This code delivers ~80% of rc4-amd64.pl
#      performance on the same Opteron machine.
# (**) This number requires compressed key schedule set up by
#      RC4_set_key [see commentary below for further details].
#
# May 2011
#
# Optimize for Core2 and Westmere [and incidentally Opteron]. Current
# performance in cycles per processed byte (less is better) and
# improvement relative to the previous version of this module are:
#
# Pentium       10.2         # original numbers
# Pentium III    7.8(*)
# Intel P4       7.5
#
# Opteron        6.1/+20%    # new MMX numbers
# Core2          5.3/+67%(**)
# Westmere       5.1/+94%(**)
# Sandy Bridge   5.0/+8%
# Atom          12.6/+6%
# VIA Nano       6.4/+9%
# Ivy Bridge     4.9/±0%
# Bulldozer      4.9/+15%
#
# (*)  PIII can actually deliver 6.6 cycles per byte with MMX code,
#      but this specific code performs poorly on Core2. And vice
#      versa, below MMX/SSE code delivering 5.8/7.1 on Core2 performs
#      poorly on PIII, at 8.0/14.5:-( As PIII is not a "hot" CPU
#      [anymore], I chose to discard PIII-specific code path and opt
#      for original IALU-only code, which is why MMX/SSE code path
#      is guarded by SSE2 bit (see below), not MMX/SSE.
# (**) Performance vs. block size on Core2 and Westmere had a maximum
#      at ... 64 bytes block size. And it was quite a maximum, 40-60%
#      in comparison to largest 8KB block size. Above improvement
#      coefficients are for the largest block size.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],$x86only = $ARGV[$#ARGV] eq "386");

$xx="eax";
$yy="ebx";
$tx="ecx";
$ty="edx";
$inp="esi";
$out="ebp";
$dat="edi";
sub RC4_loop {
  my $i=shift;
  my $func = ($i==0)?*mov:*or;

  &add (&LB($yy),&LB($tx));
  &mov ($ty,&DWP(0,$dat,$yy,4));
  &mov (&DWP(0,$dat,$yy,4),$tx);
  &mov (&DWP(0,$dat,$xx,4),$ty);
  &add ($ty,$tx);
  &inc (&LB($xx));
  &and ($ty,0xff);
  &ror ($out,8) if ($i!=0);
  if ($i<3) {
    &mov ($tx,&DWP(0,$dat,$xx,4));
  } else {
    &mov ($tx,&wparam(3)); # reload [re-biased] out
  }
  &$func ($out,&DWP(0,$dat,$ty,4));
}
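
# Each RC4_loop($i) above emits one round of the RC4 PRGA over the 32-bit
# key schedule; "loop4" below runs four of them and packs the four keystream
# bytes into one register before XOR-ing with four input bytes. A minimal C
# sketch of the equivalent scalar logic (variable names are illustrative
# only, x, y, S[], inp and out are assumed in scope, and the packing assumes
# a little-endian store as on x86):
#
#     unsigned int ks = 0, in4, t;
#     for (int i = 0; i < 4; i++) {
#         x = (x + 1) & 0xff;
#         y = (y + S[x]) & 0xff;
#         t = S[x]; S[x] = S[y]; S[y] = t;
#         ks |= (unsigned int)S[(S[x] + S[y]) & 0xff] << (8 * i);
#     }
#     memcpy(&in4, inp, 4); in4 ^= ks; memcpy(out, &in4, 4);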
if ($alt=0) {	# intentional assignment, not a comparison: $alt acts as a
		# compile-time switch; with 0 the second loop body below is
		# used, set it to 1 to try this alternative one instead
  # >20% faster on Atom and Sandy Bridge[!], 8% faster on Opteron,
  # but ~40% slower on Core2 and Westmere... Attempt to add movz
  # brings down Opteron by 25%, Atom and Sandy Bridge by 15%, yet
  # on Core2 with movz it's almost 20% slower than below alternative
  # code... Yes, it's a total mess...
  my @XX=($xx,$out);
  $RC4_loop_mmx = sub { # SSE actually...
    my $i=shift;
    my $j=$i<=0?0:$i>>1;
    my $mm=$i<=0?"mm0":"mm".($i&1);

    &add (&LB($yy),&LB($tx));
    &lea (@XX[1],&DWP(1,@XX[0]));
    &pxor ("mm2","mm0") if ($i==0);
    &psllq ("mm1",8) if ($i==0);
    &and (@XX[1],0xff);
    &pxor ("mm0","mm0") if ($i<=0);
    &mov ($ty,&DWP(0,$dat,$yy,4));
    &mov (&DWP(0,$dat,$yy,4),$tx);
    &pxor ("mm1","mm2") if ($i==0);
    &mov (&DWP(0,$dat,$XX[0],4),$ty);
    &add (&LB($ty),&LB($tx));
    &movd (@XX[0],"mm7") if ($i==0);
    &mov ($tx,&DWP(0,$dat,@XX[1],4));
    &pxor ("mm1","mm1") if ($i==1);
    &movq ("mm2",&QWP(0,$inp)) if ($i==1);
    &movq (&QWP(-8,(@XX[0],$inp)),"mm1") if ($i==0);
    &pinsrw ($mm,&DWP(0,$dat,$ty,4),$j);

    push (@XX,shift(@XX)) if ($i>=0);
  }
} else {
  # Using pinsrw here improves performance on Intel CPUs by 2-3%, but
  # brings down AMD by 7%...
  $RC4_loop_mmx = sub {
    my $i=shift;

    &add (&LB($yy),&LB($tx));
    &psllq ("mm1",8*(($i-1)&7)) if (abs($i)!=1);
    &mov ($ty,&DWP(0,$dat,$yy,4));
    &mov (&DWP(0,$dat,$yy,4),$tx);
    &mov (&DWP(0,$dat,$xx,4),$ty);
    &inc ($xx);
    &add ($ty,$tx);
    &movz ($xx,&LB($xx)); # (*)
    &movz ($ty,&LB($ty)); # (*)
    &pxor ("mm2",$i==1?"mm0":"mm1") if ($i>=0);
    &movq ("mm0",&QWP(0,$inp)) if ($i<=0);
    &movq (&QWP(-8,($out,$inp)),"mm2") if ($i==0);
    &mov ($tx,&DWP(0,$dat,$xx,4));
    &movd ($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4));

    # (*) This is the key to Core2 and Westmere performance.
    #     Without movz out-of-order execution logic confuses
    #     itself and fails to reorder loads and stores. Problem
    #     appears to be fixed in Sandy Bridge...
  }
}
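
# Both closures above generate the body of an 8x MMX/SSE loop: eight PRGA
# rounds gather eight keystream bytes into a 64-bit MMX register (pinsrw in
# the first variant, movd+psllq+pxor in the second), a single movq fetches
# eight input bytes, and one pxor+movq writes eight output bytes per
# iteration. Roughly, in C (a sketch only; load64/store64 stand for the
# unaligned little-endian 8-byte accesses done with movq):
#
#     unsigned long long ks = 0;
#     for (int i = 0; i < 8; i++) {
#         x = (x + 1) & 0xff;
#         y = (y + S[x]) & 0xff;
#         unsigned int t = S[x]; S[x] = S[y]; S[y] = t;
#         ks |= (unsigned long long)S[(S[x] + S[y]) & 0xff] << (8 * i);
#     }
#     store64(out, load64(inp) ^ ks);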

&external_label("OPENSSL_ia32cap_P");

# void RC4(RC4_KEY *key,size_t len,const unsigned char *inp,unsigned char *out);
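#
# The code below relies on the key layout assumed throughout this module:
# key->x at offset 0, key->y at offset 4 and key->data[] starting at offset
# 8. A minimal C usage sketch (RC4_KEY and both prototypes are the ones
# declared in <openssl/rc4.h>; the buffer contents are placeholders):
#
#     #include <openssl/rc4.h>
#
#     RC4_KEY ks;
#     unsigned char key[16] = {0}, in[64] = {0}, out[64];
#
#     RC4_set_key(&ks, sizeof(key), key);
#     RC4(&ks, sizeof(in), in, out);    /* out = in XOR keystream */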
&function_begin("RC4");
&mov ($dat,&wparam(0)); # load key schedule pointer
&mov ($ty, &wparam(1)); # load len
&mov ($inp,&wparam(2)); # load inp
&mov ($out,&wparam(3)); # load out
&xor ($xx,$xx); # avoid partial register stalls
&xor ($yy,$yy);

&cmp ($ty,0); # safety net
&je (&label("abort"));

&mov (&LB($xx),&BP(0,$dat)); # load key->x
&mov (&LB($yy),&BP(4,$dat)); # load key->y
&add ($dat,8);

&lea ($tx,&DWP(0,$inp,$ty));
&sub ($out,$inp); # re-bias out
&mov (&wparam(1),$tx); # save input+len

&inc (&LB($xx));

# detect compressed key schedule...
&cmp (&DWP(256,$dat),-1);
&je (&label("RC4_CHAR"));

&mov ($tx,&DWP(0,$dat,$xx,4));

&and ($ty,-4); # how many 4-byte chunks?
&jz (&label("loop1"));

&mov (&wparam(3),$out); # $out as accumulator in these loops

if ($x86only) {
  &jmp (&label("go4loop4"));
} else {
  &test ($ty,-8);
  &jz (&label("go4loop4"));

  &picmeup($out,"OPENSSL_ia32cap_P");
  &bt (&DWP(0,$out),26); # check SSE2 bit [could have been MMX]
  &jnc (&label("go4loop4"));

  &mov ($out,&wparam(3)) if (!$alt);
  &movd ("mm7",&wparam(3)) if ($alt);
  &and ($ty,-8);
  &lea ($ty,&DWP(-8,$inp,$ty));
  &mov (&DWP(-4,$dat),$ty); # save input+(len/8)*8-8

  &$RC4_loop_mmx(-1);
  &jmp (&label("loop_mmx_enter"));

  &set_label("loop_mmx",16);
  &$RC4_loop_mmx(0);
  &set_label("loop_mmx_enter");
  for ($i=1;$i<8;$i++) { &$RC4_loop_mmx($i); }
  &mov ($ty,$yy);
  &xor ($yy,$yy); # this is second key to Core2
  &mov (&LB($yy),&LB($ty)); # and Westmere performance...
  &cmp ($inp,&DWP(-4,$dat));
  &lea ($inp,&DWP(8,$inp));
  &jb (&label("loop_mmx"));

  if ($alt) {
    &movd ($out,"mm7");
    &pxor ("mm2","mm0");
    &psllq ("mm1",8);
    &pxor ("mm1","mm2");
    &movq (&QWP(-8,$out,$inp),"mm1");
  } else {
    &psllq ("mm1",56);
    &pxor ("mm2","mm1");
    &movq (&QWP(-8,$out,$inp),"mm2");
  }
  &emms ();

  &cmp ($inp,&wparam(1)); # compare to input+len
  &je (&label("done"));
  &jmp (&label("loop1"));
}
&set_label("go4loop4",16);
&lea ($ty,&DWP(-4,$inp,$ty));
&mov (&wparam(2),$ty); # save input+(len/4)*4-4

&set_label("loop4");
for ($i=0;$i<4;$i++) { RC4_loop($i); }
&ror ($out,8);
&xor ($out,&DWP(0,$inp));
&cmp ($inp,&wparam(2)); # compare to input+(len/4)*4-4
&mov (&DWP(0,$tx,$inp),$out); # $tx holds re-biased out here
&lea ($inp,&DWP(4,$inp));
&mov ($tx,&DWP(0,$dat,$xx,4));
&jb (&label("loop4"));

&cmp ($inp,&wparam(1)); # compare to input+len
&je (&label("done"));
&mov ($out,&wparam(3)); # restore $out

&set_label("loop1",16);
&add (&LB($yy),&LB($tx));
&mov ($ty,&DWP(0,$dat,$yy,4));
&mov (&DWP(0,$dat,$yy,4),$tx);
&mov (&DWP(0,$dat,$xx,4),$ty);
&add ($ty,$tx);
&inc (&LB($xx));
&and ($ty,0xff);
&mov ($ty,&DWP(0,$dat,$ty,4));
&xor (&LB($ty),&BP(0,$inp));
&lea ($inp,&DWP(1,$inp));
&mov ($tx,&DWP(0,$dat,$xx,4));
&cmp ($inp,&wparam(1)); # compare to input+len
&mov (&BP(-1,$out,$inp),&LB($ty));
&jb (&label("loop1"));
&jmp (&label("done"));
# this is essentially Intel P4 specific codepath...
&set_label("RC4_CHAR",16);
&movz ($tx,&BP(0,$dat,$xx));

# strangely enough unrolled loop performs over 20% slower...
&set_label("cloop1");
&add (&LB($yy),&LB($tx));
&movz ($ty,&BP(0,$dat,$yy));
&mov (&BP(0,$dat,$yy),&LB($tx));
&mov (&BP(0,$dat,$xx),&LB($ty));
&add (&LB($ty),&LB($tx));
&movz ($ty,&BP(0,$dat,$ty));
&add (&LB($xx),1);
&xor (&LB($ty),&BP(0,$inp));
&lea ($inp,&DWP(1,$inp));
&movz ($tx,&BP(0,$dat,$xx));
&cmp ($inp,&wparam(1));
&mov (&BP(-1,$out,$inp),&LB($ty));
&jb (&label("cloop1"));

&set_label("done");
&dec (&LB($xx));
&mov (&DWP(-4,$dat),$yy); # save key->y
&mov (&BP(-8,$dat),&LB($xx)); # save key->x
&set_label("abort");
&function_end("RC4");

########################################################################

$inp="esi";
$out="edi";
$idi="ebp";
$ido="ecx";
$idx="edx";

# void RC4_set_key(RC4_KEY *key,int len,const unsigned char *data);
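#
# The "w" loops below run the key schedule over the 32-bit (RC4_INT) array,
# the "c" loops over the compressed 8-bit (RC4_CHAR) one; both implement the
# standard RC4 key scheduling algorithm, which in C reads roughly as follows
# (a sketch only, assuming S[], key[] and len are in scope):
#
#     int i, j, t;
#     for (i = 0; i < 256; i++) S[i] = i;
#     for (i = j = 0; i < 256; i++) {
#         j = (j + S[i] + key[i % len]) & 0xff;
#         t = S[i]; S[i] = S[j]; S[j] = t;
#     }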
&function_begin("RC4_set_key");
&mov ($out,&wparam(0)); # load key
&mov ($idi,&wparam(1)); # load len
&mov ($inp,&wparam(2)); # load data
&picmeup($idx,"OPENSSL_ia32cap_P");

&lea ($out,&DWP(2*4,$out)); # &key->data
&lea ($inp,&DWP(0,$inp,$idi)); # $inp to point at the end
&neg ($idi);
&xor ("eax","eax");
&mov (&DWP(-4,$out),$idi); # borrow key->y

&bt (&DWP(0,$idx),20); # check for bit#20
&jc (&label("c1stloop"));

&set_label("w1stloop",16);
&mov (&DWP(0,$out,"eax",4),"eax"); # key->data[i]=i;
&add (&LB("eax"),1); # i++;
&jnc (&label("w1stloop"));

&xor ($ido,$ido);
&xor ($idx,$idx);

&set_label("w2ndloop",16);
&mov ("eax",&DWP(0,$out,$ido,4));
&add (&LB($idx),&BP(0,$inp,$idi));
&add (&LB($idx),&LB("eax"));
&add ($idi,1);
&mov ("ebx",&DWP(0,$out,$idx,4));
&jnz (&label("wnowrap"));
&mov ($idi,&DWP(-4,$out));
&set_label("wnowrap");
&mov (&DWP(0,$out,$idx,4),"eax");
&mov (&DWP(0,$out,$ido,4),"ebx");
&add (&LB($ido),1);
&jnc (&label("w2ndloop"));
&jmp (&label("exit"));

# Unlike all other x86 [and x86_64] implementations, Intel P4 core
# [including EM64T] was found to perform poorly with above "32-bit" key
# schedule, a.k.a. RC4_INT. Performance improvement for IA-32 hand-coded
# assembler turned out to be 3.5x if re-coded for compressed 8-bit one,
# a.k.a. RC4_CHAR! It's however inappropriate to just switch to 8-bit
# schedule for x86[_64], because non-P4 implementations suffer from
# significant performance losses then, e.g. PIII exhibits >2x
# deterioration, and so does Opteron. In order to assure optimal
# all-round performance, we detect P4 at run-time and set up compressed
# key schedule, which is recognized by RC4 procedure.
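#
# Concretely, the 32-bit schedule keeps each of the 256 S-box values in its
# own dword of key->data, while the compressed schedule packs them into 256
# bytes and writes -1 into the dword at byte offset 256 of key->data ("mark
# schedule as compressed" below); RC4 above tests that dword to choose its
# code path. In the 32-bit layout that dword is just another S-box entry
# holding a value in 0..255, so it can never match -1. A rough C picture of
# the two layouts (field names are only for this sketch, not OpenSSL
# declarations):
#
#     union sched {
#         unsigned int as_int[256];            /* RC4_INT: one dword per entry */
#         struct {
#             unsigned char as_char[256];      /* RC4_CHAR: one byte per entry */
#             int marker;                      /* set to -1 when compressed    */
#         } c;
#     };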
&set_label("c1stloop",16);
&mov (&BP(0,$out,"eax"),&LB("eax")); # key->data[i]=i;
&add (&LB("eax"),1); # i++;
&jnc (&label("c1stloop"));

&xor ($ido,$ido);
&xor ($idx,$idx);
&xor ("ebx","ebx");

&set_label("c2ndloop",16);
&mov (&LB("eax"),&BP(0,$out,$ido));
&add (&LB($idx),&BP(0,$inp,$idi));
&add (&LB($idx),&LB("eax"));
&add ($idi,1);
&mov (&LB("ebx"),&BP(0,$out,$idx));
&jnz (&label("cnowrap"));
&mov ($idi,&DWP(-4,$out));
&set_label("cnowrap");
&mov (&BP(0,$out,$idx),&LB("eax"));
&mov (&BP(0,$out,$ido),&LB("ebx"));
&add (&LB($ido),1);
&jnc (&label("c2ndloop"));
&mov (&DWP(256,$out),-1); # mark schedule as compressed

&set_label("exit");
&xor ("eax","eax");
&mov (&DWP(-8,$out),"eax"); # key->x=0;
&mov (&DWP(-4,$out),"eax"); # key->y=0;
&function_end("RC4_set_key");

# const char *RC4_options(void);
&function_begin_B("RC4_options");
&call (&label("pic_point"));
&set_label("pic_point");
&blindpop("eax");
&lea ("eax",&DWP(&label("opts")."-".&label("pic_point"),"eax"));
&picmeup("edx","OPENSSL_ia32cap_P");
&mov ("edx",&DWP(0,"edx"));
&bt ("edx",20);
&jc (&label("1xchar"));
&bt ("edx",26);
&jnc (&label("ret"));
&add ("eax",25);
&ret ();

&set_label("1xchar");
&add ("eax",12);
&set_label("ret");
&ret ();

&set_label("opts",64);
&asciz ("rc4(4x,int)");
&asciz ("rc4(1x,char)");
&asciz ("rc4(8x,mmx)");
&asciz ("RC4 for x86, CRYPTOGAMS by <appro\@openssl.org>");
&align (64);
&function_end_B("RC4_options");
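
# For reference, the selection above is roughly equivalent to the following
# C; the offsets 12 and 25 skip past "rc4(4x,int)\0" and "rc4(1x,char)\0" in
# the string table at "opts" (a sketch only):
#
#     const char *RC4_options(void)
#     {
#         if (OPENSSL_ia32cap_P[0] & (1 << 20))   /* P4: compressed schedule */
#             return "rc4(1x,char)";
#         if (OPENSSL_ia32cap_P[0] & (1 << 26))   /* SSE2 available */
#             return "rc4(8x,mmx)";
#         return "rc4(4x,int)";
#     }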

&asm_finish();

close STDOUT or die "error closing STDOUT: $!";