# parisc-mont.pl
  1. #! /usr/bin/env perl
  2. # Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the OpenSSL license (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. # ====================================================================
  9. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  10. # project. The module is, however, dual licensed under OpenSSL and
  11. # CRYPTOGAMS licenses depending on where you obtain it. For further
  12. # details see http://www.openssl.org/~appro/cryptogams/.
  13. # ====================================================================
  14. # On PA-7100LC this module performs ~90-50% better, less for longer
  15. # keys, than code generated by gcc 3.2 for PA-RISC 1.1. Latter means
  16. # that compiler utilized xmpyu instruction to perform 32x32=64-bit
  17. # multiplication, which in turn means that "baseline" performance was
  18. # optimal in respect to instruction set capabilities. Fair comparison
  19. # with vendor compiler is problematic, because OpenSSL doesn't define
  20. # BN_LLONG [presumably] for historical reasons, which drives compiler
  21. # toward 4 times 16x16=32-bit multiplications [plus complementary
  22. # shifts and additions] instead. This means that you should observe
  23. # several times improvement over code generated by vendor compiler
  24. # for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
  25. # improvement coefficient was never collected on PA-7100LC, or any
  26. # other 1.1 CPU, because I don't have access to such machine with
  27. # vendor compiler. But to give you a taste, PA-RISC 1.1 code path
  28. # reportedly outperformed code generated by cc +DA1.1 +O3 by factor
  29. # of ~5x on PA-8600.
  30. #
  31. # On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
  32. # reportedly ~2x faster than vendor compiler generated code [according
  33. # to comment in pa-risc2[W].s]. Here comes a catch. Execution core of
  34. # this implementation is actually 32-bit one, in the sense that it
  35. # operates on 32-bit values. But pa-risc2[W].s operates on arrays of
  36. # 64-bit BN_LONGs... How do they interoperate then? No problem. This
  37. # module picks halves of 64-bit values in reverse order and pretends
  38. # they were 32-bit BN_LONGs. But can 32-bit core compete with "pure"
  39. # 64-bit code such as pa-risc2[W].s then? Well, the thing is that
  40. # 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
  41. # i.e. there is no "wider" multiplication like on most other 64-bit
  42. # platforms. This means that even being effectively 32-bit, this
  43. # implementation performs "64-bit" computational task in same amount
  44. # of arithmetic operations, most notably multiplications. It requires
  45. # more memory references, most notably to tp[num], but this doesn't
  46. # seem to exhaust memory port capacity. And indeed, dedicated PA-RISC
  47. # 2.0 code path provides virtually same performance as pa-risc2[W].s:
  48. # it's ~10% better for shortest key length and ~10% worse for longest
  49. # one.
  50. #
  51. # In case it wasn't clear. The module has two distinct code paths:
  52. # PA-RISC 1.1 and PA-RISC 2.0 ones. Latter features carry-free 64-bit
  53. # additions and 64-bit integer loads, not to mention specific
  54. # instruction scheduling. In 64-bit build naturally only 2.0 code path
  55. # is assembled. In 32-bit application context both code paths are
  56. # assembled, PA-RISC 2.0 CPU is detected at run-time and proper path
  57. # is taken automatically. Also, in 32-bit build the module imposes
  58. # couple of limitations: vector lengths has to be even and vector
  59. # addresses has to be 64-bit aligned. Normally neither is a problem:
  60. # most common key lengths are even and vectors are commonly malloc-ed,
  61. # which ensures alignment.
  62. #
  63. # Special thanks to polarhome.com for providing HP-UX account on
  64. # PA-RISC 1.1 machine, and to correspondent who chose to remain
  65. # anonymous for testing the code on PA-RISC 2.0 machine.
  66. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  67. $flavour = shift;
  68. $output = shift;
  69. open STDOUT,">$output";
  70. if ($flavour =~ /64/) {
  71. $LEVEL ="2.0W";
  72. $SIZE_T =8;
  73. $FRAME_MARKER =80;
  74. $SAVED_RP =16;
  75. $PUSH ="std";
  76. $PUSHMA ="std,ma";
  77. $POP ="ldd";
  78. $POPMB ="ldd,mb";
  79. $BN_SZ =$SIZE_T;
  80. } else {
  81. $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
  82. $SIZE_T =4;
  83. $FRAME_MARKER =48;
  84. $SAVED_RP =20;
  85. $PUSH ="stw";
  86. $PUSHMA ="stwm";
  87. $POP ="ldw";
  88. $POPMB ="ldwm";
  89. $BN_SZ =$SIZE_T;
  90. if (open CONF,"<${dir}../../opensslconf.h") {
  91. while(<CONF>) {
  92. if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
  93. $BN_SZ=8;
  94. $LEVEL="2.0";
  95. last;
  96. }
  97. }
  98. close CONF;
  99. }
  100. }
# Stack-frame layout: 8 callee-saved registers + ABI frame marker,
# plus 32 bytes of local scratch ($LOCALS is the offset of the locals
# area from the frame base $fp).
$FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
# [+ argument transfer]
$LOCALS=$FRAME-$FRAME_MARKER;
$FRAME+=32; # local variables
# Integer register allocation. %r26..%r23 are the first four argument
# registers per the PA-RISC calling convention (matches ARGW0..ARGW3 in
# the .EXPORT directive below).
$tp="%r31"; # running pointer into tp[] scratch vector
$ti1="%r29"; # temporaries holding tp[] words
$ti0="%r28";
$rp="%r26"; # arg0: result vector
$ap="%r25"; # arg1: a vector
$bp="%r24"; # arg2: b vector
$np="%r23"; # arg3: modulus vector
$n0="%r22"; # passed through stack in 32-bit
$num="%r21"; # passed through stack in 32-bit
$idx="%r20"; # loop index j, kept negative and counted up toward 0
$arrsz="%r19"; # vector size in bytes
$nm1="%r7"; # np[]*m partial products
$nm0="%r6";
$ab1="%r5"; # ap[]*bp[] partial products
$ab0="%r4";
$fp="%r3"; # frame base pointer
$hi1="%r2"; # carry/high words
$hi0="%r1";
$xfer=$n0; # accommodates [-16..15] offset in fld[dw]s
# FP registers: xmpyu multiplies 32-bit halves (L/R) of FP registers.
$fm0="%fr4"; $fti=$fm0;
$fbi="%fr5L"; # current bp[i] word
$fn0="%fr5R"; # low word of n0
$fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
$fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
# Procedure header and standard prologue: save return pointer and
# callee-saved %r3..%r10, then set $fp to the base of the new frame.
# (Heredoc text is emitted assembly; annotation stays at Perl level.)
$code=<<___;
.LEVEL $LEVEL
.SPACE \$TEXT\$
.SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
.EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
.ALIGN 64
bn_mul_mont
.PROC
.CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
.ENTRY
$PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
$PUSHMA %r3,$FRAME(%sp)
$PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
$PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
$PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
$PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
$PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
$PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
$PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
ldo -$FRAME(%sp),$fp
___
# 32-bit ABI only: the 5th and 6th arguments (n0, num) arrive on the
# stack rather than in registers.
$code.=<<___ if ($SIZE_T==4);
ldw `-$FRAME_MARKER-4`($fp),$n0
ldw `-$FRAME_MARKER-8`($fp),$num
nop
nop ; alignment
___
# 32-bit limb build: enforce the documented limitations (num >= 6 and
# even, ap/np 64-bit aligned), returning 0 ("unhandled") otherwise.
$code.=<<___ if ($BN_SZ==4);
comiclr,<= 6,$num,%r0 ; are vectors long enough?
b L\$abort
ldi 0,%r28 ; signal "unhandled"
add,ev %r0,$num,$num ; is $num even?
b L\$abort
nop
or $ap,$np,$ti1
extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
b L\$abort
nop
nop ; alignment
nop
fldws 0($n0),${fn0}
fldws,ma 4($bp),${fbi} ; bp[0]
___
# 64-bit limb build: num is doubled because the core works on 32-bit
# halves, and words are picked up in flipped order.
$code.=<<___ if ($BN_SZ==8);
comib,> 3,$num,L\$abort ; are vectors long enough?
ldi 0,%r28 ; signal "unhandled"
addl $num,$num,$num ; I operate on 32-bit values
fldws 4($n0),${fn0} ; only low part of n0
fldws 4($bp),${fbi} ; bp[0] in flipped word order
___
# Common setup: allocate the aligned tp[num+1] scratch area above the
# frame, start the first ap[]*bp[0] multiplications, and compute
# m = tp[0]*n0 for the Montgomery reduction.
$code.=<<___;
fldds 0($ap),${fai} ; ap[0,1]
fldds 0($np),${fni} ; np[0,1]
sh2addl $num,%r0,$arrsz
ldi 31,$hi0
ldo 36($arrsz),$hi1 ; space for tp[num+1]
andcm $hi1,$hi0,$hi1 ; align
addl $hi1,%sp,%sp
$PUSH $fp,-$SIZE_T(%sp)
ldo `$LOCALS+16`($fp),$xfer
ldo `$LOCALS+32+4`($fp),$tp
xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
xmpyu ${fn0},${fab0}R,${fm0}
addl $arrsz,$ap,$ap ; point at the end
addl $arrsz,$np,$np
subi 0,$arrsz,$idx ; j=0
ldo 8($idx),$idx ; j++++
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
fstds ${fab0},-16($xfer)
fstds ${fnm0},-8($xfer)
fstds ${fab1},0($xfer)
fstds ${fnm1},8($xfer)
flddx $idx($ap),${fai} ; ap[2,3]
flddx $idx($np),${fni} ; np[2,3]
___
# 32-bit build: run-time CPU dispatch. The extrd instruction below is
# PA-RISC 2.0-only; per the trailing comment it executes (nullifying
# the branch) on 2.0, while a 1.x CPU falls through to L\$parisc11.
$code.=<<___ if ($BN_SZ==4);
mtctl $hi0,%cr11 ; $hi0 still holds 31
extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
b L\$parisc11
nop
___
# PA-RISC 2.0 code path, first pass (i==0): software-pipelined loop
# computing tp[] = ap[]*bp[0] + np[]*m. 64-bit carry-free additions
# (addl) and 64-bit loads (ldd) interleave with the FPU multiplies;
# the loop body is unrolled 2x (j advances by 8 bytes / two words).
$code.=<<___; # PA-RISC 2.0 code-path
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
ldd -16($xfer),$ab0
fstds ${fab0},-16($xfer)
extrd,u $ab0,31,32,$hi0
extrd,u $ab0,63,32,$ab0
ldd -8($xfer),$nm0
fstds ${fnm0},-8($xfer)
ldo 8($idx),$idx ; j++++
addl $ab0,$nm0,$nm0 ; low part is discarded
extrd,u $nm0,31,32,$hi1
L\$1st
xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
ldd 0($xfer),$ab1
fstds ${fab1},0($xfer)
addl $hi0,$ab1,$ab1
extrd,u $ab1,31,32,$hi0
ldd 8($xfer),$nm1
fstds ${fnm1},8($xfer)
extrd,u $ab1,63,32,$ab1
addl $hi1,$nm1,$nm1
flddx $idx($ap),${fai} ; ap[j,j+1]
flddx $idx($np),${fni} ; np[j,j+1]
addl $ab1,$nm1,$nm1
extrd,u $nm1,31,32,$hi1
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
ldd -16($xfer),$ab0
fstds ${fab0},-16($xfer)
addl $hi0,$ab0,$ab0
extrd,u $ab0,31,32,$hi0
ldd -8($xfer),$nm0
fstds ${fnm0},-8($xfer)
extrd,u $ab0,63,32,$ab0
addl $hi1,$nm0,$nm0
stw $nm1,-4($tp) ; tp[j-1]
addl $ab0,$nm0,$nm0
stw,ma $nm0,8($tp) ; tp[j-1]
addib,<> 8,$idx,L\$1st ; j++++
extrd,u $nm0,31,32,$hi1
xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
ldd 0($xfer),$ab1
fstds ${fab1},0($xfer)
addl $hi0,$ab1,$ab1
extrd,u $ab1,31,32,$hi0
ldd 8($xfer),$nm1
fstds ${fnm1},8($xfer)
extrd,u $ab1,63,32,$ab1
addl $hi1,$nm1,$nm1
ldd -16($xfer),$ab0
addl $ab1,$nm1,$nm1
ldd -8($xfer),$nm0
extrd,u $nm1,31,32,$hi1
addl $hi0,$ab0,$ab0
extrd,u $ab0,31,32,$hi0
stw $nm1,-4($tp) ; tp[j-1]
extrd,u $ab0,63,32,$ab0
addl $hi1,$nm0,$nm0
ldd 0($xfer),$ab1
addl $ab0,$nm0,$nm0
ldd,mb 8($xfer),$nm1
extrd,u $nm0,31,32,$hi1
stw,ma $nm0,8($tp) ; tp[j-1]
ldo -1($num),$num ; i--
subi 0,$arrsz,$idx ; j=0
___
# Fetch bp[1] for the next outer iteration; access pattern differs
# between 32- and 64-bit limb layouts (flipped word order in the latter).
$code.=<<___ if ($BN_SZ==4);
fldws,ma 4($bp),${fbi} ; bp[1]
___
$code.=<<___ if ($BN_SZ==8);
fldws 0($bp),${fbi} ; bp[1] in flipped word order
___
# PA-RISC 2.0 outer loop (i = 1..num-1): each iteration accumulates
# ap[]*bp[i] + np[]*m into tp[]. The awkward tp[0] handling goes
# through the FPU (fcnvxf/fadd/fcnvfx) because the intermediate value
# is 33 bits wide; m for the next iteration is derived from it.
$code.=<<___;
flddx $idx($ap),${fai} ; ap[0,1]
flddx $idx($np),${fni} ; np[0,1]
fldws 8($xfer),${fti}R ; tp[0]
addl $hi0,$ab1,$ab1
extrd,u $ab1,31,32,$hi0
extrd,u $ab1,63,32,$ab1
ldo 8($idx),$idx ; j++++
xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
addl $hi1,$nm1,$nm1
addl $ab1,$nm1,$nm1
extrd,u $nm1,31,32,$hi1
fstws,mb ${fab0}L,-8($xfer) ; save high part
stw $nm1,-4($tp) ; tp[j-1]
fcpy,sgl %fr0,${fti}L ; zero high part
fcpy,sgl %fr0,${fab0}L
addl $hi1,$hi0,$hi0
extrd,u $hi0,31,32,$hi1
fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
fcnvxf,dbl,dbl ${fab0},${fab0}
stw $hi0,0($tp)
stw $hi1,4($tp)
fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
xmpyu ${fn0},${fab0}R,${fm0}
ldo `$LOCALS+32+4`($fp),$tp
L\$outer
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
fstds ${fab0},-16($xfer) ; 33-bit value
fstds ${fnm0},-8($xfer)
flddx $idx($ap),${fai} ; ap[2]
flddx $idx($np),${fni} ; np[2]
ldo 8($idx),$idx ; j++++
ldd -16($xfer),$ab0 ; 33-bit value
ldd -8($xfer),$nm0
ldw 0($xfer),$hi0 ; high part
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
extrd,u $ab0,31,32,$ti0 ; carry bit
extrd,u $ab0,63,32,$ab0
fstds ${fab1},0($xfer)
addl $ti0,$hi0,$hi0 ; account carry bit
fstds ${fnm1},8($xfer)
addl $ab0,$nm0,$nm0 ; low part is discarded
ldw 0($tp),$ti1 ; tp[1]
extrd,u $nm0,31,32,$hi1
fstds ${fab0},-16($xfer)
fstds ${fnm0},-8($xfer)
L\$inner
xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
ldd 0($xfer),$ab1
fstds ${fab1},0($xfer)
addl $hi0,$ti1,$ti1
addl $ti1,$ab1,$ab1
ldd 8($xfer),$nm1
fstds ${fnm1},8($xfer)
extrd,u $ab1,31,32,$hi0
extrd,u $ab1,63,32,$ab1
flddx $idx($ap),${fai} ; ap[j,j+1]
flddx $idx($np),${fni} ; np[j,j+1]
addl $hi1,$nm1,$nm1
addl $ab1,$nm1,$nm1
ldw 4($tp),$ti0 ; tp[j]
stw $nm1,-4($tp) ; tp[j-1]
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
ldd -16($xfer),$ab0
fstds ${fab0},-16($xfer)
addl $hi0,$ti0,$ti0
addl $ti0,$ab0,$ab0
ldd -8($xfer),$nm0
fstds ${fnm0},-8($xfer)
extrd,u $ab0,31,32,$hi0
extrd,u $nm1,31,32,$hi1
ldw 8($tp),$ti1 ; tp[j]
extrd,u $ab0,63,32,$ab0
addl $hi1,$nm0,$nm0
addl $ab0,$nm0,$nm0
stw,ma $nm0,8($tp) ; tp[j-1]
addib,<> 8,$idx,L\$inner ; j++++
extrd,u $nm0,31,32,$hi1
xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
ldd 0($xfer),$ab1
fstds ${fab1},0($xfer)
addl $hi0,$ti1,$ti1
addl $ti1,$ab1,$ab1
ldd 8($xfer),$nm1
fstds ${fnm1},8($xfer)
extrd,u $ab1,31,32,$hi0
extrd,u $ab1,63,32,$ab1
ldw 4($tp),$ti0 ; tp[j]
addl $hi1,$nm1,$nm1
addl $ab1,$nm1,$nm1
ldd -16($xfer),$ab0
ldd -8($xfer),$nm0
extrd,u $nm1,31,32,$hi1
addl $hi0,$ab0,$ab0
addl $ti0,$ab0,$ab0
stw $nm1,-4($tp) ; tp[j-1]
extrd,u $ab0,31,32,$hi0
ldw 8($tp),$ti1 ; tp[j]
extrd,u $ab0,63,32,$ab0
addl $hi1,$nm0,$nm0
ldd 0($xfer),$ab1
addl $ab0,$nm0,$nm0
ldd,mb 8($xfer),$nm1
extrd,u $nm0,31,32,$hi1
stw,ma $nm0,8($tp) ; tp[j-1]
addib,= -1,$num,L\$outerdone ; i--
subi 0,$arrsz,$idx ; j=0
___
# Advance bp to the next word; in the 64-bit limb layout the two
# 32-bit halves of each bp limb are visited in flipped order.
$code.=<<___ if ($BN_SZ==4);
fldws,ma 4($bp),${fbi} ; bp[i]
___
$code.=<<___ if ($BN_SZ==8);
ldi 12,$ti0 ; bp[i] in flipped word order
addl,ev %r0,$num,$num
ldi -4,$ti0
addl $ti0,$bp,$bp
fldws 0($bp),${fbi}
___
# Outer-loop epilogue: fold remaining carries into tp[num-1]/tp[num],
# compute next m via the 33-bit FPU path, and branch back to L\$outer.
$code.=<<___;
flddx $idx($ap),${fai} ; ap[0]
addl $hi0,$ab1,$ab1
flddx $idx($np),${fni} ; np[0]
fldws 8($xfer),${fti}R ; tp[0]
addl $ti1,$ab1,$ab1
extrd,u $ab1,31,32,$hi0
extrd,u $ab1,63,32,$ab1
ldo 8($idx),$idx ; j++++
xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
ldw 4($tp),$ti0 ; tp[j]
addl $hi1,$nm1,$nm1
fstws,mb ${fab0}L,-8($xfer) ; save high part
addl $ab1,$nm1,$nm1
extrd,u $nm1,31,32,$hi1
fcpy,sgl %fr0,${fti}L ; zero high part
fcpy,sgl %fr0,${fab0}L
stw $nm1,-4($tp) ; tp[j-1]
fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
fcnvxf,dbl,dbl ${fab0},${fab0}
addl $hi1,$hi0,$hi0
fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
addl $ti0,$hi0,$hi0
extrd,u $hi0,31,32,$hi1
fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
stw $hi0,0($tp)
stw $hi1,4($tp)
xmpyu ${fn0},${fab0}R,${fm0}
b L\$outer
ldo `$LOCALS+32+4`($fp),$tp
L\$outerdone
addl $hi0,$ab1,$ab1
addl $ti1,$ab1,$ab1
extrd,u $ab1,31,32,$hi0
extrd,u $ab1,63,32,$ab1
ldw 4($tp),$ti0 ; tp[j]
addl $hi1,$nm1,$nm1
addl $ab1,$nm1,$nm1
extrd,u $nm1,31,32,$hi1
stw $nm1,-4($tp) ; tp[j-1]
addl $hi1,$hi0,$hi0
addl $ti0,$hi0,$hi0
extrd,u $hi0,31,32,$hi1
stw $hi0,0($tp)
stw $hi1,4($tp)
ldo `$LOCALS+32`($fp),$tp
sub %r0,%r0,%r0 ; clear borrow
___
# Final conditional subtraction rp[] = tp[] - np[]. 32-bit limb build:
# word-at-a-time with borrow; if rp is not 64-bit aligned, fall back to
# the pa11 variant of the loop.
$code.=<<___ if ($BN_SZ==4);
ldws,ma 4($tp),$ti0
extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
b L\$sub_pa11
addl $tp,$arrsz,$tp
L\$sub
ldwx $idx($np),$hi0
subb $ti0,$hi0,$hi1
ldwx $idx($tp),$ti0
addib,<> 4,$idx,L\$sub
stws,ma $hi1,4($rp)
subb $ti0,%r0,$hi1
___
# 64-bit limb build: word order inside each limb must be flipped back
# (shrpd) before subtracting; the flipped value is also written back to
# tp[] so the copy-out loop below sees consistent data.
$code.=<<___ if ($BN_SZ==8);
ldd,ma 8($tp),$ti0
L\$sub
ldd $idx($np),$hi0
shrpd $ti0,$ti0,32,$ti0 ; flip word order
std $ti0,-8($tp) ; save flipped value
sub,db $ti0,$hi0,$hi1
ldd,ma 8($tp),$ti0
addib,<> 8,$idx,L\$sub
std,ma $hi1,8($rp)
extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
sub,db $ti0,%r0,$hi1
___
# Copy-out: select tp[] (when the subtraction borrowed) or the already
# stored difference, and wipe tp[] as we go.
$code.=<<___;
ldo `$LOCALS+32`($fp),$tp
sub $rp,$arrsz,$rp ; rewind rp
subi 0,$arrsz,$idx
L\$copy
ldd 0($tp),$ti0
ldd 0($rp),$hi0
std,ma %r0,8($tp)
comiclr,= 0,$hi1,%r0
copy $ti0,$hi0
addib,<> 8,$idx,L\$copy
std,ma $hi0,8($rp)
___
# PA-RISC 1.1 code path, assembled only in 32-bit builds and selected
# at run time when the CPU is not 2.0-capable. Structure mirrors the
# 2.0 path (first pass, outer/inner loops, conditional subtract, copy)
# but uses 32-bit add/addc carry chains instead of 64-bit addl/extrd,
# and word loads (ldw) instead of doubleword ldd. The register aliases
# below rename the 2.0 registers for their 32-bit hi/lo roles.
if ($BN_SZ==4) { # PA-RISC 1.1 code-path
$ablo=$ab0;
$abhi=$ab1;
$nmlo0=$nm0;
$nmhi0=$nm1;
$nmlo1="%r9";
$nmhi1="%r8";
# Single heredoc: annotation beyond the assembly's own ';' comments
# would end up in the emitted .s file, so none is added inside.
$code.=<<___;
b L\$done
nop
.ALIGN 8
L\$parisc11
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
ldw -12($xfer),$ablo
ldw -16($xfer),$hi0
ldw -4($xfer),$nmlo0
ldw -8($xfer),$nmhi0
fstds ${fab0},-16($xfer)
fstds ${fnm0},-8($xfer)
ldo 8($idx),$idx ; j++++
add $ablo,$nmlo0,$nmlo0 ; discarded
addc %r0,$nmhi0,$hi1
ldw 4($xfer),$ablo
ldw 0($xfer),$abhi
nop
L\$1st_pa11
xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
flddx $idx($ap),${fai} ; ap[j,j+1]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
flddx $idx($np),${fni} ; np[j,j+1]
add $hi0,$ablo,$ablo
ldw 12($xfer),$nmlo1
addc %r0,$abhi,$hi0
ldw 8($xfer),$nmhi1
add $ablo,$nmlo1,$nmlo1
fstds ${fab1},0($xfer)
addc %r0,$nmhi1,$nmhi1
fstds ${fnm1},8($xfer)
add $hi1,$nmlo1,$nmlo1
ldw -12($xfer),$ablo
addc %r0,$nmhi1,$hi1
ldw -16($xfer),$abhi
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
ldw -4($xfer),$nmlo0
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
ldw -8($xfer),$nmhi0
add $hi0,$ablo,$ablo
stw $nmlo1,-4($tp) ; tp[j-1]
addc %r0,$abhi,$hi0
fstds ${fab0},-16($xfer)
add $ablo,$nmlo0,$nmlo0
fstds ${fnm0},-8($xfer)
addc %r0,$nmhi0,$nmhi0
ldw 0($xfer),$abhi
add $hi1,$nmlo0,$nmlo0
ldw 4($xfer),$ablo
stws,ma $nmlo0,8($tp) ; tp[j-1]
addib,<> 8,$idx,L\$1st_pa11 ; j++++
addc %r0,$nmhi0,$hi1
ldw 8($xfer),$nmhi1
ldw 12($xfer),$nmlo1
xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
add $hi0,$ablo,$ablo
fstds ${fab1},0($xfer)
addc %r0,$abhi,$hi0
fstds ${fnm1},8($xfer)
add $ablo,$nmlo1,$nmlo1
ldw -16($xfer),$abhi
addc %r0,$nmhi1,$nmhi1
ldw -12($xfer),$ablo
add $hi1,$nmlo1,$nmlo1
ldw -8($xfer),$nmhi0
addc %r0,$nmhi1,$hi1
ldw -4($xfer),$nmlo0
add $hi0,$ablo,$ablo
stw $nmlo1,-4($tp) ; tp[j-1]
addc %r0,$abhi,$hi0
ldw 0($xfer),$abhi
add $ablo,$nmlo0,$nmlo0
ldw 4($xfer),$ablo
addc %r0,$nmhi0,$nmhi0
ldws,mb 8($xfer),$nmhi1
add $hi1,$nmlo0,$nmlo0
ldw 4($xfer),$nmlo1
addc %r0,$nmhi0,$hi1
stws,ma $nmlo0,8($tp) ; tp[j-1]
ldo -1($num),$num ; i--
subi 0,$arrsz,$idx ; j=0
fldws,ma 4($bp),${fbi} ; bp[1]
flddx $idx($ap),${fai} ; ap[0,1]
flddx $idx($np),${fni} ; np[0,1]
fldws 8($xfer),${fti}R ; tp[0]
add $hi0,$ablo,$ablo
addc %r0,$abhi,$hi0
ldo 8($idx),$idx ; j++++
xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
add $hi1,$nmlo1,$nmlo1
addc %r0,$nmhi1,$nmhi1
add $ablo,$nmlo1,$nmlo1
addc %r0,$nmhi1,$hi1
fstws,mb ${fab0}L,-8($xfer) ; save high part
stw $nmlo1,-4($tp) ; tp[j-1]
fcpy,sgl %fr0,${fti}L ; zero high part
fcpy,sgl %fr0,${fab0}L
add $hi1,$hi0,$hi0
addc %r0,%r0,$hi1
fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
fcnvxf,dbl,dbl ${fab0},${fab0}
stw $hi0,0($tp)
stw $hi1,4($tp)
fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
xmpyu ${fn0},${fab0}R,${fm0}
ldo `$LOCALS+32+4`($fp),$tp
L\$outer_pa11
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
fstds ${fab0},-16($xfer) ; 33-bit value
fstds ${fnm0},-8($xfer)
flddx $idx($ap),${fai} ; ap[2,3]
flddx $idx($np),${fni} ; np[2,3]
ldw -16($xfer),$abhi ; carry bit actually
ldo 8($idx),$idx ; j++++
ldw -12($xfer),$ablo
ldw -8($xfer),$nmhi0
ldw -4($xfer),$nmlo0
ldw 0($xfer),$hi0 ; high part
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
fstds ${fab1},0($xfer)
addl $abhi,$hi0,$hi0 ; account carry bit
fstds ${fnm1},8($xfer)
add $ablo,$nmlo0,$nmlo0 ; discarded
ldw 0($tp),$ti1 ; tp[1]
addc %r0,$nmhi0,$hi1
fstds ${fab0},-16($xfer)
fstds ${fnm0},-8($xfer)
ldw 4($xfer),$ablo
ldw 0($xfer),$abhi
L\$inner_pa11
xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
flddx $idx($ap),${fai} ; ap[j,j+1]
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
flddx $idx($np),${fni} ; np[j,j+1]
add $hi0,$ablo,$ablo
ldw 4($tp),$ti0 ; tp[j]
addc %r0,$abhi,$abhi
ldw 12($xfer),$nmlo1
add $ti1,$ablo,$ablo
ldw 8($xfer),$nmhi1
addc %r0,$abhi,$hi0
fstds ${fab1},0($xfer)
add $ablo,$nmlo1,$nmlo1
fstds ${fnm1},8($xfer)
addc %r0,$nmhi1,$nmhi1
ldw -12($xfer),$ablo
add $hi1,$nmlo1,$nmlo1
ldw -16($xfer),$abhi
addc %r0,$nmhi1,$hi1
xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
ldw 8($tp),$ti1 ; tp[j]
xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
ldw -4($xfer),$nmlo0
add $hi0,$ablo,$ablo
ldw -8($xfer),$nmhi0
addc %r0,$abhi,$abhi
stw $nmlo1,-4($tp) ; tp[j-1]
add $ti0,$ablo,$ablo
fstds ${fab0},-16($xfer)
addc %r0,$abhi,$hi0
fstds ${fnm0},-8($xfer)
add $ablo,$nmlo0,$nmlo0
ldw 4($xfer),$ablo
addc %r0,$nmhi0,$nmhi0
ldw 0($xfer),$abhi
add $hi1,$nmlo0,$nmlo0
stws,ma $nmlo0,8($tp) ; tp[j-1]
addib,<> 8,$idx,L\$inner_pa11 ; j++++
addc %r0,$nmhi0,$hi1
xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
ldw 12($xfer),$nmlo1
xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
ldw 8($xfer),$nmhi1
add $hi0,$ablo,$ablo
ldw 4($tp),$ti0 ; tp[j]
addc %r0,$abhi,$abhi
fstds ${fab1},0($xfer)
add $ti1,$ablo,$ablo
fstds ${fnm1},8($xfer)
addc %r0,$abhi,$hi0
ldw -16($xfer),$abhi
add $ablo,$nmlo1,$nmlo1
ldw -12($xfer),$ablo
addc %r0,$nmhi1,$nmhi1
ldw -8($xfer),$nmhi0
add $hi1,$nmlo1,$nmlo1
ldw -4($xfer),$nmlo0
addc %r0,$nmhi1,$hi1
add $hi0,$ablo,$ablo
stw $nmlo1,-4($tp) ; tp[j-1]
addc %r0,$abhi,$abhi
add $ti0,$ablo,$ablo
ldw 8($tp),$ti1 ; tp[j]
addc %r0,$abhi,$hi0
ldw 0($xfer),$abhi
add $ablo,$nmlo0,$nmlo0
ldw 4($xfer),$ablo
addc %r0,$nmhi0,$nmhi0
ldws,mb 8($xfer),$nmhi1
add $hi1,$nmlo0,$nmlo0
ldw 4($xfer),$nmlo1
addc %r0,$nmhi0,$hi1
stws,ma $nmlo0,8($tp) ; tp[j-1]
addib,= -1,$num,L\$outerdone_pa11; i--
subi 0,$arrsz,$idx ; j=0
fldws,ma 4($bp),${fbi} ; bp[i]
flddx $idx($ap),${fai} ; ap[0]
add $hi0,$ablo,$ablo
addc %r0,$abhi,$abhi
flddx $idx($np),${fni} ; np[0]
fldws 8($xfer),${fti}R ; tp[0]
add $ti1,$ablo,$ablo
addc %r0,$abhi,$hi0
ldo 8($idx),$idx ; j++++
xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
ldw 4($tp),$ti0 ; tp[j]
add $hi1,$nmlo1,$nmlo1
addc %r0,$nmhi1,$nmhi1
fstws,mb ${fab0}L,-8($xfer) ; save high part
add $ablo,$nmlo1,$nmlo1
addc %r0,$nmhi1,$hi1
fcpy,sgl %fr0,${fti}L ; zero high part
fcpy,sgl %fr0,${fab0}L
stw $nmlo1,-4($tp) ; tp[j-1]
fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
fcnvxf,dbl,dbl ${fab0},${fab0}
add $hi1,$hi0,$hi0
addc %r0,%r0,$hi1
fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
add $ti0,$hi0,$hi0
addc %r0,$hi1,$hi1
fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
stw $hi0,0($tp)
stw $hi1,4($tp)
xmpyu ${fn0},${fab0}R,${fm0}
b L\$outer_pa11
ldo `$LOCALS+32+4`($fp),$tp
L\$outerdone_pa11
add $hi0,$ablo,$ablo
addc %r0,$abhi,$abhi
add $ti1,$ablo,$ablo
addc %r0,$abhi,$hi0
ldw 4($tp),$ti0 ; tp[j]
add $hi1,$nmlo1,$nmlo1
addc %r0,$nmhi1,$nmhi1
add $ablo,$nmlo1,$nmlo1
addc %r0,$nmhi1,$hi1
stw $nmlo1,-4($tp) ; tp[j-1]
add $hi1,$hi0,$hi0
addc %r0,%r0,$hi1
add $ti0,$hi0,$hi0
addc %r0,$hi1,$hi1
stw $hi0,0($tp)
stw $hi1,4($tp)
ldo `$LOCALS+32+4`($fp),$tp
sub %r0,%r0,%r0 ; clear borrow
ldw -4($tp),$ti0
addl $tp,$arrsz,$tp
L\$sub_pa11
ldwx $idx($np),$hi0
subb $ti0,$hi0,$hi1
ldwx $idx($tp),$ti0
addib,<> 4,$idx,L\$sub_pa11
stws,ma $hi1,4($rp)
subb $ti0,%r0,$hi1
ldo `$LOCALS+32`($fp),$tp
sub $rp,$arrsz,$rp ; rewind rp
subi 0,$arrsz,$idx
L\$copy_pa11
ldw 0($tp),$ti0
ldw 0($rp),$hi0
stws,ma %r0,4($tp)
comiclr,= 0,$hi1,%r0
copy $ti0,$hi0
addib,<> 4,$idx,L\$copy_pa11
stws,ma $hi0,4($rp)
nop ; alignment
L\$done
___
}
# Common epilogue: return 1 ("handled"), release the tp[num+1] area by
# restoring %sp from $fp, reload callee-saved registers, and return.
# L\$abort lands here past the register reloads with %r28 already 0.
$code.=<<___;
ldi 1,%r28 ; signal "handled"
ldo $FRAME($fp),%sp ; destroy tp[num+1]
$POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
$POP `-$FRAME+1*$SIZE_T`(%sp),%r4
$POP `-$FRAME+2*$SIZE_T`(%sp),%r5
$POP `-$FRAME+3*$SIZE_T`(%sp),%r6
$POP `-$FRAME+4*$SIZE_T`(%sp),%r7
$POP `-$FRAME+5*$SIZE_T`(%sp),%r8
$POP `-$FRAME+6*$SIZE_T`(%sp),%r9
$POP `-$FRAME+7*$SIZE_T`(%sp),%r10
L\$abort
bv (%r2)
.EXIT
$POPMB -$FRAME(%sp),%r3
.PROCEND
.STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
___
  812. # Explicitly encode PA-RISC 2.0 instructions used in this module, so
  813. # that it can be compiled with .LEVEL 1.0. It should be noted that I
  814. # wouldn't have to do this, if GNU assembler understood .ALLOW 2.0
  815. # directive...
  816. my $ldd = sub {
  817. my ($mod,$args) = @_;
  818. my $orig = "ldd$mod\t$args";
  819. if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
  820. { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
  821. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  822. }
  823. elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
  824. { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
  825. $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
  826. $opcode|=(1<<5) if ($mod =~ /^,m/);
  827. $opcode|=(1<<13) if ($mod =~ /^,mb/);
  828. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  829. }
  830. else { "\t".$orig; }
  831. };
  832. my $std = sub {
  833. my ($mod,$args) = @_;
  834. my $orig = "std$mod\t$args";
  835. if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
  836. { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
  837. $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
  838. $opcode|=(1<<5) if ($mod =~ /^,m/);
  839. $opcode|=(1<<13) if ($mod =~ /^,mb/);
  840. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  841. }
  842. else { "\t".$orig; }
  843. };
  844. my $extrd = sub {
  845. my ($mod,$args) = @_;
  846. my $orig = "extrd$mod\t$args";
  847. # I only have ",u" completer, it's implicitly encoded...
  848. if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
  849. { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
  850. my $len=32-$3;
  851. $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
  852. $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
  853. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  854. }
  855. elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
  856. { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
  857. my $len=32-$2;
  858. $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
  859. $opcode |= (1<<13) if ($mod =~ /,\**=/);
  860. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  861. }
  862. else { "\t".$orig; }
  863. };
  864. my $shrpd = sub {
  865. my ($mod,$args) = @_;
  866. my $orig = "shrpd$mod\t$args";
  867. if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
  868. { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
  869. my $cpos=63-$3;
  870. $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
  871. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  872. }
  873. else { "\t".$orig; }
  874. };
  875. my $sub = sub {
  876. my ($mod,$args) = @_;
  877. my $orig = "sub$mod\t$args";
  878. if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
  879. my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
  880. $opcode|=(1<<10); # e1
  881. $opcode|=(1<<8); # e2
  882. $opcode|=(1<<5); # d
  883. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
  884. }
  885. else { "\t".$orig; }
  886. };
  887. sub assemble {
  888. my ($mnemonic,$mod,$args)=@_;
  889. my $opcode = eval("\$$mnemonic");
  890. ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
  891. }
# Detect GNU as: its PA-RISC dialect needs the directive rewrites
# applied in the loop below (lowercase 2.0w, .text instead of
# .SPACE/.SUBSPA). NOTE(review): assumes $ENV{CC} is set by the build.
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
=~ /GNU assembler/) {
$gnuas = 1;
}
# Post-process $code line by line and emit it. Order matters: the
# backtick arithmetic must be evaluated before assemble() parses the
# operands.
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/ge;
# flip word order in 64-bit mode...
s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
# assemble 2.0 instructions in 32-bit mode...
s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
s/(\.LEVEL\s+2\.0)W/$1w/ if ($gnuas && $SIZE_T==8);
s/\.SPACE\s+\$TEXT\$/.text/ if ($gnuas && $SIZE_T==8);
s/\.SUBSPA.*// if ($gnuas && $SIZE_T==8);
s/\bbv\b/bve/ if ($SIZE_T==8);
print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";