#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Poly1305 hash for C64x+.
#
# October 2015
#
# Performance is [incredible for a 32-bit processor] 1.82 cycles per
# processed byte. Comparison to compiler-generated code is problematic,
# because results were observed to vary from 2.1 to 7.6 cpb depending
# on compiler's ability to inline small functions. Compiler also
# disables interrupts for some reason, thus making interrupt response
# time dependent on input length. This module on the other hand is free
# from such limitation.
  27. $output=pop;
  28. open STDOUT,">$output";
  29. ($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
  30. ($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
  31. ($D0,$D1,$D2,$D3)= ("A9","B9","A11","B11");
  32. ($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
  33. ($THREE,$R0b,$S2a)=("B7","B5","A5");
$code.=<<___;
	.text

	; Pre-7.x TI assemblers do not predefine __TI_EABI__, so give it a
	; default of 0 to keep the conditional below well-formed.
	.if	.ASSEMBLER_VERSION<7000000
	.asg	0,__TI_EABI__
	.endif
	; Under EABI the public names carry no leading underscore; alias the
	; underscored labels used below to the bare exported names.
	.if	__TI_EABI__
	.asg	poly1305_init,_poly1305_init
	.asg	poly1305_blocks,_poly1305_blocks
	.asg	poly1305_emit,_poly1305_emit
	.endif

	.asg	B3,RA			; return address register
	.asg	A15,FP			; frame pointer
	.asg	B15,SP			; stack pointer

	; On little-endian targets input bytes are already in the order the
	; arithmetic expects, so the byte-swap mnemonics become plain moves.
	.if	.LITTLE_ENDIAN
	.asg	MV,SWAP2
	.asg	MV.L,SWAP4
	.endif
;-----------------------------------------------------------------------
; int poly1305_init(void *ctx, const unsigned char key[16])
;
; A4 - context: h0..h4 are zeroed here; clamped r and precomputed s
;      values are stored at the offsets poly1305_blocks loads them from
; B4 - 16-byte key, loaded with non-aligned LDNDW
;
; Clamps the key (r0 masked with 0x0fffffff, r1..r3 with 0x0ffffffc)
; and precomputes si = ri + (ri>>2) for i=1..3. Returns 0 in A4.
;-----------------------------------------------------------------------
	.global	_poly1305_init
_poly1305_init:
	.asmfunc
	LDNDW	*${INPB}[0],B17:B16	; load key material
	LDNDW	*${INPB}[1],A17:A16
||	ZERO	B9:B8
||	MVK	-1,B0
	STDW	B9:B8,*${CTXA}[0]	; initialize h1:h0
||	SHRU	B0,4,B0			; 0x0fffffff
||	MVK	-4,B1
	STDW	B9:B8,*${CTXA}[1]	; initialize h3:h2
||	AND	B0,B1,B1		; 0x0ffffffc
	STW	B8,*${CTXA}[4]		; initialize h4

	; byte-swap the key words on big-endian targets so the arithmetic
	; below sees little-endian values (no-op moves on little-endian)
	.if	.BIG_ENDIAN
	SWAP2	B16,B17
||	SWAP2	B17,B16
	SWAP2	A16,A17
||	SWAP2	A17,A16
	SWAP4	B16,B16
||	SWAP4	A16,A16
	SWAP4	B17,B17
||	SWAP4	A17,A17
	.endif

	AND	B16,B0,B20		; r0 = key[0] & 0x0fffffff
||	AND	B17,B1,B22		; r1 = key[1] & 0x0ffffffc
||	EXTU	B17,4,6,B16		; r1>>2
	AND	A16,B1,B21		; r2 = key[2] & 0x0ffffffc
||	AND	A17,B1,A23		; r3 = key[3] & 0x0ffffffc
||	BNOP	RA			; branch now, stores fill delay slots
	SHRU	B21,2,B18		; r2>>2
||	ADD	B22,B16,B16		; s1 = r1 + r1>>2
	STDW	B21:B20,*${CTXA}[3]	; save r2:r0
||	ADD	B21,B18,B18		; s2 = r2 + r2>>2
||	SHRU	A23,2,B17		; r3>>2
||	MV	A23,B23
	STDW	B23:B22,*${CTXA}[4]	; save r3:r1
||	ADD	B23,B17,B19		; s3 = r3 + r3>>2
||	ADD	B23,B17,B17		; s3 = r3 + r3>>2
	STDW	B17:B16,*${CTXA}[5]	; save s3:s1
	STDW	B19:B18,*${CTXA}[6]	; save s3:s2
||	ZERO	A4			; return 0
	.endasmfunc
;-----------------------------------------------------------------------
; void poly1305_blocks(void *ctx, const void *inp, size_t len, u32 padbit)
;
; A4 - context: h0..h4 at word offsets 0-4; r2:r0, r3:r1, s3:s1, s3:s2
;      at double-word offsets 3-6 (as laid out by poly1305_init)
; B4 - input; no alignment assumed (LDNW throughout)
; A6 - length in bytes; processed in whole 16-byte blocks, tail ignored
; B6 - padbit, added as bit 128 of every block (h4+=padbit)
;
; A2 holds the remaining block count. One trip through loop? both
; finishes the current block (multiply by r with s-assisted partial
; reduction) and absorbs the next block's words, so loads and swaps are
; interleaved with the multiply packets. Between iterations the
; accumulator lives in B24 (h0), B26 (h1), B28 (h2), B30 (h3) and h4.
;-----------------------------------------------------------------------
	.global	_poly1305_blocks
	.align	32
_poly1305_blocks:
	.asmfunc	stack_usage(40)
	SHRU	$LEN,4,A2		; A2 is loop counter, number of blocks
  [!A2]	BNOP	RA			; no data
|| [A2]	STW	FP,*SP--(40)		; save frame pointer and alloca(40)
|| [A2]	MV	SP,FP
   [A2]	STDW	B13:B12,*SP[4]		; ABI says so
|| [A2]	MV	$CTXA,$S3b		; borrow $S3b
   [A2]	STDW	B11:B10,*SP[3]
|| [A2]	STDW	A13:A12,*FP[-3]
   [A2]	STDW	A11:A10,*FP[-4]
|| [A2]	LDDW	*${S3b}[0],B25:B24	; load h1:h0
   [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
   [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]
	LDDW	*${CTXA}[1],B29:B28	; load h3:h2, B28 is h2
	LDNW	*${INPB}[-2],$D2	; load inp[2]
	LDNW	*${INPB}[-1],$D3	; load inp[3]

	LDDW	*${CTXA}[3],$R2:$R0	; load r2:r0
||	LDDW	*${S3b}[4],$R3:$R1	; load r3:r1
||	SWAP2	$D0,$D0
	LDDW	*${CTXA}[5],$S3:$S1	; load s3:s1
||	LDDW	*${S3b}[6],$S3b:$S2	; load s3:s2
||	SWAP4	$D0,$D0
||	SWAP2	$D1,$D1

	; absorb first block: h += inp, carries kept in the D registers
	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
||	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
||	SWAP4	$D1,$D1
	ADDU	$D1,B25,$D1:$H1		; h1+=inp[1]
||	MVK	3,$THREE
||	SWAP2	$D2,$D2
	LDW	*${CTXA}[4],$H4		; load h4
||	SWAP4	$D2,$D2
||	MV	B29,B30			; B30 is h3

	MV	$R0,$R0b		; cross-path copy of r0

loop?:
	MPY32U	$H0,$R0,A17:A16
||	MPY32U	B27,$R1,B17:B16		; MPY32U	$H0,$R1,B17:B16
||	ADDU	$D0,$D1:$H1,B25:B24	; ADDU		$D0,$D1:$H1,$D1:$H1
||	ADDU	$D2,B28,$D2:$H2		; h2+=inp[2]
||	SWAP2	$D3,$D3
	MPY32U	$H0,$R2,A19:A18
||	MPY32U	B27,$R3,B19:B18		; MPY32U	$H0,$R3,B19:B18
||	ADD	$D0,$H1,A24		; A-copy of B24
||	SWAP4	$D3,$D3
|| [A2]	SUB	A2,1,A2			; decrement loop counter

	MPY32U	A24,$S3,A21:A20		; MPY32U	$H1,$S3,A21:A20
||	MPY32U	B24,$R0b,B21:B20	; MPY32U	$H1,$R0,B21:B20
||	ADDU	B25,$D2:$H2,$D2:$H2	; ADDU		$D1,$D2:$H2,$D2:$H2
||	ADDU	$D3,B30,$D3:$H3		; h3+=inp[3]
||	ADD	B25,$H2,B25		; B-copy of $H2
	MPY32U	A24,$R1,A23:A22		; MPY32U	$H1,$R1,A23:A22
||	MPY32U	B24,$R2,B23:B22		; MPY32U	$H1,$R2,B23:B22

	MPY32U	$H2,$S2,A25:A24
||	MPY32U	B25,$S3b,B25:B24	; MPY32U	$H2,$S3,B25:B24
||	ADDU	$D2,$D3:$H3,$D3:$H3
||	ADD	$PADBIT,$H4,$H4		; h4+=padbit
	MPY32U	$H2,$R0,A27:A26
||	MPY32U	$H2,$R1,B27:B26
||	ADD	$D3,$H4,$H4
||	MV	$S2,$S2a		; cross-path copy of s2

	MPY32U	$H3,$S1,A29:A28
||	MPY32U	$H3,$S2,B29:B28
||	ADD	A21,A17,A21		; start accumulating "d3:d0"
||	ADD	B21,B17,B21
||	ADDU	A20,A16,A17:A16
||	ADDU	B20,B16,B17:B16
|| [A2]	LDNW	*${INPB}++[4],$D0	; load inp[0]
	MPY32U	$H3,$S3,A31:A30
||	MPY32U	$H3,$R0b,B31:B30
||	ADD	A23,A19,A23
||	ADD	B23,B19,B23
||	ADDU	A22,A18,A19:A18
||	ADDU	B22,B18,B19:B18
|| [A2]	LDNW	*${INPB}[-3],$D1	; load inp[1]

	MPY32	$H4,$S1,B20
||	MPY32	$H4,$S2a,A20
||	ADD	A25,A21,A21
||	ADD	B25,B21,B21
||	ADDU	A24,A17:A16,A17:A16
||	ADDU	B24,B17:B16,B17:B16
|| [A2]	LDNW	*${INPB}[-2],$D2	; load inp[2]
	MPY32	$H4,$S3b,B22
||	ADD	A27,A23,A23
||	ADD	B27,B23,B23
||	ADDU	A26,A19:A18,A19:A18
||	ADDU	B26,B19:B18,B19:B18
|| [A2]	LDNW	*${INPB}[-1],$D3	; load inp[3]

	MPY32	$H4,$R0b,$H4
||	ADD	A29,A21,A21		; final hi("d0")
||	ADD	B29,B21,B21		; final hi("d1")
||	ADDU	A28,A17:A16,A17:A16	; final lo("d0")
||	ADDU	B28,B17:B16,B17:B16
	ADD	A31,A23,A23		; final hi("d2")
||	ADD	B31,B23,B23		; final hi("d3")
||	ADDU	A30,A19:A18,A19:A18
||	ADDU	B30,B19:B18,B19:B18

	ADDU	B20,B17:B16,B17:B16	; final lo("d1")
||	ADDU	A20,A19:A18,A19:A18	; final lo("d2")
	ADDU	B22,B19:B18,B19:B18	; final lo("d3")
||	ADD	A17,A21,A21		; "flatten" "d3:d0"
	MV	A19,B29			; move to avoid cross-path stalls
	ADDU	A21,B17:B16,B27:B26	; B26 is h1
	ADD	B21,B27,B27
||	DMV	B29,A18,B29:B28		; move to avoid cross-path stalls
	ADDU	B27,B29:B28,B29:B28	; B28 is h2
|| [A2]	SWAP2	$D0,$D0
	ADD	A23,B29,B29
|| [A2]	SWAP4	$D0,$D0
	ADDU	B29,B19:B18,B31:B30	; B30 is h3
	ADD	B23,B31,B31
||	MV	A16,B24			; B24 is h0
|| [A2]	SWAP2	$D1,$D1
	ADD	B31,$H4,$H4
|| [A2]	SWAP4	$D1,$D1

	SHRU	$H4,2,B16		; last reduction step
||	AND	$H4,$THREE,$H4
	ADDAW	B16,B16,B16		; 5*(h4>>2)
|| [A2]	BNOP	loop?			; branch; next packets fill delay slots

	ADDU	B24,B16,B25:B24		; B24 is h0
|| [A2]	SWAP2	$D2,$D2
	ADDU	B26,B25,B27:B26		; B26 is h1
|| [A2]	SWAP4	$D2,$D2
	ADDU	B28,B27,B29:B28		; B28 is h2
|| [A2]	ADDU	$D0,B24,$D0:$H0		; h0+=inp[0]
|| [A2]	ADD	$D0,B24,B27		; B-copy of h0+inp[0]
	ADDU	B30,B29,B31:B30		; B30 is h3
	ADD	B31,$H4,$H4
|| [A2]	ADDU	$D1,B26,$D1:$H1		; h1+=inp[1]
;;===== branch to loop? is taken here

	LDDW	*FP[-4],A11:A10		; ABI says so
	LDDW	*FP[-3],A13:A12
||	LDDW	*SP[3],B11:B10
	LDDW	*SP[4],B13:B12
||	MV	B26,B25
||	BNOP	RA			; return; stores fill delay slots
	LDW	*++SP(40),FP		; restore frame pointer
||	MV	B30,B29
	STDW	B25:B24,*${CTXA}[0]	; save h1:h0
	STDW	B29:B28,*${CTXA}[1]	; save h3:h2
	STW	$H4,*${CTXA}[4]		; save h4
	NOP	1			; fill remaining branch delay slot
	.endasmfunc
___
  238. {
  239. my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);
  240. $code.=<<___;
  241. .global _poly1305_emit
  242. .align 32
  243. _poly1305_emit:
  244. .asmfunc
  245. LDDW *${CTXA}[0],A17:A16 ; load h1:h0
  246. LDDW *${CTXA}[1],A19:A18 ; load h3:h2
  247. LDW *${CTXA}[4],A20 ; load h4
  248. MV $NONCEA,$NONCEB
  249. MVK 5,A22 ; compare to modulus
  250. ADDU A16,A22,A23:A22
  251. || LDW *${NONCEA}[0],A8
  252. || LDW *${NONCEB}[1],B8
  253. ADDU A17,A23,A25:A24
  254. || LDW *${NONCEA}[2],A9
  255. || LDW *${NONCEB}[3],B9
  256. ADDU A19,A25,A27:A26
  257. ADDU A19,A27,A29:A28
  258. ADD A20,A29,A29
  259. SHRU A29,2,A2 ; check for overflow in 130-th bit
  260. [A2] MV A22,A16 ; select
  261. || [A2] MV A24,A17
  262. [A2] MV A26,A18
  263. || [A2] MV A28,A19
  264. || ADDU A8,A16,A23:A22 ; accumulate nonce
  265. ADDU B8,A17,A25:A24
  266. || SWAP2 A22,A22
  267. ADDU A23,A25:A24,A25:A24
  268. ADDU A9,A18,A27:A26
  269. || SWAP2 A24,A24
  270. ADDU A25,A27:A26,A27:A26
  271. || ADD B9,A19,A28
  272. ADD A27,A28,A28
  273. || SWAP2 A26,A26
  274. .if .BIG_ENDIAN
  275. SWAP2 A28,A28
  276. || SWAP4 A22,A22
  277. || SWAP4 A24,B24
  278. SWAP4 A26,A26
  279. SWAP4 A28,A28
  280. || MV B24,A24
  281. .endif
  282. BNOP RA,1
  283. STNW A22,*${MAC}[0] ; write the result
  284. STNW A24,*${MAC}[1]
  285. STNW A26,*${MAC}[2]
  286. STNW A28,*${MAC}[3]
  287. .endasmfunc
  288. ___
  289. }
  290. $code.=<<___;
  291. .sect .const
  292. .cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
  293. .align 4
  294. ___
  295. print $code;