;******************************************************************************
;* linear least squares model
;*
;* Copyright (c) 2013 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

%define MAX_VARS 32
%define MAX_VARS_ALIGN (MAX_VARS+4)
%define COVAR_STRIDE MAX_VARS_ALIGN*8
%define COVAR(x,y) [covarq + (x)*8 + (y)*COVAR_STRIDE]
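
; COVAR(x,y) addresses covariance[y][x]: x steps move between adjacent
; doubles, and each y step advances one padded row of MAX_VARS_ALIGN doubles.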
struc LLSModel
    .covariance:  resq MAX_VARS_ALIGN*MAX_VARS_ALIGN
    .coeff:       resq MAX_VARS*MAX_VARS
    .variance:    resq MAX_VARS
    .indep_count: resd 1
endstruc
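
; This layout is expected to mirror the C-side struct (a sketch; the
; authoritative definition lives in libavutil/lls.h):
;
;   typedef struct LLSModel {
;       DECLARE_ALIGNED(32, double, covariance)[MAX_VARS_ALIGN][MAX_VARS_ALIGN];
;       DECLARE_ALIGNED(32, double, coeff)[MAX_VARS][MAX_VARS];
;       double variance[MAX_VARS];
;       int indep_count;
;       /* function pointers follow in the real header */
;   } LLSModel;
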
%macro ADDPD_MEM 2 ; dst (mem), src (reg): dst += src; src is clobbered with the sum
%if cpuflag(avx)
    vaddpd %2, %2, %1
%else
    addpd  %2, %1
%endif
    mova   %1, %2
%endmacro
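
; update_lls accumulates the outer product var*var^T into the upper triangle
; of m->covariance. A scalar sketch of the same update (assuming the C
; fallback in libavutil/lls.c):
;
;   for (i = 0; i <= m->indep_count; i++)
;       for (j = i; j <= m->indep_count; j++)
;           m->covariance[i][j] += var[i] * var[j];
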
INIT_XMM sse2
%define movdqa movaps
cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
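    ; void ff_update_lls_sse2(LLSModel *m, const double *var)
    ; (prototype assumed from the usual ff_*_sse2 cglobal name mangling)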
    %define covarq ctxq
    mov     id, [ctxq + LLSModel.indep_count]
    lea     varq, [varq + iq*8] ; point varq at &var[count] ...
    neg     iq                  ; ... so [varq + iq*8] walks var[] as iq rises from -count to 0
    mov     covar2q, covarq
.loopi:
    ; Compute all 3 pairwise products of a 2x2 block that lies on the diagonal
    mova    m1, [varq + iq*8]      ; m1 = { v0, v1 } (current four elements v0..v3)
    mova    m3, [varq + iq*8 + 16] ; m3 = { v2, v3 }
    pshufd  m4, m1, q1010          ; m4 = { v0, v0 }
    pshufd  m5, m1, q3232          ; m5 = { v1, v1 }
    pshufd  m6, m3, q1010          ; m6 = { v2, v2 }
    pshufd  m7, m3, q3232          ; m7 = { v3, v3 }
    mulpd   m0, m1, m4
    mulpd   m1, m1, m5
    lea     covarq, [covar2q + 16]
    ADDPD_MEM COVAR(-2,0), m0
    ADDPD_MEM COVAR(-2,1), m1
    lea     jq, [iq + 2]
    cmp     jd, -2
    jg .skip4x4
.loop4x4:
    ; Compute all 16 pairwise products of a 4x4 block
    mulpd   m0, m4, m3
    mulpd   m1, m5, m3
    mulpd   m2, m6, m3
    mulpd   m3, m3, m7
    ADDPD_MEM COVAR(0,0), m0
    ADDPD_MEM COVAR(0,1), m1
    ADDPD_MEM COVAR(0,2), m2
    ADDPD_MEM COVAR(0,3), m3
    mova    m3, [varq + jq*8 + 16]
    mulpd   m0, m4, m3
    mulpd   m1, m5, m3
    mulpd   m2, m6, m3
    mulpd   m3, m3, m7
    ADDPD_MEM COVAR(2,0), m0
    ADDPD_MEM COVAR(2,1), m1
    ADDPD_MEM COVAR(2,2), m2
    ADDPD_MEM COVAR(2,3), m3
    mova    m3, [varq + jq*8 + 32]
    add     covarq, 32
    add     jq, 4
    cmp     jd, -2
    jle .loop4x4
.skip4x4:
    test    jd, jd
    jg .skip2x4
    mulpd   m4, m3
    mulpd   m5, m3
    mulpd   m6, m3
    mulpd   m7, m3
    ADDPD_MEM COVAR(0,0), m4
    ADDPD_MEM COVAR(0,1), m5
    ADDPD_MEM COVAR(0,2), m6
    ADDPD_MEM COVAR(0,3), m7
.skip2x4:
    add     iq, 4
    add     covar2q, 4*COVAR_STRIDE+32
    cmp     id, -2
    jle .loopi
    test    id, id
    jg .ret
    mov     jq, iq
    %define covarq covar2q
.loop2x1:
    ; tail: update the remaining rows two covariance entries at a time
    movsd   m0, [varq + iq*8]
    movlhps m0, m0              ; broadcast var[i] to both lanes
    mulpd   m0, [varq + jq*8]
    ADDPD_MEM COVAR(0,0), m0
    inc     iq
    add     covarq, COVAR_STRIDE
    test    id, id
    jle .loop2x1
.ret:
    REP_RET
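
; Shared body for the AVX and FMA3 versions; INIT_YMM avx / INIT_YMM fma3
; below instantiate it (producing, per the usual cglobal name mangling,
; ff_update_lls_avx and ff_update_lls_fma3).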
%macro UPDATE_LLS 0
cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
    %define covarq ctxq
    mov     countd, [ctxq + LLSModel.indep_count]
    lea     count2d, [countq-2]
    xor     id, id
.loopi:
    ; Compute all 10 pairwise products of a 4x4 block that lies on the diagonal
    mova          ymm1, [varq + iq*8]       ; ymm1 = var[i..i+3] packed
    vbroadcastsd  ymm4, [varq + iq*8]       ; ymm4..ymm7 = var[i+n] in all lanes
    vbroadcastsd  ymm5, [varq + iq*8 + 8]
    vbroadcastsd  ymm6, [varq + iq*8 + 16]
    vbroadcastsd  ymm7, [varq + iq*8 + 24]
    vextractf128  xmm3, ymm1, 1             ; xmm3 = var[i+2..i+3]
%if cpuflag(fma3)
    mova    ymm0, COVAR(iq  ,0)
    mova    xmm2, COVAR(iq+2,2)
    fmaddpd ymm0, ymm1, ymm4, ymm0
    fmaddpd xmm2, xmm3, xmm6, xmm2
    fmaddpd ymm1, ymm5, ymm1, COVAR(iq  ,1)
    fmaddpd xmm3, xmm7, xmm3, COVAR(iq+2,3)
    mova    COVAR(iq  ,0), ymm0
    mova    COVAR(iq  ,1), ymm1
    mova    COVAR(iq+2,2), xmm2
    mova    COVAR(iq+2,3), xmm3
%else
    vmulpd  ymm0, ymm1, ymm4
    vmulpd  ymm1, ymm1, ymm5
    vmulpd  xmm2, xmm3, xmm6
    vmulpd  xmm3, xmm3, xmm7
    ADDPD_MEM COVAR(iq  ,0), ymm0
    ADDPD_MEM COVAR(iq  ,1), ymm1
    ADDPD_MEM COVAR(iq+2,2), xmm2
    ADDPD_MEM COVAR(iq+2,3), xmm3
%endif ; cpuflag(fma3)
    lea     jd, [iq + 4]
    cmp     jd, count2d
    jg .skip4x4
.loop4x4:
    ; Compute all 16 pairwise products of a 4x4 block
    mova    ymm3, [varq + jq*8]
%if cpuflag(fma3)
    mova    ymm0, COVAR(jq, 0)
    mova    ymm1, COVAR(jq, 1)
    mova    ymm2, COVAR(jq, 2)
    fmaddpd ymm0, ymm3, ymm4, ymm0
    fmaddpd ymm1, ymm3, ymm5, ymm1
    fmaddpd ymm2, ymm3, ymm6, ymm2
    fmaddpd ymm3, ymm7, ymm3, COVAR(jq,3)
    mova    COVAR(jq, 0), ymm0
    mova    COVAR(jq, 1), ymm1
    mova    COVAR(jq, 2), ymm2
    mova    COVAR(jq, 3), ymm3
%else
    vmulpd  ymm0, ymm3, ymm4
    vmulpd  ymm1, ymm3, ymm5
    vmulpd  ymm2, ymm3, ymm6
    vmulpd  ymm3, ymm3, ymm7
    ADDPD_MEM COVAR(jq,0), ymm0
    ADDPD_MEM COVAR(jq,1), ymm1
    ADDPD_MEM COVAR(jq,2), ymm2
    ADDPD_MEM COVAR(jq,3), ymm3
%endif ; cpuflag(fma3)
    add     jd, 4
    cmp     jd, count2d
    jle .loop4x4
.skip4x4:
    cmp     jd, countd
    jg .skip2x4
    mova    xmm3, [varq + jq*8]
%if cpuflag(fma3)
    mova    xmm0, COVAR(jq, 0)
    mova    xmm1, COVAR(jq, 1)
    mova    xmm2, COVAR(jq, 2)
    fmaddpd xmm0, xmm3, xmm4, xmm0
    fmaddpd xmm1, xmm3, xmm5, xmm1
    fmaddpd xmm2, xmm3, xmm6, xmm2
    fmaddpd xmm3, xmm7, xmm3, COVAR(jq,3)
    mova    COVAR(jq, 0), xmm0
    mova    COVAR(jq, 1), xmm1
    mova    COVAR(jq, 2), xmm2
    mova    COVAR(jq, 3), xmm3
%else
    vmulpd  xmm0, xmm3, xmm4
    vmulpd  xmm1, xmm3, xmm5
    vmulpd  xmm2, xmm3, xmm6
    vmulpd  xmm3, xmm3, xmm7
    ADDPD_MEM COVAR(jq,0), xmm0
    ADDPD_MEM COVAR(jq,1), xmm1
    ADDPD_MEM COVAR(jq,2), xmm2
    ADDPD_MEM COVAR(jq,3), xmm3
%endif ; cpuflag(fma3)
.skip2x4:
    add     id, 4
    add     covarq, 4*COVAR_STRIDE
    cmp     id, count2d
    jle .loopi
    cmp     id, countd
    jg .ret
    mov     jd, id
.loop2x1:
    vmovddup xmm0, [varq + iq*8]
%if cpuflag(fma3)
    mova    xmm1, [varq + jq*8]
    fmaddpd xmm0, xmm1, xmm0, COVAR(jq,0)
    mova    COVAR(jq,0), xmm0
%else
    vmulpd  xmm0, [varq + jq*8]
    ADDPD_MEM COVAR(jq,0), xmm0
%endif ; cpuflag(fma3)
    inc     id
    add     covarq, COVAR_STRIDE
    cmp     id, countd
    jle .loop2x1
.ret:
    REP_RET
%endmacro ; UPDATE_LLS

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
UPDATE_LLS
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
UPDATE_LLS
%endif

INIT_XMM sse2
cglobal evaluate_lls, 3,4,2, ctx, var, order, i
    ; This function is often called on the same buffer as update_lls, but with
    ; an offset. They can't both be aligned.
    ; Load halves rather than movu to avoid store-forwarding stalls, since the
    ; input was initialized immediately prior to this function using scalar math.
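    ; double ff_evaluate_lls_sse2(LLSModel *m, const double *var, int order)
    ; computes the dot product of var[0..order] with coefficient row `order`.
    ; A scalar sketch (assuming the C fallback in libavutil/lls.c):
    ;     double out = 0;
    ;     for (i = 0; i <= order; i++)
    ;         out += var[i] * m->coeff[order][i];
    ;     return out;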
    %define coefsq ctxq
    mov     id, orderd
    imul    orderd, MAX_VARS
    lea     coefsq, [ctxq + LLSModel.coeff + orderq*8] ; coefsq = m->coeff[order]
    movsd   m0, [varq]
    movhpd  m0, [varq + 8]
    mulpd   m0, [coefsq]
    lea     coefsq, [coefsq + iq*8]
    lea     varq, [varq + iq*8]
    neg     iq
    add     iq, 2   ; elements 0 and 1 are already summed; iq = 2-order
.loop:
    movsd   m1, [varq + iq*8]
    movhpd  m1, [varq + iq*8 + 8]
    mulpd   m1, [coefsq + iq*8]
    addpd   m0, m1
    add     iq, 2
    jl .loop
    jg .skip1
    ; iq == 0: exactly one trailing element remains
    movsd   m1, [varq + iq*8]
    mulsd   m1, [coefsq + iq*8]
    addpd   m0, m1
.skip1:
    movhlps m1, m0
    addsd   m0, m1      ; horizontal sum of the two accumulator lanes
%if ARCH_X86_32
    movsd   r0m, m0
    fld     qword r0m   ; x86-32 returns doubles in st0 rather than xmm0
%endif
    RET
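
; These symbols are selected at runtime on the C side. A minimal sketch of
; the wiring, assuming the usual init pattern (cf. libavutil/x86/lls_init.c;
; exact flag macros and gating are assumptions):
;
;   av_cold void ff_init_lls_x86(LLSModel *m)
;   {
;       int cpu_flags = av_get_cpu_flags();
;       if (EXTERNAL_SSE2(cpu_flags)) {
;           m->update_lls   = ff_update_lls_sse2;
;           m->evaluate_lls = ff_evaluate_lls_sse2;
;       }
;       if (EXTERNAL_AVX(cpu_flags))
;           m->update_lls = ff_update_lls_avx;
;       if (EXTERNAL_FMA3(cpu_flags))
;           m->update_lls = ff_update_lls_fma3;
;   }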