;******************************************************************************
;* Pixel utilities SIMD
;*
;* Copyright (C) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (C) 2014 Clément Bœsch <u pkh me>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
;                               const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
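; Plain MMX has no psadbw, so each byte's absolute difference is built from
; two saturating subtractions OR'ed together, widened to words against a zero
; register and accumulated in m6 (8*8*255 = 16320, which still fits in 16 bits).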
INIT_MMX mmx
cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
    pxor        m7, m7
    pxor        m6, m6
%rep 4
    mova        m0, [src1q]
    mova        m2, [src1q + stride1q]
    mova        m1, [src2q]
    mova        m3, [src2q + stride2q]
    psubusb     m4, m0, m1
    psubusb     m5, m2, m3
    psubusb     m1, m0
    psubusb     m3, m2
    por         m1, m4
    por         m3, m5
    punpcklbw   m0, m1, m7
    punpcklbw   m2, m3, m7
    punpckhbw   m1, m7
    punpckhbw   m3, m7
    paddw       m0, m1
    paddw       m2, m3
    paddw       m0, m2
    paddw       m6, m0
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
%endrep
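    ; Horizontal sum of the four word counters in m6: fold the high dword onto
    ; the low one, then the high word onto the low word; movzx drops whatever
    ; is left in the upper half of eax.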
    psrlq       m0, m6, 32
    paddw       m6, m0
    psrlq       m0, m6, 16
    paddw       m6, m0
    movd        eax, m6
    movzx       eax, ax
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
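; MMXEXT adds psadbw, which sums the absolute differences of all 8 bytes into
; the low word of the destination, so each row costs one psadbw plus one add.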
INIT_MMX mmxext
cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
    pxor        m2, m2
%rep 4
    mova        m0, [src1q]
    mova        m1, [src1q + stride1q]
    psadbw      m0, [src2q]
    psadbw      m1, [src2q + stride2q]
    paddw       m2, m0
    paddw       m2, m1
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
%endrep
    movd        eax, m2
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
;                                    const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
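; Same psadbw approach, processing each 16-byte row as two 8-byte halves.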
INIT_MMX mmxext
cglobal pixelutils_sad_16x16, 4,4,0, src1, stride1, src2, stride2
    pxor        m2, m2
%rep 16
    mova        m0, [src1q]
    mova        m1, [src1q + 8]
    psadbw      m0, [src2q]
    psadbw      m1, [src2q + 8]
    paddw       m2, m0
    paddw       m2, m1
    add         src1q, stride1q
    add         src2q, stride2q
%endrep
    movd        eax, m2
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
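; One unaligned 16-byte load per row; two rows are compared per iteration and
; the per-qword psadbw partial sums are accumulated in m4.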
INIT_XMM sse2
cglobal pixelutils_sad_16x16, 4,4,5, src1, stride1, src2, stride2
    movu        m4, [src1q]
    movu        m2, [src2q]
    movu        m1, [src1q + stride1q]
    movu        m3, [src2q + stride2q]
    psadbw      m4, m2
    psadbw      m1, m3
    paddw       m4, m1
%rep 7
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
    movu        m0, [src1q]
    movu        m2, [src2q]
    movu        m1, [src1q + stride1q]
    movu        m3, [src2q + stride2q]
    psadbw      m0, m2
    psadbw      m1, m3
    paddw       m4, m0
    paddw       m4, m1
%endrep
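    ; psadbw leaves one partial sum per 64-bit half; fold the high half onto
    ; the low one so movd returns the total.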
    movhlps     m0, m4
    paddw       m4, m0
    movd        eax, m4
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_[au]_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                       const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
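; %1 selects the src2 load: 'a' uses mova and assumes both buffers are 16-byte
; aligned, 'u' loads src2 unaligned while src1 is still read as the psadbw
; memory operand and therefore has to stay aligned.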
%macro SAD_XMM_16x16 1
INIT_XMM sse2
cglobal pixelutils_sad_%1_16x16, 4,4,3, src1, stride1, src2, stride2
    mov%1       m2, [src2q]
    psadbw      m2, [src1q]
    mov%1       m1, [src2q + stride2q]
    psadbw      m1, [src1q + stride1q]
    paddw       m2, m1
%rep 7
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
    mov%1       m0, [src2q]
    psadbw      m0, [src1q]
    mov%1       m1, [src2q + stride2q]
    psadbw      m1, [src1q + stride1q]
    paddw       m2, m0
    paddw       m2, m1
%endrep
    movhlps     m0, m2
    paddw       m2, m0
    movd        eax, m2
    RET
%endmacro

SAD_XMM_16x16 a
SAD_XMM_16x16 u
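
; The 32-wide helpers below work directly on the raw argument registers
; (r0/r1 = src1/stride1, r2/r3 = src2/stride2) and accumulate four rows of
; two 16-byte halves each into m0.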
%macro PROCESS_SAD_32x4_U 0
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
%endmacro
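
; Same four-row step, parameterized on the src2 load (mova/movu), with src1
; read through the psadbw memory operand.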
%macro PROCESS_SAD_32x4 1
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
%endmacro

;-----------------------------------------------------------------------------
; int ff_pixelutils_sad_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-----------------------------------------------------------------------------
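; 4 loop iterations x 2 macro calls x 4 rows = 32 rows; totals can exceed
; 16 bits here (32*32*255 = 261120), hence the paddd accumulation.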
INIT_XMM sse2
cglobal pixelutils_sad_32x32, 4,5,5, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 4
.loop:
    PROCESS_SAD_32x4_U
    PROCESS_SAD_32x4_U
    dec         r4d
    jnz         .loop
    movhlps     m1, m0
    paddd       m0, m1
    movd        eax, m0
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_[au]_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                       const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
%macro SAD_XMM_32x32 1
INIT_XMM sse2
cglobal pixelutils_sad_%1_32x32, 4,5,3, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 4
.loop:
    PROCESS_SAD_32x4 %1
    PROCESS_SAD_32x4 %1
    dec         r4d
    jnz         .loop
    movhlps     m1, m0
    paddd       m0, m1
    movd        eax, m0
    RET
%endmacro

SAD_XMM_32x32 a
SAD_XMM_32x32 u

%if HAVE_AVX2_EXTERNAL
;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
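; A whole 32-byte row fits in one ymm register; r5/r6 cache 3*stride so four
; rows can be addressed per iteration without extra pointer updates.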
INIT_YMM avx2
cglobal pixelutils_sad_32x32, 4,7,5, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 32/4
    lea         r5, [stride1q * 3]
    lea         r6, [stride2q * 3]
.loop:
    movu        m1, [src1q]                ; row 0 of pix0
    movu        m2, [src2q]                ; row 0 of pix1
    movu        m3, [src1q + stride1q]     ; row 1 of pix0
    movu        m4, [src2q + stride2q]     ; row 1 of pix1
    psadbw      m1, m2
    psadbw      m3, m4
    paddd       m0, m1
    paddd       m0, m3
    movu        m1, [src1q + 2 * stride1q] ; row 2 of pix0
    movu        m2, [src2q + 2 * stride2q] ; row 2 of pix1
    movu        m3, [src1q + r5]           ; row 3 of pix0
    movu        m4, [src2q + r6]           ; row 3 of pix1
    psadbw      m1, m2
    psadbw      m3, m4
    paddd       m0, m1
    paddd       m0, m3
    lea         src2q, [src2q + 4 * stride2q]
    lea         src1q, [src1q + 4 * stride1q]
    dec         r4d
    jnz         .loop
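    ; Reduce the four per-qword counters: fold the upper 128-bit lane into the
    ; lower one, then add the remaining high-qword counter (dword 2) into
    ; dword 0 before extracting the result.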
    vextracti128 xm1, m0, 1
    paddd       xm0, xm1
    pshufd      xm1, xm0, 2
    paddd       xm0, xm1
    movd        eax, xm0
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_[au]_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
;                                       const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
%macro SAD_AVX2_32x32 1
INIT_YMM avx2
cglobal pixelutils_sad_%1_32x32, 4,7,3, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 32/4
    lea         r5, [stride1q * 3]
    lea         r6, [stride2q * 3]
.loop:
    mov%1       m1, [src2q]                ; row 0 of pix1
    psadbw      m1, [src1q]
    mov%1       m2, [src2q + stride2q]     ; row 1 of pix1
    psadbw      m2, [src1q + stride1q]
    paddd       m0, m1
    paddd       m0, m2
    mov%1       m1, [src2q + 2 * stride2q] ; row 2 of pix1
    psadbw      m1, [src1q + 2 * stride1q]
    mov%1       m2, [src2q + r6]           ; row 3 of pix1
    psadbw      m2, [src1q + r5]
    paddd       m0, m1
    paddd       m0, m2
    lea         src2q, [src2q + 4 * stride2q]
    lea         src1q, [src1q + 4 * stride1q]
    dec         r4d
    jnz         .loop
    vextracti128 xm1, m0, 1
    paddd       xm0, xm1
    pshufd      xm1, xm0, 2
    paddd       xm0, xm1
    movd        eax, xm0
    RET
%endmacro

SAD_AVX2_32x32 a
SAD_AVX2_32x32 u
%endif