; highbd_sad4d_sse2.asm
;
;  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;

%include "third_party/x86inc/x86inc.asm"

SECTION .text
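
; All routines below compute, for one source block, the sums of absolute
; differences (SADs) against four reference blocks at once. SSE2 has no
; psadbw equivalent for 16-bit samples, so |src - ref| is built from two
; saturating subtractions: psubusw clamps negative word results to zero,
; so max(src - ref, 0) OR max(ref - src, 0) yields the absolute difference
; in each word lane (one of the two operands is always zero). pmaddwd
; against a register of word 1s (m1, set up in HIGH_SADNXN4D below) then
; widens and pairwise-sums the differences into dword accumulators.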
; HIGH_PROCESS_4x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro HIGH_PROCESS_4x2x4 5-6 0
  movh                  m0, [srcq +%2*2]
%if %1 == 1
  movu                  m4, [ref1q+%3*2]
  movu                  m5, [ref2q+%3*2]
  movu                  m6, [ref3q+%3*2]
  movu                  m7, [ref4q+%3*2]
  movhps                m0, [srcq +%4*2]
  movhps                m4, [ref1q+%5*2]
  movhps                m5, [ref2q+%5*2]
  movhps                m6, [ref3q+%5*2]
  movhps                m7, [ref4q+%5*2]
  mova                  m3, m0
  mova                  m2, m0
  psubusw               m3, m4
  psubusw               m2, m5
  psubusw               m4, m0
  psubusw               m5, m0
  por                   m4, m3
  por                   m5, m2
  pmaddwd               m4, m1
  pmaddwd               m5, m1
  mova                  m3, m0
  mova                  m2, m0
  psubusw               m3, m6
  psubusw               m2, m7
  psubusw               m6, m0
  psubusw               m7, m0
  por                   m6, m3
  por                   m7, m2
  pmaddwd               m6, m1
  pmaddwd               m7, m1
%else
  movu                  m2, [ref1q+%3*2]
  movhps                m0, [srcq +%4*2]
  movhps                m2, [ref1q+%5*2]
  mova                  m3, m0
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  pmaddwd               m2, m1
  paddd                 m4, m2
  movu                  m2, [ref2q+%3*2]
  mova                  m3, m0
  movhps                m2, [ref2q+%5*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  pmaddwd               m2, m1
  paddd                 m5, m2
  movu                  m2, [ref3q+%3*2]
  mova                  m3, m0
  movhps                m2, [ref3q+%5*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  pmaddwd               m2, m1
  paddd                 m6, m2
  movu                  m2, [ref4q+%3*2]
  mova                  m3, m0
  movhps                m2, [ref4q+%5*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  pmaddwd               m2, m1
  paddd                 m7, m2
%endif
%if %6 == 1
  lea                 srcq, [srcq +src_strideq*4]
  lea                ref1q, [ref1q+ref_strideq*4]
  lea                ref2q, [ref2q+ref_strideq*4]
  lea                ref3q, [ref3q+ref_strideq*4]
  lea                ref4q, [ref4q+ref_strideq*4]
%endif
%endmacro
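
; In the %1 == 1 ("first") path above, m4-m7 are freshly initialized with
; the first row pair's costs; in the %else path new costs are accumulated
; into them with paddd. One dword accumulator per reference buffer:
;   m4 <- ref1, m5 <- ref2, m6 <- ref3, m7 <- ref4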
; HIGH_PROCESS_8x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro HIGH_PROCESS_8x2x4 5-6 0
; 1st 8 px
  mova                  m0, [srcq +%2*2]
%if %1 == 1
  movu                  m4, [ref1q+%3*2]
  movu                  m5, [ref2q+%3*2]
  movu                  m6, [ref3q+%3*2]
  movu                  m7, [ref4q+%3*2]
  mova                  m3, m0
  mova                  m2, m0
  psubusw               m3, m4
  psubusw               m2, m5
  psubusw               m4, m0
  psubusw               m5, m0
  por                   m4, m3
  por                   m5, m2
  pmaddwd               m4, m1
  pmaddwd               m5, m1
  mova                  m3, m0
  mova                  m2, m0
  psubusw               m3, m6
  psubusw               m2, m7
  psubusw               m6, m0
  psubusw               m7, m0
  por                   m6, m3
  por                   m7, m2
  pmaddwd               m6, m1
  pmaddwd               m7, m1
%else
  mova                  m3, m0
  movu                  m2, [ref1q+%3*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  mova                  m3, m0
  pmaddwd               m2, m1
  paddd                 m4, m2
  movu                  m2, [ref2q+%3*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  mova                  m3, m0
  pmaddwd               m2, m1
  paddd                 m5, m2
  movu                  m2, [ref3q+%3*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  mova                  m3, m0
  pmaddwd               m2, m1
  paddd                 m6, m2
  movu                  m2, [ref4q+%3*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  pmaddwd               m2, m1
  paddd                 m7, m2
%endif
; 2nd 8 px
  mova                  m0, [srcq +(%4)*2]
  mova                  m3, m0
  movu                  m2, [ref1q+(%5)*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  mova                  m3, m0
  pmaddwd               m2, m1
  paddd                 m4, m2
  movu                  m2, [ref2q+(%5)*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  mova                  m3, m0
  pmaddwd               m2, m1
  paddd                 m5, m2
  movu                  m2, [ref3q+(%5)*2]
  psubusw               m3, m2
  psubusw               m2, m0
  por                   m2, m3
  mova                  m3, m0
  pmaddwd               m2, m1
  paddd                 m6, m2
  movu                  m2, [ref4q+(%5)*2]
  psubusw               m3, m2
  psubusw               m2, m0
%if %6 == 1
  lea                 srcq, [srcq +src_strideq*4]
  lea                ref1q, [ref1q+ref_strideq*4]
  lea                ref2q, [ref2q+ref_strideq*4]
  lea                ref3q, [ref3q+ref_strideq*4]
  lea                ref4q, [ref4q+ref_strideq*4]
%endif
  por                   m2, m3
  pmaddwd               m2, m1
  paddd                 m7, m2
%endmacro
; HIGH_PROCESS_16x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro HIGH_PROCESS_16x2x4 5-6 0
  HIGH_PROCESS_8x2x4 %1, %2, %3, (%2 + 8), (%3 + 8)
  HIGH_PROCESS_8x2x4 0, %4, %5, (%4 + 8), (%5 + 8), %6
%endmacro

; HIGH_PROCESS_32x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro HIGH_PROCESS_32x2x4 5-6 0
  HIGH_PROCESS_16x2x4 %1, %2, %3, (%2 + 16), (%3 + 16)
  HIGH_PROCESS_16x2x4 0, %4, %5, (%4 + 16), (%5 + 16), %6
%endmacro

; HIGH_PROCESS_64x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro HIGH_PROCESS_64x2x4 5-6 0
  HIGH_PROCESS_32x2x4 %1, %2, %3, (%2 + 32), (%3 + 32)
  HIGH_PROCESS_32x2x4 0, %4, %5, (%4 + 32), (%5 + 32), %6
%endmacro
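
; The wider PROCESS macros reuse the narrower ones with reinterpreted
; offsets: the first inner call covers row 0 as two adjacent half-width
; chunks (%2 and %2 + half), the second covers row 1 (%4 is the stride
; offset) and forwards the optional advance_at_end flag. Only the first
; inner call keeps the "first" flag that seeds the accumulators; the
; second always accumulates (first = 0).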
; void vpx_highbd_sadNxNx4d_sse2(uint8_t *src, int src_stride,
;                                uint8_t *ref[4], int ref_stride,
;                                uint32_t res[4]);
; Instantiated below for NxN = 64x64, 64x32, 32x64, 32x32, 32x16, 16x32,
; 16x16, 16x8, 8x16, 8x8, 8x4, 4x8 and 4x4.
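;
; For reference, the operation being vectorized is equivalent to this C
; sketch (a simplified model, not the libvpx C fallback verbatim; the
; uint8_t* arguments are assumed to be CONVERT_TO_BYTEPTR-style handles
; to 16-bit sample buffers):
;
;   const uint16_t *s = CONVERT_TO_SHORTPTR(src);
;   for (int r = 0; r < 4; ++r) {
;     const uint16_t *p = CONVERT_TO_SHORTPTR(ref[r]);
;     uint32_t sad = 0;
;     for (int y = 0; y < N; ++y)
;       for (int x = 0; x < N; ++x)
;         sad += abs(s[y * src_stride + x] - p[y * ref_stride + x]);
;     res[r] = sad;
;   }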
%macro HIGH_SADNXN4D 2
%if UNIX64
cglobal highbd_sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
                            res, ref2, ref3, ref4
%else
cglobal highbd_sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
                            ref2, ref3, ref4
%endif
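
; On UNIX64 all five arguments arrive in registers, so res stays live in
; r4; elsewhere (e.g. Win64, where only four arguments are passed in
; registers) res is reloaded from its stack slot at the end via
; "movifnidn r4, r4mp".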
; set m1
  push                srcq
  mov                 srcd, 0x00010001
  movd                  m1, srcd
  pshufd                m1, m1, 0x0
  pop                 srcq
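
; srcq is borrowed as a scratch register to build the constant, hence the
; push/pop. m1 now holds the word 1 in all eight lanes, so "pmaddwd x, m1"
; is a horizontal add of adjacent 16-bit lanes of x into four 32-bit
; lanes: with word differences d0..d7, the result is
; (d0+d1, d2+d3, d4+d5, d6+d7). This widening keeps the per-reference SAD
; accumulators from overflowing 16 bits on large blocks with 10/12-bit
; input.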
  movsxdifnidn src_strideq, src_strided
  movsxdifnidn ref_strideq, ref_strided
  mov                ref2q, [ref1q+gprsize*1]
  mov                ref3q, [ref1q+gprsize*2]
  mov                ref4q, [ref1q+gprsize*3]
  mov                ref1q, [ref1q+gprsize*0]

; convert byte pointers to short pointers
  shl                 srcq, 1
  shl                ref2q, 1
  shl                ref3q, 1
  shl                ref4q, 1
  shl                ref1q, 1
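
; The shifts above rely on libvpx's high-bit-depth pointer convention: the
; caller passes CONVERT_TO_BYTEPTR handles (the real uint16_t address
; shifted right by one), so shifting left by one recovers the actual
; sample address. From here on, every offset in the PROCESS macros is
; scaled by *2 because samples are 16 bits wide.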
  HIGH_PROCESS_%1x2x4 1, 0, 0, src_strideq, ref_strideq, 1
%rep (%2-4)/2
  HIGH_PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 1
%endrep
  HIGH_PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 0
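
; Each PROCESS invocation covers two rows (row 0 and row 1 via the
; src_strideq/ref_strideq offsets), so a %2-row block needs %2/2 calls:
; one seeding call, (%2-4)/2 accumulating calls, and a final call that
; skips the pointer advance. Advancing by strideq*4 bytes moves down two
; rows of 16-bit samples.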
; N.B. HIGH_PROCESS outputs dwords (32 bits), so in high bit depth even
; the smallest width (4) needs 128 bits, i.e. an XMM register.
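; Reduce the four dword accumulators to one SAD per reference:
;   movhlps/paddd folds each register's high 64 bits onto its low 64 bits,
;   punpckldq interleaves the partial sums of (m4,m5) and of (m6,m7),
;   a second movhlps/paddd pass finishes the horizontal adds, and
;   punpcklqdq packs the four totals into m4 as (ref1, ref2, ref3, ref4),
;   ready to be stored to res[4].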
  movhlps               m0, m4
  movhlps               m1, m5
  movhlps               m2, m6
  movhlps               m3, m7
  paddd                 m4, m0
  paddd                 m5, m1
  paddd                 m6, m2
  paddd                 m7, m3
  punpckldq             m4, m5
  punpckldq             m6, m7
  movhlps               m0, m4
  movhlps               m1, m6
  paddd                 m4, m0
  paddd                 m6, m1
  punpcklqdq            m4, m6
  movifnidn             r4, r4mp
  movu                [r4], m4
  RET
%endmacro

INIT_XMM sse2
HIGH_SADNXN4D 64, 64
HIGH_SADNXN4D 64, 32
HIGH_SADNXN4D 32, 64
HIGH_SADNXN4D 32, 32
HIGH_SADNXN4D 32, 16
HIGH_SADNXN4D 16, 32
HIGH_SADNXN4D 16, 16
HIGH_SADNXN4D 16,  8
HIGH_SADNXN4D  8, 16
HIGH_SADNXN4D  8,  8
HIGH_SADNXN4D  8,  4
HIGH_SADNXN4D  4,  8
HIGH_SADNXN4D  4,  4