rv40dsp_init.c

/*
 * RV40 decoder motion compensation functions x86-optimised
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder motion compensation functions x86-optimised
 * 2,0 and 0,2 have h264 equivalents.
 * 3,3 is bugged in the rv40 format and maps to the _xy2 version.
 */

#include "libavcodec/rv34dsp.h"
#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "hpeldsp.h"
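
/* Per the note in the header comment, the (3,3) quarter-pel position is
 * bugged in the rv40 format, so the DEFINE_FN wrappers below simply forward
 * that case to the corresponding half-pel _xy2 function. */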
#define DEFINE_FN(op, size, insn)                                             \
static void op##_rv40_qpel##size##_mc33_##insn(uint8_t *dst, const uint8_t *src, \
                                               ptrdiff_t stride)             \
{                                                                             \
    ff_##op##_pixels##size##_xy2_##insn(dst, src, stride, size);             \
}

#if HAVE_X86ASM
void ff_put_rv40_chroma_mc8_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_put_rv40_chroma_mc4_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);

#define DECLARE_WEIGHT(opt)                                                     \
void ff_rv40_weight_func_rnd_16_##opt  (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);      \
void ff_rv40_weight_func_rnd_8_##opt   (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);      \
void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);      \
void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);
DECLARE_WEIGHT(mmxext)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)

/** @{ */
/**
 * Define one qpel function.
 * LOOPSIZE must already be set to the number of pixels processed per
 * iteration in the inner loop of the called functions.
 * COFF(x) must already be defined to provide the offset into any
 * array of coeffs used by the called function for the qpel position x.
 */
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT)                           \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst,  \
                                                         const uint8_t *src, \
                                                         ptrdiff_t stride)   \
{                                                                       \
    int i;                                                              \
    if (PH && PV) {                                                     \
        LOCAL_ALIGNED(16, uint8_t, tmp, [SIZE * (SIZE + 5)]);           \
        uint8_t *tmpptr = tmp + SIZE * 2;                               \
        src -= stride * 2;                                              \
                                                                        \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride,    \
                                     SIZE + 5, HCOFF(PH));              \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i,   \
                                         SIZE, SIZE, VCOFF(PV));        \
    } else if (PV) {                                                    \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, VCOFF(PV));     \
    } else {                                                            \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, HCOFF(PH));     \
    }                                                                   \
}
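
/* Note on the three paths above: when both PH and PV are non-zero, the source
 * is first filtered horizontally into an aligned temporary buffer (SIZE + 5
 * rows, starting two rows above the block, to supply the vertical filter
 * taps) and then filtered vertically into dst; when only one of PH/PV is
 * non-zero, a single filtering pass writes straight to dst. */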

/** Declare functions for sizes 8 and 16 and given operations
 * and qpel position. */
#define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
    QPEL_FUNC_DECL(OP, 8,  PH, PV, OPT)  \
    QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)

/** Declare all functions for all sizes and qpel positions */
#define QPEL_MC_DECL(OP, OPT)                                           \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride,    \
                                  const uint8_t *src,                   \
                                  ptrdiff_t srcStride,                  \
                                  int len, int m);                      \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride,    \
                                  const uint8_t *src,                   \
                                  ptrdiff_t srcStride,                  \
                                  int len, int m);                      \
QPEL_FUNCS_DECL(OP, 0, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 0, 3, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 0, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 2, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 3, OPT)                                          \
QPEL_FUNCS_DECL(OP, 2, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 2, 2, OPT)                                          \
QPEL_FUNCS_DECL(OP, 2, 3, OPT)                                          \
QPEL_FUNCS_DECL(OP, 3, 0, OPT)                                          \
QPEL_FUNCS_DECL(OP, 3, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)
/** @} */
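
/* LOOPSIZE and the COFF macros parameterise QPEL_FUNC_DECL for each
 * instruction set: LOOPSIZE is the number of pixels the called function
 * processes per inner-loop iteration, while HCOFF/VCOFF translate a qpel
 * position into an offset into the coefficient array (the offsets below step
 * by 32 per position for the SSSE3 version and by 64 for SSE2/MMX). */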

#define LOOPSIZE  8
#define HCOFF(x)  (32 * ((x) - 1))
#define VCOFF(x)  (32 * ((x) - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)

#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  8
#define HCOFF(x)  (64 * ((x) - 1))
#define VCOFF(x)  (64 * ((x) - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)

#if ARCH_X86_32
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  4
#define HCOFF(x)  (64 * ((x) - 1))
#define VCOFF(x)  (64 * ((x) - 1))
QPEL_MC_DECL(put_, _mmx)
#define ff_put_rv40_qpel_h_mmxext ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmxext ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmxext)
#define ff_put_rv40_qpel_h_3dnow  ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow  ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
#endif

/** @{ */
/** Set one function */
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT)                            \
    c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;
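
/* Table indexing used above and in ff_rv40dsp_init_x86(): the first index
 * selects the block size (2 - SIZE / 8 gives 0 for 16x16 and 1 for 8x8), and
 * the second is the quarter-pel position packed as 4 * PV + PH, so position
 * (3,3) is entry 15, the slot filled by the mc33 wrappers from DEFINE_FN. */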
/** Set functions put and avg for sizes 8 and 16 and a given qpel position */
#define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
    QPEL_FUNC_SET(OP, 8,  PH, PV, OPT)  \
    QPEL_FUNC_SET(OP, 16, PH, PV, OPT)
/** Set all functions for all sizes and qpel positions */
#define QPEL_MC_SET(OP, OPT)       \
    QPEL_FUNCS_SET (OP, 0, 1, OPT) \
    QPEL_FUNCS_SET (OP, 0, 3, OPT) \
    QPEL_FUNCS_SET (OP, 1, 0, OPT) \
    QPEL_FUNCS_SET (OP, 1, 1, OPT) \
    QPEL_FUNCS_SET (OP, 1, 2, OPT) \
    QPEL_FUNCS_SET (OP, 1, 3, OPT) \
    QPEL_FUNCS_SET (OP, 2, 1, OPT) \
    QPEL_FUNCS_SET (OP, 2, 2, OPT) \
    QPEL_FUNCS_SET (OP, 2, 3, OPT) \
    QPEL_FUNCS_SET (OP, 3, 0, OPT) \
    QPEL_FUNCS_SET (OP, 3, 1, OPT) \
    QPEL_FUNCS_SET (OP, 3, 2, OPT)
/** @} */

DEFINE_FN(put, 8, ssse3)

DEFINE_FN(put, 16, sse2)
DEFINE_FN(put, 16, ssse3)

DEFINE_FN(avg, 8, mmxext)
DEFINE_FN(avg, 8, ssse3)

DEFINE_FN(avg, 16, sse2)
DEFINE_FN(avg, 16, ssse3)
#endif /* HAVE_X86ASM */

#if HAVE_MMX_INLINE
DEFINE_FN(put, 8, mmx)
DEFINE_FN(avg, 8, mmx)
DEFINE_FN(put, 16, mmx)
DEFINE_FN(avg, 16, mmx)
#endif
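
/* The branches in the init function below are ordered from less to more
 * capable instruction sets; a later branch overwrites any pointer already set
 * by an earlier one, so when several checks match, the last assignment wins
 * and the tables end up holding the best available implementation. */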

av_cold void ff_rv40dsp_init_x86(RV34DSPContext *c)
{
    av_unused int cpu_flags = av_get_cpu_flags();

#if HAVE_MMX_INLINE
    if (INLINE_MMX(cpu_flags)) {
        c->put_pixels_tab[0][15] = put_rv40_qpel16_mc33_mmx;
        c->put_pixels_tab[1][15] = put_rv40_qpel8_mc33_mmx;
        c->avg_pixels_tab[0][15] = avg_rv40_qpel16_mc33_mmx;
        c->avg_pixels_tab[1][15] = avg_rv40_qpel8_mc33_mmx;
    }
#endif /* HAVE_MMX_INLINE */

#if HAVE_X86ASM
    if (EXTERNAL_MMX(cpu_flags)) {
        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
#if ARCH_X86_32
        QPEL_MC_SET(put_, _mmx)
#endif
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _3dnow)
#endif
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->avg_pixels_tab[1][15]        = avg_rv40_qpel8_mc33_mmxext;
        c->avg_chroma_pixels_tab[0]     = ff_avg_rv40_chroma_mc8_mmxext;
        c->avg_chroma_pixels_tab[1]     = ff_avg_rv40_chroma_mc4_mmxext;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmxext;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmxext;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmxext;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmxext;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _mmxext)
#endif
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->put_pixels_tab[0][15]        = put_rv40_qpel16_mc33_sse2;
        c->avg_pixels_tab[0][15]        = avg_rv40_qpel16_mc33_sse2;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
        QPEL_MC_SET(put_, _sse2)
        QPEL_MC_SET(avg_, _sse2)
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->put_pixels_tab[0][15]        = put_rv40_qpel16_mc33_ssse3;
        c->put_pixels_tab[1][15]        = put_rv40_qpel8_mc33_ssse3;
        c->avg_pixels_tab[0][15]        = avg_rv40_qpel16_mc33_ssse3;
        c->avg_pixels_tab[1][15]        = avg_rv40_qpel8_mc33_ssse3;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
        QPEL_MC_SET(put_, _ssse3)
        QPEL_MC_SET(avg_, _ssse3)
    }
#endif /* HAVE_X86ASM */
}
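
/* A minimal usage sketch (hypothetical caller, not part of this file): a
 * decoder dispatches through the tables filled in above, indexed by block
 * size and packed quarter-pel position as defined by QPEL_FUNC_SET. In FFmpeg
 * the generic init is expected to have populated the tables with C fallbacks
 * first, hence the NULL check here.
 *
 *     RV34DSPContext dsp = { 0 };
 *     ff_rv40dsp_init_x86(&dsp);
 *     // 16x16 luma block at quarter-pel offsets mx = 1, my = 2:
 *     if (dsp.put_pixels_tab[0][4 * 2 + 1])
 *         dsp.put_pixels_tab[0][4 * 2 + 1](dst, src, stride);
 */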