/* vf_spp.c — x86 (MMX inline assembly) optimizations for the spp filter */
  1. /*
  2. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License along
  17. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  18. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  19. */
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/crc.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/x86/asm.h"
#include "libavfilter/vf_spp.h"
  26. #if HAVE_MMX_INLINE
/**
 * Requantize one 8x8 block of DCT coefficients with a hard threshold
 * (MMX inline assembly).
 *
 * Four quadwords (16 coefficients) are processed per REQUANT_CORE
 * expansion: mm4/mm5/mm6 hold threshold-derived constants broadcast to
 * all four 16-bit lanes, and the psubw/paddusw/paddw/psubusw chain
 * zeroes, branch-free via unsigned saturating arithmetic, every
 * coefficient whose magnitude is within the threshold, before the
 * psraw $3 descale.  The punpck* sequence transposes the 4x4 word
 * groups on the way out to dst.
 *
 * @param dst         output coefficient block (64 x int16)
 * @param src         input coefficient block (64 x int16)
 * @param qp          quantizer scale; threshold1 = qp * 16 - 1 (bias is 0)
 * @param permutation unused here: the hard-coded src offsets in the
 *                    REQUANT_CORE invocations below bake in one specific
 *                    IDCT coefficient permutation — ff_spp_init_x86 only
 *                    selects this function after CRC-checking
 *                    s->dct->idct_permutation against that layout
 */
static void hardthresh_mmx(int16_t dst[64], const int16_t src[64],
                           int qp, const uint8_t *permutation)
{
    int bias = 0; //FIXME
    unsigned int threshold1;

    threshold1 = qp * ((1<<4) - bias) - 1;

/* Threshold + descale + transpose core for 4 quadwords.  Only C-style
 * comments may sit before the '\' continuations: line splicing happens
 * before comment removal, so '//' would swallow the backslash. */
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
    "movq " #src0 ", %%mm0 \n" \
    "movq " #src1 ", %%mm1 \n" \
    "movq " #src2 ", %%mm2 \n" \
    "movq " #src3 ", %%mm3 \n" \
    "psubw %%mm4, %%mm0 \n" \
    "psubw %%mm4, %%mm1 \n" \
    "psubw %%mm4, %%mm2 \n" \
    "psubw %%mm4, %%mm3 \n" \
    "paddusw %%mm5, %%mm0 \n" \
    "paddusw %%mm5, %%mm1 \n" \
    "paddusw %%mm5, %%mm2 \n" \
    "paddusw %%mm5, %%mm3 \n" \
    "paddw %%mm6, %%mm0 \n" \
    "paddw %%mm6, %%mm1 \n" \
    "paddw %%mm6, %%mm2 \n" \
    "paddw %%mm6, %%mm3 \n" \
    "psubusw %%mm6, %%mm0 \n" \
    "psubusw %%mm6, %%mm1 \n" \
    "psubusw %%mm6, %%mm2 \n" \
    "psubusw %%mm6, %%mm3 \n" \
    "psraw $3, %%mm0 \n" \
    "psraw $3, %%mm1 \n" \
    "psraw $3, %%mm2 \n" \
    "psraw $3, %%mm3 \n" \
    \
    "movq %%mm0, %%mm7 \n" \
    "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
    "movq %%mm1, %%mm2 \n" \
    "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
    "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
    "movq %%mm0, %%mm3 \n" \
    "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
    "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
    "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
    \
    "movq %%mm0, " #dst0 " \n" \
    "movq %%mm7, " #dst1 " \n" \
    "movq %%mm3, " #dst2 " \n" \
    "movq %%mm1, " #dst3 " \n"

    __asm__ volatile(
        /* broadcast the three 32-bit threshold constants (%2..%4) to
         * all four 16-bit lanes of mm4/mm5/mm6 */
        "movd %2, %%mm4 \n"
        "movd %3, %%mm5 \n"
        "movd %4, %%mm6 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        "packssdw %%mm6, %%mm6 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        "packssdw %%mm6, %%mm6 \n"
        REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed?
    );
    /* the DC coefficient bypasses thresholding: descale with rounding only */
    dst[0] = (src[0] + 4) >> 3;
}
/**
 * Requantize one 8x8 block of DCT coefficients with a soft threshold
 * (MMX inline assembly).
 *
 * Per coefficient the asm computes a soft shrink: the pcmpgtw/pxor
 * pairs fold the sign out (one's-complement absolute value for negative
 * words), psubusw subtracts the threshold with saturation at zero, the
 * second pxor folds the sign back in, then 4 (mm5) is added for
 * rounding before the psraw $3 descale.  The punpck* sequence at the
 * end of REQUANT_CORE transposes 4x4 word groups on the way to dst.
 *
 * @param dst         output coefficient block (64 x int16)
 * @param src         input coefficient block (64 x int16)
 * @param qp          quantizer scale; threshold1 = qp * 16 - 1 (bias is 0)
 * @param permutation unused here: the hard-coded src offsets in the
 *                    REQUANT_CORE invocations below bake in one specific
 *                    IDCT coefficient permutation — ff_spp_init_x86 only
 *                    selects this function after CRC-checking
 *                    s->dct->idct_permutation against that layout
 */
static void softthresh_mmx(int16_t dst[64], const int16_t src[64],
                           int qp, const uint8_t *permutation)
{
    int bias = 0; //FIXME
    unsigned int threshold1;

    threshold1 = qp*((1<<4) - bias) - 1;

#undef REQUANT_CORE
/* Soft-shrink + descale + transpose core for 4 quadwords.  Only C-style
 * comments may sit before the '\' continuations (line splicing precedes
 * comment removal). */
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
    "movq " #src0 ", %%mm0 \n" \
    "movq " #src1 ", %%mm1 \n" \
    "pxor %%mm6, %%mm6 \n" \
    "pxor %%mm7, %%mm7 \n" \
    "pcmpgtw %%mm0, %%mm6 \n" \
    "pcmpgtw %%mm1, %%mm7 \n" \
    "pxor %%mm6, %%mm0 \n" \
    "pxor %%mm7, %%mm1 \n" \
    "psubusw %%mm4, %%mm0 \n" \
    "psubusw %%mm4, %%mm1 \n" \
    "pxor %%mm6, %%mm0 \n" \
    "pxor %%mm7, %%mm1 \n" \
    "movq " #src2 ", %%mm2 \n" \
    "movq " #src3 ", %%mm3 \n" \
    "pxor %%mm6, %%mm6 \n" \
    "pxor %%mm7, %%mm7 \n" \
    "pcmpgtw %%mm2, %%mm6 \n" \
    "pcmpgtw %%mm3, %%mm7 \n" \
    "pxor %%mm6, %%mm2 \n" \
    "pxor %%mm7, %%mm3 \n" \
    "psubusw %%mm4, %%mm2 \n" \
    "psubusw %%mm4, %%mm3 \n" \
    "pxor %%mm6, %%mm2 \n" \
    "pxor %%mm7, %%mm3 \n" \
    \
    "paddsw %%mm5, %%mm0 \n" \
    "paddsw %%mm5, %%mm1 \n" \
    "paddsw %%mm5, %%mm2 \n" \
    "paddsw %%mm5, %%mm3 \n" \
    "psraw $3, %%mm0 \n" \
    "psraw $3, %%mm1 \n" \
    "psraw $3, %%mm2 \n" \
    "psraw $3, %%mm3 \n" \
    \
    "movq %%mm0, %%mm7 \n" \
    "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
    "movq %%mm1, %%mm2 \n" \
    "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
    "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
    "movq %%mm0, %%mm3 \n" \
    "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
    "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
    "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
    \
    "movq %%mm0, " #dst0 " \n" \
    "movq %%mm7, " #dst1 " \n" \
    "movq %%mm3, " #dst2 " \n" \
    "movq %%mm1, " #dst3 " \n"

    __asm__ volatile(
        /* broadcast threshold1 (%2) and the rounding constant 4 (%3)
         * to all four 16-bit lanes of mm4/mm5 */
        "movd %2, %%mm4 \n"
        "movd %3, %%mm5 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed?
    );
    /* the DC coefficient bypasses thresholding: descale with rounding only */
    dst[0] = (src[0] + 4) >> 3;
}
/**
 * Convert a slice of accumulated int16 samples to 8-bit pixels with
 * dithering (MMX inline assembly).
 *
 * Per row: the 8 dither bytes for row y are widened to words
 * (punpcklbw/punpckhbw against zero) into mm3/mm4 and pre-shifted right
 * by log2_scale; the inner asm loop then loads 8 source words per
 * iteration, adds the dither offsets, shifts right by
 * MAX_LEVEL - log2_scale and packs to 8 destination bytes with unsigned
 * saturation (packuswb clamps to 0..255).  The loop terminates by
 * comparing the destination pointer against dst + width.
 *
 * @param dst        destination pixel rows (dst_stride bytes apart)
 * @param src        source samples (src_stride int16 elements per row)
 * @param dst_stride destination row stride in bytes
 * @param src_stride source row stride in elements
 * @param width      pixels per row; consumed 8 at a time, so presumably
 *                   a multiple of 8 — NOTE(review): confirm the caller
 *                   pads, the loop would overshoot otherwise
 * @param height     number of rows; indexes dither[y], so presumably
 *                   height <= 8 per call — TODO confirm against caller
 * @param log2_scale log2 of the accumulation scale to remove
 * @param dither     8x8 dither matrix, one row used per output row
 */
static void store_slice_mmx(uint8_t *dst, const int16_t *src,
                            int dst_stride, int src_stride,
                            int width, int height, int log2_scale,
                            const uint8_t dither[8][8])
{
    int y;

    for (y = 0; y < height; y++) {
        uint8_t *dst1 = dst;
        const int16_t *src1 = src;
        __asm__ volatile(
            /* widen dither row to words and pre-scale by log2_scale */
            "movq (%3), %%mm3 \n"
            "movq (%3), %%mm4 \n"
            "movd %4, %%mm2 \n"
            "pxor %%mm0, %%mm0 \n"
            "punpcklbw %%mm0, %%mm3 \n"
            "punpckhbw %%mm0, %%mm4 \n"
            "psraw %%mm2, %%mm3 \n"
            "psraw %%mm2, %%mm4 \n"
            /* switch mm2 to the final descale amount for the loop */
            "movd %5, %%mm2 \n"
            "1: \n"
            "movq (%0), %%mm0 \n"
            "movq 8(%0), %%mm1 \n"
            "paddw %%mm3, %%mm0 \n"
            "paddw %%mm4, %%mm1 \n"
            "psraw %%mm2, %%mm0 \n"
            "psraw %%mm2, %%mm1 \n"
            "packuswb %%mm1, %%mm0 \n"
            "movq %%mm0, (%1) \n"
            "add $16, %0 \n"
            "add $8, %1 \n"
            "cmp %2, %1 \n"
            " jb 1b \n"
            : "+r" (src1), "+r"(dst1)
            : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(MAX_LEVEL - log2_scale)
        );
        src += src_stride;
        dst += dst_stride;
    }
}
  205. #endif /* HAVE_MMX_INLINE */
  206. av_cold void ff_spp_init_x86(SPPContext *s)
  207. {
  208. #if HAVE_MMX_INLINE
  209. int cpu_flags = av_get_cpu_flags();
  210. if (cpu_flags & AV_CPU_FLAG_MMX) {
  211. static const uint32_t mmx_idct_perm_crc = 0xe5e8adc4;
  212. uint32_t idct_perm_crc =
  213. av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0,
  214. s->dct->idct_permutation,
  215. sizeof(s->dct->idct_permutation));
  216. int64_t bps;
  217. s->store_slice = store_slice_mmx;
  218. av_opt_get_int(s->dct, "bits_per_sample", 0, &bps);
  219. if (bps <= 8 && idct_perm_crc == mmx_idct_perm_crc) {
  220. switch (s->mode) {
  221. case 0: s->requantize = hardthresh_mmx; break;
  222. case 1: s->requantize = softthresh_mmx; break;
  223. }
  224. }
  225. }
  226. #endif
  227. }