mpegaudiodsp.c
/*
 * SIMD-optimized MP3 decoding functions
 * Copyright (c) 2010 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/internal.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mpegaudiodsp.h"

#define DECL(CPU)\
static void imdct36_blocks_ ## CPU(float *out, float *buf, float *in, int count, int switch_point, int block_type);\
void ff_imdct36_float_ ## CPU(float *out, float *buf, float *in, float *win);

#if HAVE_X86ASM
#if ARCH_X86_32
DECL(sse)
#endif
DECL(sse2)
DECL(sse3)
DECL(ssse3)
DECL(avx)
#endif /* HAVE_X86ASM */

void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win,
                               float *tmpbuf);
void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win,
                               float *tmpbuf);

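/* Window coefficients from ff_mdct_win_float, re-interleaved four-wide
 * (normal vs. switch-point variant, indexed by block type) by
 * ff_mpadsp_init_x86() below, so the four-block IMDCT kernels can load one
 * window value per SSE/AVX lane. */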
DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40];

#if HAVE_6REGS && HAVE_SSE_INLINE

#define MACS(rt, ra, rb) rt+=(ra)*(rb)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)

#define SUM8(op, sum, w, p)               \
{                                         \
    op(sum, (w)[0 * 64], (p)[0 * 64]);    \
    op(sum, (w)[1 * 64], (p)[1 * 64]);    \
    op(sum, (w)[2 * 64], (p)[2 * 64]);    \
    op(sum, (w)[3 * 64], (p)[3 * 64]);    \
    op(sum, (w)[4 * 64], (p)[4 * 64]);    \
    op(sum, (w)[5 * 64], (p)[5 * 64]);    \
    op(sum, (w)[6 * 64], (p)[6 * 64]);    \
    op(sum, (w)[7 * 64], (p)[7 * 64]);    \
}

static void apply_window(const float *buf, const float *win1,
                         const float *win2, float *sum1, float *sum2, int len)
{
    x86_reg count = - 4*len;
    const float *win1a = win1+len;
    const float *win2a = win2+len;
    const float *bufa  = buf+len;
    float *sum1a = sum1+len;
    float *sum2a = sum2+len;

#define MULT(a, b)                                 \
    "movaps " #a "(%1,%0), %%xmm1 \n\t"            \
    "movaps " #a "(%3,%0), %%xmm2 \n\t"            \
    "mulps %%xmm2, %%xmm1 \n\t"                    \
    "subps %%xmm1, %%xmm0 \n\t"                    \
    "mulps " #b "(%2,%0), %%xmm2 \n\t"             \
    "subps %%xmm2, %%xmm4 \n\t"

    __asm__ volatile(
            "1:                                 \n\t"
            "xorps     %%xmm0, %%xmm0           \n\t"
            "xorps     %%xmm4, %%xmm4           \n\t"

            MULT(   0,   0)
            MULT( 256,  64)
            MULT( 512, 128)
            MULT( 768, 192)
            MULT(1024, 256)
            MULT(1280, 320)
            MULT(1536, 384)
            MULT(1792, 448)

            "movaps    %%xmm0, (%4,%0)          \n\t"
            "movaps    %%xmm4, (%5,%0)          \n\t"

            "add       $16,    %0               \n\t"
            "jl 1b                              \n\t"
            :"+&r"(count)
            :"r"(win1a), "r"(win2a), "r"(bufa), "r"(sum1a), "r"(sum2a)
            );

#undef MULT
}

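/* A rough scalar sketch of what the inline asm above computes, for reference
 * only (it assumes len is a multiple of 4, matching the 16-byte loop stride;
 * both sums are produced negated):
 *
 *     for (int i = 0; i < len; i++) {
 *         float s1 = 0, s2 = 0;
 *         for (int k = 0; k < 8; k++) {
 *             s1 -= win1[i + 64 * k] * buf[i + 64 * k];
 *             s2 -= win2[i + 16 * k] * buf[i + 64 * k];
 *         }
 *         sum1[i] = s1;
 *         sum2[i] = s2;
 *     }
 */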
static void apply_window_mp3(float *in, float *win, int *unused, float *out,
                             ptrdiff_t incr)
{
    LOCAL_ALIGNED_16(float, suma, [17]);
    LOCAL_ALIGNED_16(float, sumb, [17]);
    LOCAL_ALIGNED_16(float, sumc, [17]);
    LOCAL_ALIGNED_16(float, sumd, [17]);

    float sum;

    /* copy to avoid wrap */
    __asm__ volatile(
            "movaps    0(%0), %%xmm0   \n\t" \
            "movaps   16(%0), %%xmm1   \n\t" \
            "movaps   32(%0), %%xmm2   \n\t" \
            "movaps   48(%0), %%xmm3   \n\t" \
            "movaps %%xmm0,    0(%1)   \n\t" \
            "movaps %%xmm1,   16(%1)   \n\t" \
            "movaps %%xmm2,   32(%1)   \n\t" \
            "movaps %%xmm3,   48(%1)   \n\t" \
            "movaps   64(%0), %%xmm0   \n\t" \
            "movaps   80(%0), %%xmm1   \n\t" \
            "movaps   96(%0), %%xmm2   \n\t" \
            "movaps  112(%0), %%xmm3   \n\t" \
            "movaps %%xmm0,   64(%1)   \n\t" \
            "movaps %%xmm1,   80(%1)   \n\t" \
            "movaps %%xmm2,   96(%1)   \n\t" \
            "movaps %%xmm3,  112(%1)   \n\t"
            ::"r"(in), "r"(in+512)
            :"memory"
            );

    apply_window(in + 16, win     , win + 512, suma, sumc, 16);
    apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);

    SUM8(MACS, suma[0], win + 32, in + 48);

    sumc[ 0] = 0;
    sumb[16] = 0;
    sumd[16] = 0;

#define SUMS(suma, sumb, sumc, sumd, out1, out2)               \
            "movups " #sumd "(%4),       %%xmm0          \n\t" \
            "shufps  $0x1b,              %%xmm0, %%xmm0  \n\t" \
            "subps  " #suma "(%1),       %%xmm0          \n\t" \
            "movaps  %%xmm0,     " #out1 "(%0)           \n\t" \
                                                               \
            "movups " #sumc "(%3),       %%xmm0          \n\t" \
            "shufps  $0x1b,              %%xmm0, %%xmm0  \n\t" \
            "addps  " #sumb "(%2),       %%xmm0          \n\t" \
            "movaps  %%xmm0,     " #out2 "(%0)           \n\t"

    if (incr == 1) {
        __asm__ volatile(
                SUMS( 0, 48,  4, 52,  0, 112)
                SUMS(16, 32, 20, 36, 16,  96)
                SUMS(32, 16, 36, 20, 32,  80)
                SUMS(48,  0, 52,  4, 48,  64)

                :"+&r"(out)
                :"r"(&suma[0]), "r"(&sumb[0]), "r"(&sumc[0]), "r"(&sumd[0])
                :"memory"
                );
        out += 16*incr;
    } else {
        int j;
        float *out2 = out + 32 * incr;
        out[0] = -suma[0];
        out  += incr;
        out2 -= incr;
        for(j=1;j<16;j++) {
            *out  = -suma[   j] + sumd[16-j];
            *out2 =  sumb[16-j] + sumc[   j];
            out  += incr;
            out2 -= incr;
        }
    }

    sum = 0;
    SUM8(MLSS, sum, win + 16 + 32, in + 32);
    *out = sum;
}

#endif /* HAVE_6REGS && HAVE_SSE_INLINE */

#if HAVE_X86ASM
#define DECL_IMDCT_BLOCKS(CPU1, CPU2)                                       \
static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in,      \
                               int count, int switch_point, int block_type) \
{                                                                           \
    int align_end = count - (count & 3);                                    \
    int j;                                                                  \
    for (j = 0; j < align_end; j+= 4) {                                     \
        LOCAL_ALIGNED_16(float, tmpbuf, [1024]);                            \
        float *win = mdct_win_sse[switch_point && j < 4][block_type];       \
        /* apply window & overlap with previous buffer */                   \
                                                                            \
        /* select window */                                                 \
        ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf);          \
        in  += 4*18;                                                        \
        buf += 4*18;                                                        \
        out += 4;                                                           \
    }                                                                       \
    for (; j < count; j++) {                                                \
        /* apply window & overlap with previous buffer */                   \
                                                                            \
        /* select window */                                                 \
        int win_idx = (switch_point && j < 2) ? 0 : block_type;             \
        float *win  = ff_mdct_win_float[win_idx + (4 & -(j & 1))];          \
                                                                            \
        ff_imdct36_float_ ## CPU1(out, buf, in, win);                       \
                                                                            \
        in += 18;                                                           \
        buf++;                                                              \
        out++;                                                              \
    }                                                                       \
}
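/* CPU1 names the single-block ff_imdct36_float_* kernel used for the scalar
 * tail, CPU2 the four-block ff_four_imdct36_float_* kernel used for the main
 * loop; e.g. DECL_IMDCT_BLOCKS(sse2,sse) pairs the SSE2 single-block routine
 * with the SSE four-block routine. */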
#if HAVE_SSE
#if ARCH_X86_32
DECL_IMDCT_BLOCKS(sse,sse)
#endif
DECL_IMDCT_BLOCKS(sse2,sse)
DECL_IMDCT_BLOCKS(sse3,sse)
DECL_IMDCT_BLOCKS(ssse3,sse)
#endif
#if HAVE_AVX_EXTERNAL
DECL_IMDCT_BLOCKS(avx,avx)
#endif
#endif /* HAVE_X86ASM */

av_cold void ff_mpadsp_init_x86(MPADSPContext *s)
{
    av_unused int cpu_flags = av_get_cpu_flags();

    int i, j;

    for (j = 0; j < 4; j++) {
        for (i = 0; i < 40; i ++) {
            mdct_win_sse[0][j][4*i    ] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[0][j][4*i + 1] = ff_mdct_win_float[j + 4][i];
            mdct_win_sse[0][j][4*i + 2] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[0][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
            mdct_win_sse[1][j][4*i    ] = ff_mdct_win_float[0    ][i];
            mdct_win_sse[1][j][4*i + 1] = ff_mdct_win_float[4    ][i];
            mdct_win_sse[1][j][4*i + 2] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[1][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
        }
    }

#if HAVE_6REGS && HAVE_SSE_INLINE
    if (INLINE_SSE(cpu_flags)) {
        s->apply_window_float = apply_window_mp3;
    }
#endif /* HAVE_6REGS && HAVE_SSE_INLINE */
#if HAVE_X86ASM
#if HAVE_SSE
#if ARCH_X86_32
    if (EXTERNAL_SSE(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_sse;
    }
#endif
    if (EXTERNAL_SSE2(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_sse2;
    }
    if (EXTERNAL_SSE3(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_sse3;
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_ssse3;
    }
#endif
#if HAVE_AVX_EXTERNAL
    if (EXTERNAL_AVX(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_avx;
    }
#endif
#endif /* HAVE_X86ASM */
}