mdct15.asm
;******************************************************************************
;* SIMD optimized non-power-of-two MDCT functions
;*
;* Copyright (C) 2017 Rostislav Pehlivanov <atomnuker@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA 32

; lane permutations for the AVX2 postreindex (restore re/im interleaving after haddps)
perm_neg:      dd 2, 5, 3, 4, 6, 1, 7, 0
perm_pos:      dd 0, 7, 1, 6, 4, 3, 5, 2
; sign mask that negates the real-part products in the post-rotation complex multiply
sign_adjust_r: times 4 dd 0x80000000, 0x00000000
; sign mask used inside the 5-point FFT butterflies (loaded into xm7)
sign_adjust_5: dd 0x00000000, 0x80000000, 0x80000000, 0x00000000

SECTION .text
%if ARCH_X86_64

;*****************************************************************************************
;void ff_fft15_avx(FFTComplex *out, FFTComplex *in, FFTComplex *exptab, ptrdiff_t stride);
;*****************************************************************************************
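; Structure of the 15-point transform (editorial sketch, not part of the original
; source): with 15 = 3 * 5, let T_r be the 5-point DFT of the input decimated by
; 3, i.e. of in[3*m + r] for m = 0..4. Then, up to the sign convention of the
; twiddles stored in exptab,
;
;     X[k] = T_0[k mod 5] + W_15^k * T_1[k mod 5] + W_15^(2k) * T_2[k mod 5]
;
; where W_15 is a primitive 15th root of unity. The FFT5 macro below computes
; one T_r; the BUTTERFLIES_DC/BUTTERFLIES_AC macros apply the exptab twiddles
; and sum the three terms into the final bins.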
%macro FFT5 3 ; %1 - in_offset, %2 - dst1 (64bit used), %3 - dst2
    VBROADCASTSD m0, [inq + %1]          ; in[ 0].re, in[ 0].im, in[ 0].re, in[ 0].im

    movsd  xm1, [inq + 1*16 + 8 + %1]    ; in[ 3].re, in[ 3].im, 0, 0
    movsd  xm4, [inq + 6*16 + 0 + %1]    ; in[12].re, in[12].im, 0, 0
    movhps xm1, [inq + 3*16 + 0 + %1]    ; in[ 3].re, in[ 3].im, in[ 6].re, in[ 6].im
    movhps xm4, [inq + 4*16 + 8 + %1]    ; in[12].re, in[12].im, in[ 9].re, in[ 9].im

    subps  xm2, xm1, xm4                 ; t[2].im, t[2].re, t[3].im, t[3].re
    addps  xm1, xm4                      ; t[0].re, t[0].im, t[1].re, t[1].im

    movhlps %2, xm1                      ; t[0].re, t[1].re, t[0].im, t[1].im
    addps   %2, xm1
    addps   %2, xm0                      ; DC[0].re, DC[0].im, junk...
    movlhps %2, %2                       ; DC[0].re, DC[0].im, DC[0].re, DC[0].im

    shufps xm3, xm1, xm2, q0110          ; t[0].re, t[0].im, t[2].re, t[2].im
    shufps xm1, xm2, q2332               ; t[1].re, t[1].im, t[3].re, t[3].im

    mulps  xm%3, xm1, xm5
    mulps  xm4,  xm3, xm6
    mulps  xm1,  xm6

    xorps  xm1, xm7
    mulps  xm3, xm5

    addsubps xm3,  xm1                   ; t[0].re, t[0].im, t[2].re, t[2].im
    subps    xm%3, xm4                   ; t[4].re, t[4].im, t[5].re, t[5].im

    movhlps xm2, xm%3, xm3               ; t[2].re, t[2].im, t[5].re, t[5].im
    movlhps xm3, xm%3                    ; t[0].re, t[0].im, t[4].re, t[4].im

    xorps  xm2, xm7

    addps  xm%3, xm2, xm3
    subps  xm3,  xm2

    shufps xm3, xm3, q1032
    vinsertf128 m%3, m%3, xm3, 1         ; All ACs (tmp[1] through to tmp[4])
    addps  m%3, m%3, m0                  ; Finally offset with DCs
%endmacro
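; Recombination step (editorial note): BUTTERFLIES_DC combines the three DC
; terms left in xm8-xm10 into a single output bin (k = 0, 5 or 10, as outq is
; advanced by 5*stride between groups), while BUTTERFLIES_AC combines the AC
; terms in m11-m13 into the four neighbouring bins at stride..4*stride.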
%macro BUTTERFLIES_DC 1 ; %1 - exptab_offset
    mulps xm0, xm9,  [exptabq + %1 + 16*0]
    mulps xm1, xm10, [exptabq + %1 + 16*1]

    haddps  xm0, xm1
    movhlps xm1, xm0                     ; t[0].re, t[1].re, t[0].im, t[1].im
    addps   xm0, xm1
    addps   xm0, xm8

    movsd [outq], xm0
%endmacro
%macro BUTTERFLIES_AC 1 ; %1 - exptab_offset
    mulps m0, m12, [exptabq + 64*0 + 0*mmsize + %1]
    mulps m1, m12, [exptabq + 64*0 + 1*mmsize + %1]
    mulps m2, m13, [exptabq + 64*1 + 0*mmsize + %1]
    mulps m3, m13, [exptabq + 64*1 + 1*mmsize + %1]

    addps m0, m0, m2
    addps m1, m1, m3
    addps m0, m0, m11

    shufps m1, m1, m1, q2301
    addps  m0, m0, m1

    vextractf128 xm1, m0, 1

    movlps [outq + strideq*1], xm0
    movhps [outq + strideq*2], xm0
    movlps [outq + stride3q],  xm1
    movhps [outq + strideq*4], xm1
%endmacro
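; Layout note (editorial, inferred from the offsets below): the AC twiddles sit
; at the start of exptab, the DC twiddles at complex entries 48+ (byte offset
; 384+), and the packed 5-point FFT constants loaded into xm5/xm6 at entries
; 60+ (byte offset 480).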
INIT_YMM avx
cglobal fft15, 4, 5, 14, out, in, exptab, stride, stride5
    shl strideq, 3

    movaps xm5, [exptabq + 480 + 16*0]
    movaps xm6, [exptabq + 480 + 16*1]
    movaps xm7, [sign_adjust_5]

    FFT5  0, xm8,  11
    FFT5  8, xm9,  12
    FFT5 16, xm10, 13

%define stride3q inq
    lea stride3q, [strideq + strideq*2]
    lea stride5q, [strideq + strideq*4]

    BUTTERFLIES_DC (8*6 + 4*0)*2*4
    BUTTERFLIES_AC (8*0 + 0*0)*2*4

    add outq, stride5q
    BUTTERFLIES_DC (8*6 + 4*1)*2*4
    BUTTERFLIES_AC (8*2 + 0*0)*2*4

    add outq, stride5q
    BUTTERFLIES_DC (8*6 + 4*2)*2*4
    BUTTERFLIES_AC (8*4 + 0*0)*2*4

    RET

%endif ; ARCH_X86_64
;*******************************************************************************************************
;void ff_mdct15_postreindex(FFTComplex *out, FFTComplex *in, FFTComplex *exp, int *lut, ptrdiff_t len8);
;*******************************************************************************************************
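; Post-rotation / reindexing stage (editorial sketch, not part of the original
; source): each output element is, roughly, a complex multiply of a LUT-gathered
; input sample with a twiddle factor,
;
;     out[i] = cmul(in[lut[i]], exp[i])
;
; processed a few elements at a time from both ends of the 2*len8 buffer towards
; the middle (offset_n walking up, offset_p walking down). The exact output
; ordering and the real-part sign flip via sign_adjust_r follow the per-line
; comments in the loop body. LUT_LOAD_4D below gathers two (SSE3) or four (AVX2)
; complex samples through lut[].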
%macro LUT_LOAD_4D 3 ; %1 - dst register index (m#), %2 - temp xmm (AVX2 only), %3 - offset register name
    mov      r4d, [lutq + %3q*4 + 0]
    movsd  xmm%1, [inq + r4q*8]
    mov      r4d, [lutq + %3q*4 + 4]
    movhps xmm%1, [inq + r4q*8]
%if cpuflag(avx2)
    mov      r4d, [lutq + %3q*4 + 8]
    movsd     %2, [inq + r4q*8]
    mov      r4d, [lutq + %3q*4 + 12]
    movhps    %2, [inq + r4q*8]
    vinsertf128 m%1, m%1, %2, 1
%endif
%endmacro
%macro POSTROTATE_FN 1 ; %1 - complex elements handled per end, per iteration
cglobal mdct15_postreindex, 5, 7, 8 + cpuflag(avx2)*2, out, in, exp, lut, len8, offset_p, offset_n
    xor offset_nq, offset_nq
    lea offset_pq, [len8q*2 - %1]

    movaps m7, [sign_adjust_r]

%if cpuflag(avx2)
    movaps m8, [perm_pos]
    movaps m9, [perm_neg]
%endif

.loop:
    movups m0, [expq + offset_pq*8]  ; exp[p0].re, exp[p0].im, exp[p1].re, exp[p1].im, exp[p2].re, exp[p2].im, exp[p3].re, exp[p3].im
    movups m1, [expq + offset_nq*8]  ; exp[n3].re, exp[n3].im, exp[n2].re, exp[n2].im, exp[n1].re, exp[n1].im, exp[n0].re, exp[n0].im

    LUT_LOAD_4D 3, xm4, offset_p     ; in[p0].re, in[p0].im, in[p1].re, in[p1].im, in[p2].re, in[p2].im, in[p3].re, in[p3].im
    LUT_LOAD_4D 4, xm5, offset_n     ; in[n3].re, in[n3].im, in[n2].re, in[n2].im, in[n1].re, in[n1].im, in[n0].re, in[n0].im

    mulps m5, m3, m0                 ; in[p].reim * exp[p].reim
    mulps m6, m4, m1                 ; in[n].reim * exp[n].reim

    xorps m5, m7                     ; in[p].re *= -1, in[p].im *= 1
    xorps m6, m7                     ; in[n].re *= -1, in[n].im *= 1

    shufps m3, m3, m3, q2301         ; in[p].imre
    shufps m4, m4, m4, q2301         ; in[n].imre

    mulps m3, m0                     ; in[p].imre * exp[p].reim
    mulps m4, m1                     ; in[n].imre * exp[n].reim

    haddps m3, m6                    ; out[n0].im, out[n1].im, out[n3].re, out[n2].re, out[n2].im, out[n3].im, out[n1].re, out[n0].re
    haddps m5, m4                    ; out[p0].re, out[p1].re, out[p3].im, out[p2].im, out[p2].re, out[p3].re, out[p1].im, out[p0].im

%if cpuflag(avx2)
    vpermps m3, m9, m3               ; out[n3].im, out[n3].re, out[n2].im, out[n2].re, out[n1].im, out[n1].re, out[n0].im, out[n0].re
    vpermps m5, m8, m5               ; out[p0].re, out[p0].im, out[p1].re, out[p1].im, out[p2].re, out[p2].im, out[p3].re, out[p3].im
%else
    shufps m3, m3, m3, q0312
    shufps m5, m5, m5, q2130
%endif

    movups [outq + offset_nq*8], m3
    movups [outq + offset_pq*8], m5

    sub offset_pq, %1
    add offset_nq, %1
    cmp offset_nq, offset_pq
    jle .loop

    REP_RET
%endmacro
INIT_XMM sse3
POSTROTATE_FN 2

%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
INIT_YMM avx2
POSTROTATE_FN 4
%endif