/*
 * SIMD-optimized LPC functions
 * Copyright (c) 2007 Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/lpc.h"
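
/* 16-byte-aligned double constants, referenced via MANGLE() in the asm below */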
DECLARE_ASM_CONST(16, double, pd_1)[2] = { 1.0, 1.0 };
DECLARE_ASM_CONST(16, double, pd_2)[2] = { 2.0, 2.0 };

#if HAVE_SSE2_INLINE
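
/*
 * Apply a Welch (parabolic) window to a block of int32 samples and write the
 * result to w_data as doubles; classically, w(n) = 1 - ((n - (N-1)/2) /
 * ((N-1)/2))^2 for an N-sample block. Both halves of the block are processed
 * at once: each iteration squares the pair of window arguments kept in xmm7,
 * subtracts them from 1.0 (pd_1), and applies the same weights, swapped with
 * pshufd, to the mirrored samples in the other half.
 */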
static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
                                        double *w_data)
{
    double c = 2.0 / (len-1.0);
    int n2 = len>>1;
    x86_reg i = -n2*sizeof(int32_t);
    x86_reg j =  n2*sizeof(int32_t);
    __asm__ volatile(
        "movsd   %4,     %%xmm7                \n\t"
        "movapd  "MANGLE(pd_1)", %%xmm6        \n\t"
        "movapd  "MANGLE(pd_2)", %%xmm5        \n\t"
        "movlhps %%xmm7, %%xmm7                \n\t"
        "subpd   %%xmm5, %%xmm7                \n\t"
        "addsd   %%xmm6, %%xmm7                \n\t"
        "test    $1,     %5                    \n\t"
        "jz      2f                            \n\t"
#define WELCH(MOVPD, offset)\
        "1:                                    \n\t"\
        "movapd   %%xmm7,  %%xmm1              \n\t"\
        "mulpd    %%xmm1,  %%xmm1              \n\t"\
        "movapd   %%xmm6,  %%xmm0              \n\t"\
        "subpd    %%xmm1,  %%xmm0              \n\t"\
        "pshufd   $0x4e,   %%xmm0, %%xmm1      \n\t"\
        "cvtpi2pd (%3,%0), %%xmm2              \n\t"\
        "cvtpi2pd "#offset"*4(%3,%1), %%xmm3   \n\t"\
        "mulpd    %%xmm0,  %%xmm2              \n\t"\
        "mulpd    %%xmm1,  %%xmm3              \n\t"\
        "movapd   %%xmm2, (%2,%0,2)            \n\t"\
        MOVPD"    %%xmm3, "#offset"*8(%2,%1,2) \n\t"\
        "subpd    %%xmm5,  %%xmm7              \n\t"\
        "sub      $8,      %1                  \n\t"\
        "add      $8,      %0                  \n\t"\
        "jl 1b                                 \n\t"
        /* odd len: the mirrored half is offset by one double and needs
         * unaligned stores; even len keeps both halves aligned */
        WELCH("movupd", -1)
        "jmp 3f                                \n\t"
        "2:                                    \n\t"
        WELCH("movapd", -2)
        "3:                                    \n\t"
        :"+&r"(i), "+&r"(j)
        :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
         NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2)
         XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                           "%xmm5", "%xmm6", "%xmm7")
    );
#undef WELCH
}
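
/*
 * Compute autocorrelation coefficients autoc[j] = 1 + sum_i data[i]*data[i-j]
 * for j = 0..lag, two lags per pass over the data, with a three-lag final pass
 * when lag is even so that autoc[lag] is always filled. The extra 1.0 comes
 * from seeding the accumulators with pd_1.
 */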
static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
                                      double *autoc)
{
    int j;

    if((x86_reg)data & 15)
        data++; /* the caller is expected to pad data, so stepping forward
                 * one sample to reach 16-byte alignment is safe */

    for(j=0; j<lag; j+=2){
        x86_reg i = -len*sizeof(double);
        if(j == lag-2) {
            /* final pass for an even lag: accumulate lags j, j+1 and j+2 */
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0 \n\t" /* seed each sum with 1.0 */
                "movsd    "MANGLE(pd_1)", %%xmm1 \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm2 \n\t"
                "1:                              \n\t"
                "movapd   (%2,%0), %%xmm3        \n\t"
                "movupd -8(%3,%0), %%xmm4        \n\t"
                "movapd   (%3,%0), %%xmm5        \n\t"
                "mulpd     %%xmm3, %%xmm4        \n\t"
                "mulpd     %%xmm3, %%xmm5        \n\t"
                "mulpd -16(%3,%0), %%xmm3        \n\t"
                "addpd     %%xmm4, %%xmm1        \n\t"
                "addpd     %%xmm5, %%xmm0        \n\t"
                "addpd     %%xmm3, %%xmm2        \n\t"
                "add       $16,    %0            \n\t"
                "jl 1b                           \n\t"
                /* fold the high halves of the accumulators onto the low ones */
                "movhlps   %%xmm0, %%xmm3        \n\t"
                "movhlps   %%xmm1, %%xmm4        \n\t"
                "movhlps   %%xmm2, %%xmm5        \n\t"
                "addsd     %%xmm3, %%xmm0        \n\t"
                "addsd     %%xmm4, %%xmm1        \n\t"
                "addsd     %%xmm5, %%xmm2        \n\t"
                "movsd     %%xmm0,   (%1)        \n\t"
                "movsd     %%xmm1,  8(%1)        \n\t"
                "movsd     %%xmm2, 16(%1)        \n\t"
                :"+&r"(i)
                :"r"(autoc+j), "r"(data+len), "r"(data+len-j)
                 NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
                :"memory"
            );
        } else {
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0 \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1 \n\t"
                "1:                              \n\t"
                "movapd   (%3,%0), %%xmm3        \n\t"
                "movupd -8(%4,%0), %%xmm4        \n\t"
                "mulpd     %%xmm3, %%xmm4        \n\t"
                "mulpd    (%4,%0), %%xmm3        \n\t"
                "addpd     %%xmm4, %%xmm1        \n\t"
                "addpd     %%xmm3, %%xmm0        \n\t"
                "add       $16,    %0            \n\t"
                "jl 1b                           \n\t"
                "movhlps   %%xmm0, %%xmm3        \n\t"
                "movhlps   %%xmm1, %%xmm4        \n\t"
                "addsd     %%xmm3, %%xmm0        \n\t"
                "addsd     %%xmm4, %%xmm1        \n\t"
                "movsd     %%xmm0, %1            \n\t"
                "movsd     %%xmm1, %2            \n\t"
                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
                :"r"(data+len), "r"(data+len-j)
                 NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
            );
        }
    }
}

#endif /* HAVE_SSE2_INLINE */
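
/*
 * Install the SSE2 implementations when the build has inline asm and the CPU
 * supports SSE2; the generic ff_lpc_init() is assumed to have filled in the C
 * fallbacks beforehand, so the pointers are left untouched otherwise.
 */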
av_cold void ff_lpc_init_x86(LPCContext *c)
{
#if HAVE_SSE2_INLINE
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_SSE2(cpu_flags) || INLINE_SSE2_SLOW(cpu_flags)) {
        c->lpc_apply_welch_window = lpc_apply_welch_window_sse2;
        c->lpc_compute_autocorr   = lpc_compute_autocorr_sse2;
    }
#endif /* HAVE_SSE2_INLINE */
}