mpegvideodsp.c 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161
  1. /*
  2. * This file is part of FFmpeg.
  3. *
  4. * FFmpeg is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * FFmpeg is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with FFmpeg; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "config.h"
  19. #include "libavutil/attributes.h"
  20. #include "libavutil/cpu.h"
  21. #include "libavutil/x86/cpu.h"
  22. #include "libavcodec/mpegvideodsp.h"
  23. #include "libavcodec/videodsp.h"
  24. #if HAVE_INLINE_ASM
/**
 * MMX implementation of gmc (global motion compensation, presumably for
 * MPEG-4 GMC — the C reference is ff_gmc_c).  Computes an 8x8 destination
 * block from src using an affine motion field with bilinear sub-pel
 * interpolation.
 *
 * @param dst    destination block
 * @param src    source plane
 * @param stride line size of both dst and src
 * @param h      block height (the width is fixed at 8 below)
 * @param ox,oy  16.16-ish fixed-point start offsets, scaled by (16 + shift)
 * @param dxx,dxy,dyx,dyy  fixed-point per-pixel motion increments
 * @param shift  fixed-point shift of the motion parameters
 * @param r      rounding constant added before the final shift
 * @param width,height  dimensions of the valid source area (for edge
 *                      emulation / fallback decisions)
 *
 * NOTE(review): the exact fixed-point scaling conventions above are inferred
 * from the shifts in this function — confirm against ff_gmc_c.
 */
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;                    // this path always renders 8-wide blocks
    const int ix   = ox >> (16 + shift);   // integer (full-pel) start position
    const int iy   = oy >> (16 + shift);
    // Motion parameters reduced by 4 bits: the fallback test below rejects
    // inputs that actually use those low 4 bits, so no precision is lost here.
    const int oxs  = ox >> 4;
    const int oys  = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    // 4-lane 16-bit constants for the MMX inner loop (one lane per pixel).
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;    // bilinear blend multiplies two shift-scaled weights
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE]; // scratch for emulated edges (h+1 rows read)
    int x, y;

    // Total parameter change across the block, used to detect whether the
    // full-pel offset stays constant over the whole 8x(h) block.
    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);

    // Source reads would fall outside [0,width)x[0,height): needs edge emulation.
    int need_emu = (unsigned) ix >= width  - w || width  < w ||
                   (unsigned) iy >= height - h || height < h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
        // uses more than 16 bits of subpel mv (only at huge resolution)
        (dxx | dxy | dyx | dyy) & 15 ||
        (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        // Any of the above: punt to the bit-exact C implementation.
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;               // move to the full-pel start position
    if (need_emu) {
        // Copy (w+1)x(h+1) pixels (the +1 is the bilinear read-ahead) into
        // edge_buf with out-of-picture samples replicated from the border.
        ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    // Prologue: mm6 = (1 << shift) broadcast to 4 words (the interpolation
    // scale "s"), mm7 = 0 (for byte->word unpacking).  Both registers are
    // relied upon by the inner-loop asm below.
    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r" (1 << shift));

    // Process the block in 4-pixel-wide columns (two passes for w == 8).
    for (x = 0; x < w; x += 4) {
        // Per-lane sub-pel coordinates for the 4 pixels of this column,
        // pre-decremented by one row step (the loop below adds it back
        // before the first use).
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            // Advance the sub-pel coordinates by one row and extract the
            // fractional parts: mm4 = dx, mm5 = dy (top 4 bits of each word).
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw   $12, %%mm4     \n\t"
                "psrlw   $12, %%mm5     \n\t"
                : "+m" (*dx4), "+m" (*dy4)
                : "m" (*dxy4), "m" (*dyy4));

            // Bilinear blend of the four neighbouring source pixels with
            // weights (s-dx)(s-dy), dx(s-dy), (s-dx)dy, dx*dy, followed by
            // rounding (+r4) and normalization (>> 2*shift), then pack to
            // bytes and store 4 pixels.
            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t" // mm2 = s
                "movq      %%mm6, %%mm1 \n\t" // mm1 = s
                "psubw     %%mm4, %%mm2 \n\t" // mm2 = s - dx
                "psubw     %%mm5, %%mm1 \n\t" // mm1 = s - dy
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t" // src[1, 1]
                "movd         %3, %%mm4 \n\t" // src[0, 1]
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t" // src[1, 0]
                "movd         %1, %%mm4 \n\t" // src[0, 0]
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)

                "paddw        %5, %%mm1 \n\t" // + rounding constant r
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"
                "psrlw        %6, %%mm0 \n\t" // >> (2 * shift)
                "packuswb  %%mm0, %%mm0 \n\t" // clamp to [0,255] and pack
                "movd      %%mm0, %0    \n\t"
                : "=m" (dst[x + y * stride])
                : "m" (src[0]), "m" (src[1]),
                  "m" (src[stride]), "m" (src[stride + 1]),
                  "m" (*r4), "m" (shift2));
            src += stride;
        }
        // Step to the next 4-pixel column: undo the h rows walked, move right 4.
        src += 4 - h * stride;
    }
}
  136. #endif /* HAVE_INLINE_ASM */
  137. av_cold void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c)
  138. {
  139. #if HAVE_INLINE_ASM
  140. int cpu_flags = av_get_cpu_flags();
  141. if (INLINE_MMX(cpu_flags))
  142. c->gmc = gmc_mmx;
  143. #endif /* HAVE_INLINE_ASM */
  144. }