
/*
 * SIMD-optimized halfpel functions are compiled twice for rnd/no_rnd
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
 * and improved by Zdenek Kabelac <kabi@users.sf.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdint.h>

// put_pixels
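/* put, horizontal halfpel, 8 pixels wide: each output byte is the
 * (rnd/no_rnd) average of two horizontally adjacent source bytes;
 * the loop is unrolled to process four rows per iteration. */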
av_unused static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    MOVQ_BFE(mm6);
    __asm__ volatile(
        "lea    (%3, %3), %%"FF_REG_a"  \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm1            \n\t"
        "movq   (%1, %3), %%mm2         \n\t"
        "movq   1(%1, %3), %%mm3        \n\t"
        PAVGBP(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
        "movq   %%mm4, (%2)             \n\t"
        "movq   %%mm5, (%2, %3)         \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm1            \n\t"
        "movq   (%1, %3), %%mm2         \n\t"
        "movq   1(%1, %3), %%mm3        \n\t"
        PAVGBP(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
        "movq   %%mm4, (%2)             \n\t"
        "movq   %%mm5, (%2, %3)         \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "subl   $4, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels), "+D"(block)
        :"r"((x86_reg)line_size)
        :FF_REG_a, "memory");
}
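
/*
 * Illustrative scalar sketch (not part of the original template) of what the
 * put *_x2 kernel above computes for the rounding build. The function name
 * and the hardcoded "+ 1" are assumptions for illustration only; in the real
 * template the rounding behaviour comes from the PAVGB/PAVGBP macros, which
 * differ between the rnd and no_rnd compilations. Kept under #if 0 because
 * this template is included twice and the sketch is not meant to be built.
 */
#if 0
static void put_pixels8_x2_scalar_sketch(uint8_t *block, const uint8_t *pixels,
                                         ptrdiff_t line_size, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            /* halfpel average of two horizontally adjacent source pixels */
            block[x] = (pixels[x] + pixels[x + 1] + 1) >> 1;
        pixels += line_size;
        block  += line_size;
    }
}
#endif

/* put, horizontal halfpel, 16 pixels wide: same operation as above, handling
 * two 8-byte halves per row and four rows per loop iteration. */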
av_unused static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    MOVQ_BFE(mm6);
    __asm__ volatile(
        "lea    (%3, %3), %%"FF_REG_a"  \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm1            \n\t"
        "movq   (%1, %3), %%mm2         \n\t"
        "movq   1(%1, %3), %%mm3        \n\t"
        PAVGBP(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
        "movq   %%mm4, (%2)             \n\t"
        "movq   %%mm5, (%2, %3)         \n\t"
        "movq   8(%1), %%mm0            \n\t"
        "movq   9(%1), %%mm1            \n\t"
        "movq   8(%1, %3), %%mm2        \n\t"
        "movq   9(%1, %3), %%mm3        \n\t"
        PAVGBP(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
        "movq   %%mm4, 8(%2)            \n\t"
        "movq   %%mm5, 8(%2, %3)        \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm1            \n\t"
        "movq   (%1, %3), %%mm2         \n\t"
        "movq   1(%1, %3), %%mm3        \n\t"
        PAVGBP(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
        "movq   %%mm4, (%2)             \n\t"
        "movq   %%mm5, (%2, %3)         \n\t"
        "movq   8(%1), %%mm0            \n\t"
        "movq   9(%1), %%mm1            \n\t"
        "movq   8(%1, %3), %%mm2        \n\t"
        "movq   9(%1, %3), %%mm3        \n\t"
        PAVGBP(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
        "movq   %%mm4, 8(%2)            \n\t"
        "movq   %%mm5, 8(%2, %3)        \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "subl   $4, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels), "+D"(block)
        :"r"((x86_reg)line_size)
        :FF_REG_a, "memory");
}
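
/* put, vertical halfpel, 8 pixels wide: each output byte averages a source
 * byte with the byte one line below it; mm0/mm2 carry the previous row across
 * iterations so every source row is loaded only once (four rows per pass). */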
av_unused static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    MOVQ_BFE(mm6);
    __asm__ volatile(
        "lea    (%3, %3), %%"FF_REG_a"  \n\t"
        "movq   (%1), %%mm0             \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq   (%1, %%"FF_REG_a"), %%mm2 \n\t"
        PAVGBP(%%mm1, %%mm0, %%mm4,   %%mm2, %%mm1, %%mm5)
        "movq   %%mm4, (%2)             \n\t"
        "movq   %%mm5, (%2, %3)         \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq   (%1, %%"FF_REG_a"), %%mm0 \n\t"
        PAVGBP(%%mm1, %%mm2, %%mm4,   %%mm0, %%mm1, %%mm5)
        "movq   %%mm4, (%2)             \n\t"
        "movq   %%mm5, (%2, %3)         \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "subl   $4, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels), "+D"(block)
        :"r"((x86_reg)line_size)
        :FF_REG_a, "memory");
}
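
/* avg, horizontal halfpel, 16 pixels wide: computes the halfpel average of
 * adjacent source bytes and then averages that result with the data already
 * in the destination, one row (two 8-byte halves) per iteration. */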
av_unused static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    MOVQ_BFE(mm6);
    __asm__ volatile(
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1), %%mm0             \n\t"
        "movq   1(%1), %%mm1            \n\t"
        "movq   (%2), %%mm3             \n\t"
        PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
        PAVGB_MMX(%%mm3, %%mm2, %%mm0, %%mm6)
        "movq   %%mm0, (%2)             \n\t"
        "movq   8(%1), %%mm0            \n\t"
        "movq   9(%1), %%mm1            \n\t"
        "movq   8(%2), %%mm3            \n\t"
        PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
        PAVGB_MMX(%%mm3, %%mm2, %%mm0, %%mm6)
        "movq   %%mm0, 8(%2)            \n\t"
        "add    %3, %1                  \n\t"
        "add    %3, %2                  \n\t"
        "subl   $1, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels), "+D"(block)
        :"r"((x86_reg)line_size)
        :"memory");
}
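
/* avg, vertical halfpel, 8 pixels wide: averages vertically adjacent source
 * rows and then averages the result with the destination (PAVGB_MMX); like
 * the put y2 kernel, the previous source row is kept in a register, four
 * rows per iteration. */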
av_unused static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    MOVQ_BFE(mm6);
    __asm__ volatile(
        "lea    (%3, %3), %%"FF_REG_a"  \n\t"
        "movq   (%1), %%mm0             \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq   (%1, %%"FF_REG_a"), %%mm2 \n\t"
        PAVGBP(%%mm1, %%mm0, %%mm4,   %%mm2, %%mm1, %%mm5)
        "movq   (%2), %%mm3             \n\t"
        PAVGB_MMX(%%mm3, %%mm4, %%mm0, %%mm6)
        "movq   (%2, %3), %%mm3         \n\t"
        PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6)
        "movq   %%mm0, (%2)             \n\t"
        "movq   %%mm1, (%2, %3)         \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq   (%1, %%"FF_REG_a"), %%mm0 \n\t"
        PAVGBP(%%mm1, %%mm2, %%mm4,   %%mm0, %%mm1, %%mm5)
        "movq   (%2), %%mm3             \n\t"
        PAVGB_MMX(%%mm3, %%mm4, %%mm2, %%mm6)
        "movq   (%2, %3), %%mm3         \n\t"
        PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6)
        "movq   %%mm2, (%2)             \n\t"
        "movq   %%mm1, (%2, %3)         \n\t"
        "add    %%"FF_REG_a", %1        \n\t"
        "add    %%"FF_REG_a", %2        \n\t"
        "subl   $4, %0                  \n\t"
        "jnz    1b                      \n\t"
        :"+g"(h), "+S"(pixels), "+D"(block)
        :"r"((x86_reg)line_size)
        :FF_REG_a, "memory");
}
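
/*
 * Illustrative scalar sketch (not part of the original template) of what the
 * avg *_y2 kernel above computes: the same vertical halfpel average as the
 * put variant, averaged once more with the existing destination data. The
 * function name and the "+ 1" rounding are illustrative assumptions; the
 * actual rounding is selected by the rnd/no_rnd build via the PAVGB* macros.
 * Kept under #if 0 because this template is included twice.
 */
#if 0
static void avg_pixels8_y2_scalar_sketch(uint8_t *block, const uint8_t *pixels,
                                         ptrdiff_t line_size, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++) {
            /* vertical halfpel average of two adjacent source rows ... */
            int t = (pixels[x] + pixels[x + line_size] + 1) >> 1;
            /* ... then averaged with the pixel already in the destination */
            block[x] = (block[x] + t + 1) >> 1;
        }
        pixels += line_size;
        block  += line_size;
    }
}
#endif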