inv_txfm_sse2.c

/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
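
// transpose_16bit_4() below transposes a 4x4 block of 16-bit values held
// two rows per register: the first pair of unpacks interleaves rows 0/1
// with rows 2/3, and the second pair interleaves those results, leaving
// transposed rows 0-1 in res[0] and rows 2-3 in res[1].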
static INLINE void transpose_16bit_4(__m128i *res) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);

  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
}
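
// vpx_idct4x4_16_add_sse2() performs the 2-D inverse transform as two 1-D
// passes; idct4_sse2() transposes its input internally, so calling it twice
// covers rows and then columns.  The add-8 / arithmetic-shift-right-4 pair
// at the end is the vector form of ROUND_POWER_OF_TWO(x, 4), i.e.
// (x + 8) >> 4 in every 16-bit lane, applied before the result is added
// back to the prediction.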
void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
  const __m128i eight = _mm_set1_epi16(8);
  __m128i in[2];

  // Rows
  in[0] = load_input_data8(input);
  in[1] = load_input_data8(input + 8);
  idct4_sse2(in);

  // Columns
  idct4_sse2(in);

  // Final round and shift
  in[0] = _mm_add_epi16(in[0], eight);
  in[1] = _mm_add_epi16(in[1], eight);
  in[0] = _mm_srai_epi16(in[0], 4);
  in[1] = _mm_srai_epi16(in[1], 4);

  recon_and_store4x4_sse2(in, dest, stride);
}
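
// DC-only 4x4 path.  An illustrative scalar sketch of the value it computes
// (saturate_u8 is shorthand for clamping to [0, 255], not a real helper):
//   a = dct_const_round_shift(input[0] * cospi_16_64);  // 1-D pass 1
//   a = dct_const_round_shift(a * cospi_16_64);         // 1-D pass 2
//   a = ROUND_POWER_OF_TWO(a, 4);                       // final shift
//   every output pixel becomes saturate_u8(dest_pixel + a)
// The SSE2 code below broadcasts `a`, widens the four 4-byte rows to 16
// bits, adds, and packs back with unsigned saturation.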
void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  const __m128i zero = _mm_setzero_si128();
  int a;
  __m128i dc_value, d[2];

  a = (int)dct_const_round_shift((int16_t)input[0] * cospi_16_64);
  a = (int)dct_const_round_shift(a * cospi_16_64);
  a = ROUND_POWER_OF_TWO(a, 4);
  dc_value = _mm_set1_epi16(a);

  // Reconstruction and Store
  d[0] = _mm_cvtsi32_si128(*(const int *)(dest));
  d[1] = _mm_cvtsi32_si128(*(const int *)(dest + stride * 3));
  d[0] = _mm_unpacklo_epi32(d[0],
                            _mm_cvtsi32_si128(*(const int *)(dest + stride)));
  d[1] = _mm_unpacklo_epi32(
      _mm_cvtsi32_si128(*(const int *)(dest + stride * 2)), d[1]);
  d[0] = _mm_unpacklo_epi8(d[0], zero);
  d[1] = _mm_unpacklo_epi8(d[1], zero);
  d[0] = _mm_add_epi16(d[0], dc_value);
  d[1] = _mm_add_epi16(d[1], dc_value);
  d[0] = _mm_packus_epi16(d[0], d[1]);
  *(int *)dest = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d[0]);
}

void idct4_sse2(__m128i *const in) {
  const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  __m128i u[2];

  transpose_16bit_4(in);

  // stage 1
  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
  u[1] = _mm_unpackhi_epi16(in[0], in[1]);
  u[0] = idct_calc_wraplow_sse2(k__cospi_p16_p16, k__cospi_p16_m16, u[0]);
  u[1] = idct_calc_wraplow_sse2(k__cospi_p08_p24, k__cospi_p24_m08, u[1]);

  // stage 2
  in[0] = _mm_add_epi16(u[0], u[1]);
  in[1] = _mm_sub_epi16(u[0], u[1]);
  in[1] = _mm_shuffle_epi32(in[1], 0x4E);
}
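
// iadst4_sse2() keeps the coefficients interleaved as (x0, x1) and (x2, x3)
// pairs within each 32-bit lane (see the lane comments below), so a single
// _mm_madd_epi16 yields terms such as s_1 * x0 + s_3 * x1.  The
// sub/srli/add/slli sequence places (x0 - x2 + x3) in the upper 16 bits of
// each lane with zero below it, so the final madd against k__sinpi_1_3
// produces s_3 * (x0 - x2 + x3).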
void iadst4_sse2(__m128i *const in) {
  const __m128i k__sinpi_1_3 = pair_set_epi16(sinpi_1_9, sinpi_3_9);
  const __m128i k__sinpi_4_2 = pair_set_epi16(sinpi_4_9, sinpi_2_9);
  const __m128i k__sinpi_2_3 = pair_set_epi16(sinpi_2_9, sinpi_3_9);
  const __m128i k__sinpi_1_4 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
  const __m128i k__sinpi_12_n3 =
      pair_set_epi16(sinpi_1_9 + sinpi_2_9, -sinpi_3_9);
  __m128i u[4], v[5];

  // 00 01 20 21 02 03 22 23
  // 10 11 30 31 12 13 32 33
  const __m128i tr0_0 = _mm_unpacklo_epi32(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpackhi_epi32(in[0], in[1]);

  // 00 01 10 11 20 21 30 31
  // 02 03 12 13 22 23 32 33
  in[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
  in[1] = _mm_unpackhi_epi32(tr0_0, tr0_1);

  v[0] = _mm_madd_epi16(in[0], k__sinpi_1_3);    // s_1 * x0 + s_3 * x1
  v[1] = _mm_madd_epi16(in[1], k__sinpi_4_2);    // s_4 * x2 + s_2 * x3
  v[2] = _mm_madd_epi16(in[0], k__sinpi_2_3);    // s_2 * x0 + s_3 * x1
  v[3] = _mm_madd_epi16(in[1], k__sinpi_1_4);    // s_1 * x2 + s_4 * x3
  v[4] = _mm_madd_epi16(in[0], k__sinpi_12_n3);  // (s_1 + s_2) * x0 - s_3 * x1

  in[0] = _mm_sub_epi16(in[0], in[1]);  // x0 - x2
  in[1] = _mm_srli_epi32(in[1], 16);
  in[0] = _mm_add_epi16(in[0], in[1]);
  in[0] = _mm_slli_epi32(in[0], 16);  // x0 - x2 + x3

  u[0] = _mm_add_epi32(v[0], v[1]);
  u[1] = _mm_sub_epi32(v[2], v[3]);
  u[2] = _mm_madd_epi16(in[0], k__sinpi_1_3);
  u[3] = _mm_sub_epi32(v[1], v[3]);
  u[3] = _mm_add_epi32(u[3], v[4]);

  u[0] = dct_const_round_shift_sse2(u[0]);
  u[1] = dct_const_round_shift_sse2(u[1]);
  u[2] = dct_const_round_shift_sse2(u[2]);
  u[3] = dct_const_round_shift_sse2(u[3]);

  in[0] = _mm_packs_epi32(u[0], u[1]);
  in[1] = _mm_packs_epi32(u[2], u[3]);
}

static INLINE void load_buffer_8x8(const tran_low_t *const input,
                                   __m128i *const in) {
  in[0] = load_input_data8(input + 0 * 8);
  in[1] = load_input_data8(input + 1 * 8);
  in[2] = load_input_data8(input + 2 * 8);
  in[3] = load_input_data8(input + 3 * 8);
  in[4] = load_input_data8(input + 4 * 8);
  in[5] = load_input_data8(input + 5 * 8);
  in[6] = load_input_data8(input + 6 * 8);
  in[7] = load_input_data8(input + 7 * 8);
}

void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
  __m128i in[8];
  int i;

  // Load input data.
  load_buffer_8x8(input, in);

  // 2-D
  for (i = 0; i < 2; i++) {
    vpx_idct8_sse2(in);
  }

  write_buffer_8x8(in, dest, stride);
}

void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
  __m128i io[8];

  io[0] = load_input_data4(input + 0 * 8);
  io[1] = load_input_data4(input + 1 * 8);
  io[2] = load_input_data4(input + 2 * 8);
  io[3] = load_input_data4(input + 3 * 8);

  idct8x8_12_add_kernel_sse2(io);
  write_buffer_8x8(io, dest, stride);
}
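
// recon_and_store_8_dual() reconstructs two consecutive 8-pixel rows that
// share the same residual row in_x: both rows are widened to 16 bits,
// offset by in_x, packed back with unsigned saturation, and the low/high
// halves of the packed register are stored to row 0 and row 1 (the high
// half via _mm_storeh_pi).  The DC-only 8x8 function below calls it four
// times, advancing the destination by two rows per call.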
static INLINE void recon_and_store_8_dual(uint8_t *const dest,
                                          const __m128i in_x,
                                          const int stride) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d0, d1;

  d0 = _mm_loadl_epi64((__m128i *)(dest + 0 * stride));
  d1 = _mm_loadl_epi64((__m128i *)(dest + 1 * stride));
  d0 = _mm_unpacklo_epi8(d0, zero);
  d1 = _mm_unpacklo_epi8(d1, zero);
  d0 = _mm_add_epi16(in_x, d0);
  d1 = _mm_add_epi16(in_x, d1);
  d0 = _mm_packus_epi16(d0, d1);
  _mm_storel_epi64((__m128i *)(dest + 0 * stride), d0);
  _mm_storeh_pi((__m64 *)(dest + 1 * stride), _mm_castsi128_ps(d0));
}

void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  __m128i dc_value;
  tran_high_t a1;
  tran_low_t out =
      WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));

  out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
  a1 = ROUND_POWER_OF_TWO(out, 5);
  dc_value = _mm_set1_epi16((int16_t)a1);

  recon_and_store_8_dual(dest, dc_value, stride);
  dest += 2 * stride;
  recon_and_store_8_dual(dest, dc_value, stride);
  dest += 2 * stride;
  recon_and_store_8_dual(dest, dc_value, stride);
  dest += 2 * stride;
  recon_and_store_8_dual(dest, dc_value, stride);
}

void vpx_idct8_sse2(__m128i *const in) {
  // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
  transpose_16bit_8x8(in, in);

  // 4-stage 1D idct8x8
  idct8(in, in);
}
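
// iadst8_sse2(): 8-point inverse ADST applied to eight columns at once.
// Each stage interleaves pairs of rows, multiply-accumulates into 32 bits
// with _mm_madd_epi16, rounds via dct_const_round_shift_sse2(), and packs
// back to 16 bits.  The final assignment block negates the odd-indexed
// outputs (in[1], in[3], in[5], in[7]), matching the sign pattern of the
// scalar iadst8.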
void iadst8_sse2(__m128i *const in) {
  const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
  const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
  const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
  const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
  const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
  const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
  const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
  const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i kZero = _mm_set1_epi16(0);
  __m128i s[8], u[16], v[8], w[16];

  // transpose
  transpose_16bit_8x8(in, in);

  // column transformation
  // stage 1
  // interleave and multiply/add into 32-bit integer
  s[0] = _mm_unpacklo_epi16(in[7], in[0]);
  s[1] = _mm_unpackhi_epi16(in[7], in[0]);
  s[2] = _mm_unpacklo_epi16(in[5], in[2]);
  s[3] = _mm_unpackhi_epi16(in[5], in[2]);
  s[4] = _mm_unpacklo_epi16(in[3], in[4]);
  s[5] = _mm_unpackhi_epi16(in[3], in[4]);
  s[6] = _mm_unpacklo_epi16(in[1], in[6]);
  s[7] = _mm_unpackhi_epi16(in[1], in[6]);

  u[0] = _mm_madd_epi16(s[0], k__cospi_p02_p30);
  u[1] = _mm_madd_epi16(s[1], k__cospi_p02_p30);
  u[2] = _mm_madd_epi16(s[0], k__cospi_p30_m02);
  u[3] = _mm_madd_epi16(s[1], k__cospi_p30_m02);
  u[4] = _mm_madd_epi16(s[2], k__cospi_p10_p22);
  u[5] = _mm_madd_epi16(s[3], k__cospi_p10_p22);
  u[6] = _mm_madd_epi16(s[2], k__cospi_p22_m10);
  u[7] = _mm_madd_epi16(s[3], k__cospi_p22_m10);
  u[8] = _mm_madd_epi16(s[4], k__cospi_p18_p14);
  u[9] = _mm_madd_epi16(s[5], k__cospi_p18_p14);
  u[10] = _mm_madd_epi16(s[4], k__cospi_p14_m18);
  u[11] = _mm_madd_epi16(s[5], k__cospi_p14_m18);
  u[12] = _mm_madd_epi16(s[6], k__cospi_p26_p06);
  u[13] = _mm_madd_epi16(s[7], k__cospi_p26_p06);
  u[14] = _mm_madd_epi16(s[6], k__cospi_p06_m26);
  u[15] = _mm_madd_epi16(s[7], k__cospi_p06_m26);

  // addition
  w[0] = _mm_add_epi32(u[0], u[8]);
  w[1] = _mm_add_epi32(u[1], u[9]);
  w[2] = _mm_add_epi32(u[2], u[10]);
  w[3] = _mm_add_epi32(u[3], u[11]);
  w[4] = _mm_add_epi32(u[4], u[12]);
  w[5] = _mm_add_epi32(u[5], u[13]);
  w[6] = _mm_add_epi32(u[6], u[14]);
  w[7] = _mm_add_epi32(u[7], u[15]);
  w[8] = _mm_sub_epi32(u[0], u[8]);
  w[9] = _mm_sub_epi32(u[1], u[9]);
  w[10] = _mm_sub_epi32(u[2], u[10]);
  w[11] = _mm_sub_epi32(u[3], u[11]);
  w[12] = _mm_sub_epi32(u[4], u[12]);
  w[13] = _mm_sub_epi32(u[5], u[13]);
  w[14] = _mm_sub_epi32(u[6], u[14]);
  w[15] = _mm_sub_epi32(u[7], u[15]);

  // shift and rounding
  u[0] = dct_const_round_shift_sse2(w[0]);
  u[1] = dct_const_round_shift_sse2(w[1]);
  u[2] = dct_const_round_shift_sse2(w[2]);
  u[3] = dct_const_round_shift_sse2(w[3]);
  u[4] = dct_const_round_shift_sse2(w[4]);
  u[5] = dct_const_round_shift_sse2(w[5]);
  u[6] = dct_const_round_shift_sse2(w[6]);
  u[7] = dct_const_round_shift_sse2(w[7]);
  u[8] = dct_const_round_shift_sse2(w[8]);
  u[9] = dct_const_round_shift_sse2(w[9]);
  u[10] = dct_const_round_shift_sse2(w[10]);
  u[11] = dct_const_round_shift_sse2(w[11]);
  u[12] = dct_const_round_shift_sse2(w[12]);
  u[13] = dct_const_round_shift_sse2(w[13]);
  u[14] = dct_const_round_shift_sse2(w[14]);
  u[15] = dct_const_round_shift_sse2(w[15]);

  // back to 16-bit and pack 8 integers into __m128i
  in[0] = _mm_packs_epi32(u[0], u[1]);
  in[1] = _mm_packs_epi32(u[2], u[3]);
  in[2] = _mm_packs_epi32(u[4], u[5]);
  in[3] = _mm_packs_epi32(u[6], u[7]);
  in[4] = _mm_packs_epi32(u[8], u[9]);
  in[5] = _mm_packs_epi32(u[10], u[11]);
  in[6] = _mm_packs_epi32(u[12], u[13]);
  in[7] = _mm_packs_epi32(u[14], u[15]);

  // stage 2
  s[0] = _mm_add_epi16(in[0], in[2]);
  s[1] = _mm_add_epi16(in[1], in[3]);
  s[2] = _mm_sub_epi16(in[0], in[2]);
  s[3] = _mm_sub_epi16(in[1], in[3]);
  u[0] = _mm_unpacklo_epi16(in[4], in[5]);
  u[1] = _mm_unpackhi_epi16(in[4], in[5]);
  u[2] = _mm_unpacklo_epi16(in[6], in[7]);
  u[3] = _mm_unpackhi_epi16(in[6], in[7]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);

  w[0] = _mm_add_epi32(v[0], v[4]);
  w[1] = _mm_add_epi32(v[1], v[5]);
  w[2] = _mm_add_epi32(v[2], v[6]);
  w[3] = _mm_add_epi32(v[3], v[7]);
  w[4] = _mm_sub_epi32(v[0], v[4]);
  w[5] = _mm_sub_epi32(v[1], v[5]);
  w[6] = _mm_sub_epi32(v[2], v[6]);
  w[7] = _mm_sub_epi32(v[3], v[7]);

  u[0] = dct_const_round_shift_sse2(w[0]);
  u[1] = dct_const_round_shift_sse2(w[1]);
  u[2] = dct_const_round_shift_sse2(w[2]);
  u[3] = dct_const_round_shift_sse2(w[3]);
  u[4] = dct_const_round_shift_sse2(w[4]);
  u[5] = dct_const_round_shift_sse2(w[5]);
  u[6] = dct_const_round_shift_sse2(w[6]);
  u[7] = dct_const_round_shift_sse2(w[7]);
  // back to 16-bit integers
  s[4] = _mm_packs_epi32(u[0], u[1]);
  s[5] = _mm_packs_epi32(u[2], u[3]);
  s[6] = _mm_packs_epi32(u[4], u[5]);
  s[7] = _mm_packs_epi32(u[6], u[7]);

  // stage 3
  u[0] = _mm_unpacklo_epi16(s[2], s[3]);
  u[1] = _mm_unpackhi_epi16(s[2], s[3]);
  u[2] = _mm_unpacklo_epi16(s[6], s[7]);
  u[3] = _mm_unpackhi_epi16(s[6], s[7]);

  s[2] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_p16_p16);
  s[3] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_p16_m16);
  s[6] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_p16_p16);
  s[7] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_p16_m16);

  in[0] = s[0];
  in[1] = _mm_sub_epi16(kZero, s[4]);
  in[2] = s[6];
  in[3] = _mm_sub_epi16(kZero, s[2]);
  in[4] = s[3];
  in[5] = _mm_sub_epi16(kZero, s[7]);
  in[6] = s[5];
  in[7] = _mm_sub_epi16(kZero, s[1]);
}

static INLINE void idct16_load8x8(const tran_low_t *const input,
                                  __m128i *const in) {
  in[0] = load_input_data8(input + 0 * 16);
  in[1] = load_input_data8(input + 1 * 16);
  in[2] = load_input_data8(input + 2 * 16);
  in[3] = load_input_data8(input + 3 * 16);
  in[4] = load_input_data8(input + 4 * 16);
  in[5] = load_input_data8(input + 5 * 16);
  in[6] = load_input_data8(input + 6 * 16);
  in[7] = load_input_data8(input + 7 * 16);
}
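
// vpx_idct16x16_256_add_sse2(): the first loop applies the 1-D idct16 to
// rows 0-7 (results kept in l[]) and then rows 8-15 (results in r[]),
// loading and transposing each half as two 8x8 tiles.  The second loop
// transforms the columns eight at a time, re-transposing the matching
// slices of l[] and r[] before idct16_8col() and writing eight
// reconstructed pixels per output row.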
void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m128i l[16], r[16], out[16], *in;
  int i;

  in = l;
  for (i = 0; i < 2; i++) {
    idct16_load8x8(input, in);
    transpose_16bit_8x8(in, in);
    idct16_load8x8(input + 8, in + 8);
    transpose_16bit_8x8(in + 8, in + 8);
    idct16_8col(in, in);
    in = r;
    input += 128;
  }

  for (i = 0; i < 16; i += 8) {
    int j;
    transpose_16bit_8x8(l + i, out);
    transpose_16bit_8x8(r + i, out + 8);
    idct16_8col(out, out);

    for (j = 0; j < 16; ++j) {
      write_buffer_8x1(dest + j * stride, out[j]);
    }

    dest += 8;
  }
}

void vpx_idct16x16_38_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  __m128i in[16], temp[16], out[16];
  int i;

  idct16_load8x8(input, in);
  transpose_16bit_8x8(in, in);

  for (i = 8; i < 16; i++) {
    in[i] = _mm_setzero_si128();
  }
  idct16_8col(in, temp);

  for (i = 0; i < 16; i += 8) {
    int j;
    transpose_16bit_8x8(temp + i, in);
    idct16_8col(in, out);

    for (j = 0; j < 16; ++j) {
      write_buffer_8x1(dest + j * stride, out[j]);
    }

    dest += 8;
  }
}

void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  __m128i in[16], l[16];
  int i;

  // First 1-D inverse DCT
  // Load input data.
  in[0] = load_input_data4(input + 0 * 16);
  in[1] = load_input_data4(input + 1 * 16);
  in[2] = load_input_data4(input + 2 * 16);
  in[3] = load_input_data4(input + 3 * 16);

  idct16x16_10_pass1(in, l);

  // Second 1-D inverse transform, performed per 8x16 block
  for (i = 0; i < 16; i += 8) {
    int j;
    idct16x16_10_pass2(l + i, in);

    for (j = 0; j < 16; ++j) {
      write_buffer_8x1(dest + j * stride, in[j]);
    }

    dest += 8;
  }
}

static INLINE void recon_and_store_16(uint8_t *const dest,
                                      const __m128i in_x) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d0, d1;

  d0 = _mm_load_si128((__m128i *)(dest));
  d1 = _mm_unpackhi_epi8(d0, zero);
  d0 = _mm_unpacklo_epi8(d0, zero);
  d0 = _mm_add_epi16(in_x, d0);
  d1 = _mm_add_epi16(in_x, d1);
  d0 = _mm_packus_epi16(d0, d1);
  _mm_store_si128((__m128i *)(dest), d0);
}

void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  __m128i dc_value;
  int i;
  tran_high_t a1;
  tran_low_t out =
      WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));

  out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
  a1 = ROUND_POWER_OF_TWO(out, 6);
  dc_value = _mm_set1_epi16((int16_t)a1);

  for (i = 0; i < 16; ++i) {
    recon_and_store_16(dest, dc_value);
    dest += stride;
  }
}

void vpx_iadst16_8col_sse2(__m128i *const in) {
  // perform 16x16 1-D ADST for 8 columns
  __m128i s[16], x[16], u[32], v[32];
  const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
  const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
  const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
  const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
  const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
  const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
  const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
  const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
  const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
  const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
  const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
  const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
  const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
  const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
  const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
  const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
  const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
  const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
  const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
  const __m128i kZero = _mm_set1_epi16(0);

  u[0] = _mm_unpacklo_epi16(in[15], in[0]);
  u[1] = _mm_unpackhi_epi16(in[15], in[0]);
  u[2] = _mm_unpacklo_epi16(in[13], in[2]);
  u[3] = _mm_unpackhi_epi16(in[13], in[2]);
  u[4] = _mm_unpacklo_epi16(in[11], in[4]);
  u[5] = _mm_unpackhi_epi16(in[11], in[4]);
  u[6] = _mm_unpacklo_epi16(in[9], in[6]);
  u[7] = _mm_unpackhi_epi16(in[9], in[6]);
  u[8] = _mm_unpacklo_epi16(in[7], in[8]);
  u[9] = _mm_unpackhi_epi16(in[7], in[8]);
  u[10] = _mm_unpacklo_epi16(in[5], in[10]);
  u[11] = _mm_unpackhi_epi16(in[5], in[10]);
  u[12] = _mm_unpacklo_epi16(in[3], in[12]);
  u[13] = _mm_unpackhi_epi16(in[3], in[12]);
  u[14] = _mm_unpacklo_epi16(in[1], in[14]);
  u[15] = _mm_unpackhi_epi16(in[1], in[14]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
  v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
  v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
  v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
  v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
  v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
  v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
  v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
  v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
  v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
  v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
  v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
  v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
  v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
  v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
  v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
  v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
  v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
  v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);

  u[0] = _mm_add_epi32(v[0], v[16]);
  u[1] = _mm_add_epi32(v[1], v[17]);
  u[2] = _mm_add_epi32(v[2], v[18]);
  u[3] = _mm_add_epi32(v[3], v[19]);
  u[4] = _mm_add_epi32(v[4], v[20]);
  u[5] = _mm_add_epi32(v[5], v[21]);
  u[6] = _mm_add_epi32(v[6], v[22]);
  u[7] = _mm_add_epi32(v[7], v[23]);
  u[8] = _mm_add_epi32(v[8], v[24]);
  u[9] = _mm_add_epi32(v[9], v[25]);
  u[10] = _mm_add_epi32(v[10], v[26]);
  u[11] = _mm_add_epi32(v[11], v[27]);
  u[12] = _mm_add_epi32(v[12], v[28]);
  u[13] = _mm_add_epi32(v[13], v[29]);
  u[14] = _mm_add_epi32(v[14], v[30]);
  u[15] = _mm_add_epi32(v[15], v[31]);
  u[16] = _mm_sub_epi32(v[0], v[16]);
  u[17] = _mm_sub_epi32(v[1], v[17]);
  u[18] = _mm_sub_epi32(v[2], v[18]);
  u[19] = _mm_sub_epi32(v[3], v[19]);
  u[20] = _mm_sub_epi32(v[4], v[20]);
  u[21] = _mm_sub_epi32(v[5], v[21]);
  u[22] = _mm_sub_epi32(v[6], v[22]);
  u[23] = _mm_sub_epi32(v[7], v[23]);
  u[24] = _mm_sub_epi32(v[8], v[24]);
  u[25] = _mm_sub_epi32(v[9], v[25]);
  u[26] = _mm_sub_epi32(v[10], v[26]);
  u[27] = _mm_sub_epi32(v[11], v[27]);
  u[28] = _mm_sub_epi32(v[12], v[28]);
  u[29] = _mm_sub_epi32(v[13], v[29]);
  u[30] = _mm_sub_epi32(v[14], v[30]);
  u[31] = _mm_sub_epi32(v[15], v[31]);

  u[0] = dct_const_round_shift_sse2(u[0]);
  u[1] = dct_const_round_shift_sse2(u[1]);
  u[2] = dct_const_round_shift_sse2(u[2]);
  u[3] = dct_const_round_shift_sse2(u[3]);
  u[4] = dct_const_round_shift_sse2(u[4]);
  u[5] = dct_const_round_shift_sse2(u[5]);
  u[6] = dct_const_round_shift_sse2(u[6]);
  u[7] = dct_const_round_shift_sse2(u[7]);
  u[8] = dct_const_round_shift_sse2(u[8]);
  u[9] = dct_const_round_shift_sse2(u[9]);
  u[10] = dct_const_round_shift_sse2(u[10]);
  u[11] = dct_const_round_shift_sse2(u[11]);
  u[12] = dct_const_round_shift_sse2(u[12]);
  u[13] = dct_const_round_shift_sse2(u[13]);
  u[14] = dct_const_round_shift_sse2(u[14]);
  u[15] = dct_const_round_shift_sse2(u[15]);
  u[16] = dct_const_round_shift_sse2(u[16]);
  u[17] = dct_const_round_shift_sse2(u[17]);
  u[18] = dct_const_round_shift_sse2(u[18]);
  u[19] = dct_const_round_shift_sse2(u[19]);
  u[20] = dct_const_round_shift_sse2(u[20]);
  u[21] = dct_const_round_shift_sse2(u[21]);
  u[22] = dct_const_round_shift_sse2(u[22]);
  u[23] = dct_const_round_shift_sse2(u[23]);
  u[24] = dct_const_round_shift_sse2(u[24]);
  u[25] = dct_const_round_shift_sse2(u[25]);
  u[26] = dct_const_round_shift_sse2(u[26]);
  u[27] = dct_const_round_shift_sse2(u[27]);
  u[28] = dct_const_round_shift_sse2(u[28]);
  u[29] = dct_const_round_shift_sse2(u[29]);
  u[30] = dct_const_round_shift_sse2(u[30]);
  u[31] = dct_const_round_shift_sse2(u[31]);

  s[0] = _mm_packs_epi32(u[0], u[1]);
  s[1] = _mm_packs_epi32(u[2], u[3]);
  s[2] = _mm_packs_epi32(u[4], u[5]);
  s[3] = _mm_packs_epi32(u[6], u[7]);
  s[4] = _mm_packs_epi32(u[8], u[9]);
  s[5] = _mm_packs_epi32(u[10], u[11]);
  s[6] = _mm_packs_epi32(u[12], u[13]);
  s[7] = _mm_packs_epi32(u[14], u[15]);
  s[8] = _mm_packs_epi32(u[16], u[17]);
  s[9] = _mm_packs_epi32(u[18], u[19]);
  s[10] = _mm_packs_epi32(u[20], u[21]);
  s[11] = _mm_packs_epi32(u[22], u[23]);
  s[12] = _mm_packs_epi32(u[24], u[25]);
  s[13] = _mm_packs_epi32(u[26], u[27]);
  s[14] = _mm_packs_epi32(u[28], u[29]);
  s[15] = _mm_packs_epi32(u[30], u[31]);

  // stage 2
  u[0] = _mm_unpacklo_epi16(s[8], s[9]);
  u[1] = _mm_unpackhi_epi16(s[8], s[9]);
  u[2] = _mm_unpacklo_epi16(s[10], s[11]);
  u[3] = _mm_unpackhi_epi16(s[10], s[11]);
  u[4] = _mm_unpacklo_epi16(s[12], s[13]);
  u[5] = _mm_unpackhi_epi16(s[12], s[13]);
  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
  u[7] = _mm_unpackhi_epi16(s[14], s[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
  v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
  v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);

  u[0] = _mm_add_epi32(v[0], v[8]);
  u[1] = _mm_add_epi32(v[1], v[9]);
  u[2] = _mm_add_epi32(v[2], v[10]);
  u[3] = _mm_add_epi32(v[3], v[11]);
  u[4] = _mm_add_epi32(v[4], v[12]);
  u[5] = _mm_add_epi32(v[5], v[13]);
  u[6] = _mm_add_epi32(v[6], v[14]);
  u[7] = _mm_add_epi32(v[7], v[15]);
  u[8] = _mm_sub_epi32(v[0], v[8]);
  u[9] = _mm_sub_epi32(v[1], v[9]);
  u[10] = _mm_sub_epi32(v[2], v[10]);
  u[11] = _mm_sub_epi32(v[3], v[11]);
  u[12] = _mm_sub_epi32(v[4], v[12]);
  u[13] = _mm_sub_epi32(v[5], v[13]);
  u[14] = _mm_sub_epi32(v[6], v[14]);
  u[15] = _mm_sub_epi32(v[7], v[15]);

  u[0] = dct_const_round_shift_sse2(u[0]);
  u[1] = dct_const_round_shift_sse2(u[1]);
  u[2] = dct_const_round_shift_sse2(u[2]);
  u[3] = dct_const_round_shift_sse2(u[3]);
  u[4] = dct_const_round_shift_sse2(u[4]);
  u[5] = dct_const_round_shift_sse2(u[5]);
  u[6] = dct_const_round_shift_sse2(u[6]);
  u[7] = dct_const_round_shift_sse2(u[7]);
  u[8] = dct_const_round_shift_sse2(u[8]);
  u[9] = dct_const_round_shift_sse2(u[9]);
  u[10] = dct_const_round_shift_sse2(u[10]);
  u[11] = dct_const_round_shift_sse2(u[11]);
  u[12] = dct_const_round_shift_sse2(u[12]);
  u[13] = dct_const_round_shift_sse2(u[13]);
  u[14] = dct_const_round_shift_sse2(u[14]);
  u[15] = dct_const_round_shift_sse2(u[15]);

  x[0] = _mm_add_epi16(s[0], s[4]);
  x[1] = _mm_add_epi16(s[1], s[5]);
  x[2] = _mm_add_epi16(s[2], s[6]);
  x[3] = _mm_add_epi16(s[3], s[7]);
  x[4] = _mm_sub_epi16(s[0], s[4]);
  x[5] = _mm_sub_epi16(s[1], s[5]);
  x[6] = _mm_sub_epi16(s[2], s[6]);
  x[7] = _mm_sub_epi16(s[3], s[7]);
  x[8] = _mm_packs_epi32(u[0], u[1]);
  x[9] = _mm_packs_epi32(u[2], u[3]);
  x[10] = _mm_packs_epi32(u[4], u[5]);
  x[11] = _mm_packs_epi32(u[6], u[7]);
  x[12] = _mm_packs_epi32(u[8], u[9]);
  x[13] = _mm_packs_epi32(u[10], u[11]);
  x[14] = _mm_packs_epi32(u[12], u[13]);
  x[15] = _mm_packs_epi32(u[14], u[15]);

  // stage 3
  u[0] = _mm_unpacklo_epi16(x[4], x[5]);
  u[1] = _mm_unpackhi_epi16(x[4], x[5]);
  u[2] = _mm_unpacklo_epi16(x[6], x[7]);
  u[3] = _mm_unpackhi_epi16(x[6], x[7]);
  u[4] = _mm_unpacklo_epi16(x[12], x[13]);
  u[5] = _mm_unpackhi_epi16(x[12], x[13]);
  u[6] = _mm_unpacklo_epi16(x[14], x[15]);
  u[7] = _mm_unpackhi_epi16(x[14], x[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);

  u[0] = _mm_add_epi32(v[0], v[4]);
  u[1] = _mm_add_epi32(v[1], v[5]);
  u[2] = _mm_add_epi32(v[2], v[6]);
  u[3] = _mm_add_epi32(v[3], v[7]);
  u[4] = _mm_sub_epi32(v[0], v[4]);
  u[5] = _mm_sub_epi32(v[1], v[5]);
  u[6] = _mm_sub_epi32(v[2], v[6]);
  u[7] = _mm_sub_epi32(v[3], v[7]);
  u[8] = _mm_add_epi32(v[8], v[12]);
  u[9] = _mm_add_epi32(v[9], v[13]);
  u[10] = _mm_add_epi32(v[10], v[14]);
  u[11] = _mm_add_epi32(v[11], v[15]);
  u[12] = _mm_sub_epi32(v[8], v[12]);
  u[13] = _mm_sub_epi32(v[9], v[13]);
  u[14] = _mm_sub_epi32(v[10], v[14]);
  u[15] = _mm_sub_epi32(v[11], v[15]);

  v[0] = dct_const_round_shift_sse2(u[0]);
  v[1] = dct_const_round_shift_sse2(u[1]);
  v[2] = dct_const_round_shift_sse2(u[2]);
  v[3] = dct_const_round_shift_sse2(u[3]);
  v[4] = dct_const_round_shift_sse2(u[4]);
  v[5] = dct_const_round_shift_sse2(u[5]);
  v[6] = dct_const_round_shift_sse2(u[6]);
  v[7] = dct_const_round_shift_sse2(u[7]);
  v[8] = dct_const_round_shift_sse2(u[8]);
  v[9] = dct_const_round_shift_sse2(u[9]);
  v[10] = dct_const_round_shift_sse2(u[10]);
  v[11] = dct_const_round_shift_sse2(u[11]);
  v[12] = dct_const_round_shift_sse2(u[12]);
  v[13] = dct_const_round_shift_sse2(u[13]);
  v[14] = dct_const_round_shift_sse2(u[14]);
  v[15] = dct_const_round_shift_sse2(u[15]);

  s[0] = _mm_add_epi16(x[0], x[2]);
  s[1] = _mm_add_epi16(x[1], x[3]);
  s[2] = _mm_sub_epi16(x[0], x[2]);
  s[3] = _mm_sub_epi16(x[1], x[3]);
  s[4] = _mm_packs_epi32(v[0], v[1]);
  s[5] = _mm_packs_epi32(v[2], v[3]);
  s[6] = _mm_packs_epi32(v[4], v[5]);
  s[7] = _mm_packs_epi32(v[6], v[7]);
  s[8] = _mm_add_epi16(x[8], x[10]);
  s[9] = _mm_add_epi16(x[9], x[11]);
  s[10] = _mm_sub_epi16(x[8], x[10]);
  s[11] = _mm_sub_epi16(x[9], x[11]);
  s[12] = _mm_packs_epi32(v[8], v[9]);
  s[13] = _mm_packs_epi32(v[10], v[11]);
  s[14] = _mm_packs_epi32(v[12], v[13]);
  s[15] = _mm_packs_epi32(v[14], v[15]);

  // stage 4
  u[0] = _mm_unpacklo_epi16(s[2], s[3]);
  u[1] = _mm_unpackhi_epi16(s[2], s[3]);
  u[2] = _mm_unpacklo_epi16(s[6], s[7]);
  u[3] = _mm_unpackhi_epi16(s[6], s[7]);
  u[4] = _mm_unpacklo_epi16(s[10], s[11]);
  u[5] = _mm_unpackhi_epi16(s[10], s[11]);
  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
  u[7] = _mm_unpackhi_epi16(s[14], s[15]);

  in[7] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_m16_m16);
  in[8] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_p16_m16);
  in[4] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_p16_p16);
  in[11] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_m16_p16);
  in[6] = idct_calc_wraplow_sse2(u[4], u[5], k__cospi_p16_p16);
  in[9] = idct_calc_wraplow_sse2(u[4], u[5], k__cospi_m16_p16);
  in[5] = idct_calc_wraplow_sse2(u[6], u[7], k__cospi_m16_m16);
  in[10] = idct_calc_wraplow_sse2(u[6], u[7], k__cospi_p16_m16);

  in[0] = s[0];
  in[1] = _mm_sub_epi16(kZero, s[8]);
  in[2] = s[12];
  in[3] = _mm_sub_epi16(kZero, s[4]);
  in[12] = s[5];
  in[13] = _mm_sub_epi16(kZero, s[13]);
  in[14] = s[9];
  in[15] = _mm_sub_epi16(kZero, s[1]);
}

void idct16_sse2(__m128i *const in0, __m128i *const in1) {
  transpose_16bit_16x16(in0, in1);
  idct16_8col(in0, in0);
  idct16_8col(in1, in1);
}

void iadst16_sse2(__m128i *const in0, __m128i *const in1) {
  transpose_16bit_16x16(in0, in1);
  vpx_iadst16_8col_sse2(in0);
  vpx_iadst16_8col_sse2(in1);
}

// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31

// For each 8x32 block __m128i in[32],
// Input with index, 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[8]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[8], step2[8];

  // stage 3
  butterfly(in[4], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);

  // stage 4
  step2[0] = butterfly_cospi16(in[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[0];
  step1[2] = step2[0];
  step1[3] = step2[0];
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32],
// Input with index, 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[16]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[16], step2[16];

  // stage 2
  butterfly(in[2], zero, cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly(zero, in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_34_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_34_8x32_quarter_1(in, temp);
  idct32_34_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index, 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[32];

  // stage 1
  butterfly(in[1], zero, cospi_31_64, cospi_1_64, &step1[16], &step1[31]);
  butterfly(zero, in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);
  butterfly(in[5], zero, cospi_27_64, cospi_5_64, &step1[20], &step1[27]);
  butterfly(zero, in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);

  // stage 3
  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_34_8x32_sse2(const __m128i *const in /*in[32]*/,
                         __m128i *const out /*out[32]*/) {
  __m128i temp[32];

  idct32_34_8x32_quarter_1_2(in, temp);
  idct32_34_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}
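
// The "_34" variant assumes at most 34 non-zero coefficients, all inside
// the top-left 8x8 corner of the 32x32 block, so the row pass is a single
// 8x32 transform and the quarter_* helpers above pass the zero register for
// the coefficient positions that are known to be zero.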
// Only upper-left 8x8 has non-zero coeff
void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  __m128i io[32], col[32];
  int i;

  // Load input data. Only need to load the top left 8x8 block.
  load_transpose_16bit_8x8(input, 32, io);
  idct32_34_8x32_sse2(io, col);

  for (i = 0; i < 32; i += 8) {
    int j;
    transpose_16bit_8x8(col + i, io);
    idct32_34_8x32_sse2(io, io);

    for (j = 0; j < 32; ++j) {
      write_buffer_8x1(dest + j * stride, io[j]);
    }

    dest += 8;
  }
}

// For each 8x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12, 16, 20, 24, 28
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_1024_8x32_quarter_1(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  butterfly(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  butterfly(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);

  // stage 4
  butterfly(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
  butterfly(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  step2[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm_add_epi16(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[1], step2[2]);
  step1[2] = _mm_sub_epi16(step2[1], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14, 18, 22, 26, 30
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_1024_8x32_quarter_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  butterfly(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);
  butterfly(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);
  butterfly(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi16(step2[8], step2[9]);
  step1[9] = _mm_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm_add_epi16(step2[11], step2[10]);
  step1[12] = _mm_add_epi16(step2[12], step2[13]);
  step1[13] = _mm_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm_add_epi16(step2[15], step2[14]);

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_1024_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_1024_8x32_quarter_1(in, temp);
  idct32_1024_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_1024_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  butterfly(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16], &step1[31]);
  butterfly(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17], &step1[30]);
  butterfly(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18], &step1[29]);
  butterfly(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);
  butterfly(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20], &step1[27]);
  butterfly(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21], &step1[26]);
  butterfly(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22], &step1[25]);
  butterfly(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi16(step1[16], step1[17]);
  step2[17] = _mm_sub_epi16(step1[16], step1[17]);
  step2[18] = _mm_sub_epi16(step1[19], step1[18]);
  step2[19] = _mm_add_epi16(step1[19], step1[18]);
  step2[20] = _mm_add_epi16(step1[20], step1[21]);
  step2[21] = _mm_sub_epi16(step1[20], step1[21]);
  step2[22] = _mm_sub_epi16(step1[23], step1[22]);
  step2[23] = _mm_add_epi16(step1[23], step1[22]);
  step2[24] = _mm_add_epi16(step1[24], step1[25]);
  step2[25] = _mm_sub_epi16(step1[24], step1[25]);
  step2[26] = _mm_sub_epi16(step1[27], step1[26]);
  step2[27] = _mm_add_epi16(step1[27], step1[26]);
  step2[28] = _mm_add_epi16(step1[28], step1[29]);
  step2[29] = _mm_sub_epi16(step1[28], step1[29]);
  step2[30] = _mm_sub_epi16(step1[31], step1[30]);
  step2[31] = _mm_add_epi16(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_1024_8x32(const __m128i *const in /*in[32]*/,
                      __m128i *const out /*out[32]*/) {
  __m128i temp[32];

  idct32_1024_8x32_quarter_1_2(in, temp);
  idct32_1024_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}
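
// vpx_idct32x32_1024_add_sse2(): full-coefficient path.  The row pass
// stores its results in col[4][32], one 8-row band per iteration; the
// column pass then rebuilds each 8x32 slice by transposing one 8x8 tile
// from every band before running idct32_1024_8x32() again and handing the
// result to store_buffer_8x32().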
void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m128i col[4][32], io[32];
  int i;

  // rows
  for (i = 0; i < 4; i++) {
    load_transpose_16bit_8x8(&input[0], 32, &io[0]);
    load_transpose_16bit_8x8(&input[8], 32, &io[8]);
    load_transpose_16bit_8x8(&input[16], 32, &io[16]);
    load_transpose_16bit_8x8(&input[24], 32, &io[24]);
    idct32_1024_8x32(io, col[i]);
    input += 32 << 3;
  }

  // columns
  for (i = 0; i < 32; i += 8) {
    // Transpose 32x8 block to 8x32 block
    transpose_16bit_8x8(col[0] + i, io);
    transpose_16bit_8x8(col[1] + i, io + 8);
    transpose_16bit_8x8(col[2] + i, io + 16);
    transpose_16bit_8x8(col[3] + i, io + 24);
    idct32_1024_8x32(io, io);
    store_buffer_8x32(io, dest, stride);
    dest += 8;
  }
}

void vpx_idct32x32_135_add_sse2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m128i col[2][32], in[32], out[32];
  int i;

  for (i = 16; i < 32; i++) {
    in[i] = _mm_setzero_si128();
  }

  // rows
  for (i = 0; i < 2; i++) {
    load_transpose_16bit_8x8(&input[0], 32, &in[0]);
    load_transpose_16bit_8x8(&input[8], 32, &in[8]);
    idct32_1024_8x32(in, col[i]);
    input += 32 << 3;
  }

  // columns
  for (i = 0; i < 32; i += 8) {
    transpose_16bit_8x8(col[0] + i, in);
    transpose_16bit_8x8(col[1] + i, in + 8);
    idct32_1024_8x32(in, out);
    store_buffer_8x32(out, dest, stride);
    dest += 8;
  }
}

void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  __m128i dc_value;
  int j;
  tran_high_t a1;
  tran_low_t out =
      WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));

  out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
  a1 = ROUND_POWER_OF_TWO(out, 6);
  dc_value = _mm_set1_epi16((int16_t)a1);

  for (j = 0; j < 32; ++j) {
    recon_and_store_16(dest + j * stride + 0, dc_value);
    recon_and_store_16(dest + j * stride + 16, dc_value);
  }
}