fwd_dct32x32_impl_avx2.h

/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/txfm_common.h"

#define pair256_set_epi16(a, b)                                            \
  _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))

#define pair256_set_epi32(a, b)                                               \
  _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), (int)(b), (int)(a), \
                   (int)(b), (int)(a))
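
// pair256_set_epi16(a, b) broadcasts the pair (a, b) into all eight 32-bit
// lanes of a 256-bit register (a in the low halfword, b in the high one).
// Combined with _mm256_unpacklo/hi_epi16 and _mm256_madd_epi16 this is the
// butterfly-rotation idiom used throughout this file; a minimal sketch
// (the names x, y, r are illustrative, not from this file):
//   const __m256i xy = _mm256_unpacklo_epi16(x, y);  // x0,y0,x1,y1,...
//   const __m256i ab = pair256_set_epi16(a, b);      // a,b,a,b,...
//   const __m256i r = _mm256_madd_epi16(xy, ab);     // r_i = x_i*a + y_i*b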

#if FDCT32x32_HIGH_PRECISION
static INLINE __m256i k_madd_epi32_avx2(__m256i a, __m256i b) {
  __m256i buf0, buf1;
  buf0 = _mm256_mul_epu32(a, b);
  a = _mm256_srli_epi64(a, 32);
  b = _mm256_srli_epi64(b, 32);
  buf1 = _mm256_mul_epu32(a, b);
  return _mm256_add_epi64(buf0, buf1);
}
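
// k_madd_epi32_avx2() is a 32x32 -> 64-bit multiply-accumulate: for each
// 64-bit lane it returns lo(a)*lo(b) + hi(a)*hi(b), where lo/hi are the two
// packed 32-bit elements of that lane (the products are formed as unsigned
// by _mm256_mul_epu32). Scalar sketch of one lane:
//   uint64_t r = (uint64_t)a_lo * b_lo + (uint64_t)a_hi * b_hi;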

static INLINE __m256i k_packs_epi64_avx2(__m256i a, __m256i b) {
  __m256i buf0 = _mm256_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 2, 0));
  __m256i buf1 = _mm256_shuffle_epi32(b, _MM_SHUFFLE(0, 0, 2, 0));
  return _mm256_unpacklo_epi64(buf0, buf1);
}
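
// k_packs_epi64_avx2() narrows eight 64-bit values back to 32 bits: it keeps
// the low 32 bits of each 64-bit lane and, within each 128-bit half, places
// a's two results before b's two, i.e. { a0, a1, b0, b1 } per half (the
// _MM_SHUFFLE(0, 0, 2, 0) gathers elements 0 and 2 of each half).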
#endif  // FDCT32x32_HIGH_PRECISION

void FDCT32x32_2D_AVX2(const int16_t *input, int16_t *output_org, int stride) {
  // Calculate pre-multiplied strides
  const int str1 = stride;
  const int str2 = 2 * stride;
  const int str3 = 2 * stride + str1;
  // We need an intermediate buffer between passes.
  DECLARE_ALIGNED(32, int16_t, intermediate[32 * 32]);
  // Constants
  //    When we use them, in one case, they are all the same. In all others
  //    it's a pair of them that we need to repeat eight times. This is done
  //    by constructing the 32-bit constant corresponding to that pair.
  const __m256i k__cospi_p16_p16 = _mm256_set1_epi16(cospi_16_64);
  const __m256i k__cospi_p16_m16 =
      pair256_set_epi16(+cospi_16_64, -cospi_16_64);
  const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
  const __m256i k__cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
  const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64, cospi_8_64);
  const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64, cospi_20_64);
  const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
  const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
  const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64, cospi_4_64);
  const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
  const __m256i k__cospi_m12_m20 =
      pair256_set_epi16(-cospi_12_64, -cospi_20_64);
  const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64, cospi_2_64);
  const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64, cospi_18_64);
  const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64, cospi_10_64);
  const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64, cospi_26_64);
  const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64, cospi_6_64);
  const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64, cospi_22_64);
  const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64, cospi_14_64);
  const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64, cospi_30_64);
  const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64, cospi_1_64);
  const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64, cospi_17_64);
  const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64, cospi_9_64);
  const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64, cospi_25_64);
  const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64, cospi_7_64);
  const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64, cospi_23_64);
  const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64, cospi_15_64);
  const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64, cospi_31_64);
  const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64, cospi_5_64);
  const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64, cospi_21_64);
  const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64, cospi_13_64);
  const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64, cospi_29_64);
  const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64, cospi_3_64);
  const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64, cospi_19_64);
  const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64, cospi_11_64);
  const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64, cospi_27_64);
  const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING);
  const __m256i kZero = _mm256_set1_epi16(0);
  const __m256i kOne = _mm256_set1_epi16(1);
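  // Note: k__DCT_CONST_ROUNDING together with the DCT_CONST_BITS shifts below
  // implements dct_const_round_shift() from vpx_dsp/txfm_common.h, i.e. in
  // scalar form (with DCT_CONST_ROUNDING == 1 << (DCT_CONST_BITS - 1)):
  //   rounded = (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
  // kZero and kOne serve the final ">> 2" rounding of the second pass (see
  // the !FDCT32x32_HIGH_PRECISION block below).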
  // Do the two transform/transpose passes
  int pass;
  for (pass = 0; pass < 2; ++pass) {
    // We process sixteen columns (transposed rows in second pass) at a time.
    int column_start;
    for (column_start = 0; column_start < 32; column_start += 16) {
      __m256i step1[32];
      __m256i step2[32];
      __m256i step3[32];
      __m256i out[32];
      // Stage 1
      // Note: even though all the loads below are aligned, using the aligned
      // intrinsic makes the code slightly slower.
      if (0 == pass) {
        const int16_t *in = &input[column_start];
        // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2;
        // Note: the next four blocks could be in a loop. That would help the
        // instruction cache but is actually slower.
        {
          const int16_t *ina = in + 0 * str1;
          const int16_t *inb = in + 31 * str1;
          __m256i *step1a = &step1[0];
          __m256i *step1b = &step1[31];
          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
          const __m256i ina1 =
              _mm256_loadu_si256((const __m256i *)(ina + str1));
          const __m256i ina2 =
              _mm256_loadu_si256((const __m256i *)(ina + str2));
          const __m256i ina3 =
              _mm256_loadu_si256((const __m256i *)(ina + str3));
          const __m256i inb3 =
              _mm256_loadu_si256((const __m256i *)(inb - str3));
          const __m256i inb2 =
              _mm256_loadu_si256((const __m256i *)(inb - str2));
          const __m256i inb1 =
              _mm256_loadu_si256((const __m256i *)(inb - str1));
          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
          step1a[0] = _mm256_add_epi16(ina0, inb0);
          step1a[1] = _mm256_add_epi16(ina1, inb1);
          step1a[2] = _mm256_add_epi16(ina2, inb2);
          step1a[3] = _mm256_add_epi16(ina3, inb3);
          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
        }
        {
          const int16_t *ina = in + 4 * str1;
          const int16_t *inb = in + 27 * str1;
          __m256i *step1a = &step1[4];
          __m256i *step1b = &step1[27];
          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
          const __m256i ina1 =
              _mm256_loadu_si256((const __m256i *)(ina + str1));
          const __m256i ina2 =
              _mm256_loadu_si256((const __m256i *)(ina + str2));
          const __m256i ina3 =
              _mm256_loadu_si256((const __m256i *)(ina + str3));
          const __m256i inb3 =
              _mm256_loadu_si256((const __m256i *)(inb - str3));
          const __m256i inb2 =
              _mm256_loadu_si256((const __m256i *)(inb - str2));
          const __m256i inb1 =
              _mm256_loadu_si256((const __m256i *)(inb - str1));
          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
          step1a[0] = _mm256_add_epi16(ina0, inb0);
          step1a[1] = _mm256_add_epi16(ina1, inb1);
          step1a[2] = _mm256_add_epi16(ina2, inb2);
          step1a[3] = _mm256_add_epi16(ina3, inb3);
          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
        }
        {
          const int16_t *ina = in + 8 * str1;
          const int16_t *inb = in + 23 * str1;
          __m256i *step1a = &step1[8];
          __m256i *step1b = &step1[23];
          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
          const __m256i ina1 =
              _mm256_loadu_si256((const __m256i *)(ina + str1));
          const __m256i ina2 =
              _mm256_loadu_si256((const __m256i *)(ina + str2));
          const __m256i ina3 =
              _mm256_loadu_si256((const __m256i *)(ina + str3));
          const __m256i inb3 =
              _mm256_loadu_si256((const __m256i *)(inb - str3));
          const __m256i inb2 =
              _mm256_loadu_si256((const __m256i *)(inb - str2));
          const __m256i inb1 =
              _mm256_loadu_si256((const __m256i *)(inb - str1));
          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
          step1a[0] = _mm256_add_epi16(ina0, inb0);
          step1a[1] = _mm256_add_epi16(ina1, inb1);
          step1a[2] = _mm256_add_epi16(ina2, inb2);
          step1a[3] = _mm256_add_epi16(ina3, inb3);
          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
        }
        {
          const int16_t *ina = in + 12 * str1;
          const int16_t *inb = in + 19 * str1;
          __m256i *step1a = &step1[12];
          __m256i *step1b = &step1[19];
          const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina));
          const __m256i ina1 =
              _mm256_loadu_si256((const __m256i *)(ina + str1));
          const __m256i ina2 =
              _mm256_loadu_si256((const __m256i *)(ina + str2));
          const __m256i ina3 =
              _mm256_loadu_si256((const __m256i *)(ina + str3));
          const __m256i inb3 =
              _mm256_loadu_si256((const __m256i *)(inb - str3));
          const __m256i inb2 =
              _mm256_loadu_si256((const __m256i *)(inb - str2));
          const __m256i inb1 =
              _mm256_loadu_si256((const __m256i *)(inb - str1));
          const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb));
          step1a[0] = _mm256_add_epi16(ina0, inb0);
          step1a[1] = _mm256_add_epi16(ina1, inb1);
          step1a[2] = _mm256_add_epi16(ina2, inb2);
          step1a[3] = _mm256_add_epi16(ina3, inb3);
          step1b[-3] = _mm256_sub_epi16(ina3, inb3);
          step1b[-2] = _mm256_sub_epi16(ina2, inb2);
          step1b[-1] = _mm256_sub_epi16(ina1, inb1);
          step1b[-0] = _mm256_sub_epi16(ina0, inb0);
          step1a[0] = _mm256_slli_epi16(step1a[0], 2);
          step1a[1] = _mm256_slli_epi16(step1a[1], 2);
          step1a[2] = _mm256_slli_epi16(step1a[2], 2);
          step1a[3] = _mm256_slli_epi16(step1a[3], 2);
          step1b[-3] = _mm256_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm256_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm256_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm256_slli_epi16(step1b[-0], 2);
        }
      } else {
        int16_t *in = &intermediate[column_start];
        // step1[i] = in[ 0 * 32] + in[(32 - 1) * 32];
        // Note: using the same approach as above to have common offset is
        // counter-productive as all offsets can be calculated at compile
        // time.
        // Note: the next four blocks could be in a loop. That would help the
        // instruction cache but is actually slower.
        {
          __m256i in00 = _mm256_loadu_si256((const __m256i *)(in + 0 * 32));
          __m256i in01 = _mm256_loadu_si256((const __m256i *)(in + 1 * 32));
          __m256i in02 = _mm256_loadu_si256((const __m256i *)(in + 2 * 32));
          __m256i in03 = _mm256_loadu_si256((const __m256i *)(in + 3 * 32));
          __m256i in28 = _mm256_loadu_si256((const __m256i *)(in + 28 * 32));
          __m256i in29 = _mm256_loadu_si256((const __m256i *)(in + 29 * 32));
          __m256i in30 = _mm256_loadu_si256((const __m256i *)(in + 30 * 32));
          __m256i in31 = _mm256_loadu_si256((const __m256i *)(in + 31 * 32));
          step1[0] = _mm256_add_epi16(in00, in31);
          step1[1] = _mm256_add_epi16(in01, in30);
          step1[2] = _mm256_add_epi16(in02, in29);
          step1[3] = _mm256_add_epi16(in03, in28);
          step1[28] = _mm256_sub_epi16(in03, in28);
          step1[29] = _mm256_sub_epi16(in02, in29);
          step1[30] = _mm256_sub_epi16(in01, in30);
          step1[31] = _mm256_sub_epi16(in00, in31);
        }
        {
          __m256i in04 = _mm256_loadu_si256((const __m256i *)(in + 4 * 32));
          __m256i in05 = _mm256_loadu_si256((const __m256i *)(in + 5 * 32));
          __m256i in06 = _mm256_loadu_si256((const __m256i *)(in + 6 * 32));
          __m256i in07 = _mm256_loadu_si256((const __m256i *)(in + 7 * 32));
          __m256i in24 = _mm256_loadu_si256((const __m256i *)(in + 24 * 32));
          __m256i in25 = _mm256_loadu_si256((const __m256i *)(in + 25 * 32));
          __m256i in26 = _mm256_loadu_si256((const __m256i *)(in + 26 * 32));
          __m256i in27 = _mm256_loadu_si256((const __m256i *)(in + 27 * 32));
          step1[4] = _mm256_add_epi16(in04, in27);
          step1[5] = _mm256_add_epi16(in05, in26);
          step1[6] = _mm256_add_epi16(in06, in25);
          step1[7] = _mm256_add_epi16(in07, in24);
          step1[24] = _mm256_sub_epi16(in07, in24);
          step1[25] = _mm256_sub_epi16(in06, in25);
          step1[26] = _mm256_sub_epi16(in05, in26);
          step1[27] = _mm256_sub_epi16(in04, in27);
        }
        {
          __m256i in08 = _mm256_loadu_si256((const __m256i *)(in + 8 * 32));
          __m256i in09 = _mm256_loadu_si256((const __m256i *)(in + 9 * 32));
          __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32));
          __m256i in11 = _mm256_loadu_si256((const __m256i *)(in + 11 * 32));
          __m256i in20 = _mm256_loadu_si256((const __m256i *)(in + 20 * 32));
          __m256i in21 = _mm256_loadu_si256((const __m256i *)(in + 21 * 32));
          __m256i in22 = _mm256_loadu_si256((const __m256i *)(in + 22 * 32));
          __m256i in23 = _mm256_loadu_si256((const __m256i *)(in + 23 * 32));
          step1[8] = _mm256_add_epi16(in08, in23);
          step1[9] = _mm256_add_epi16(in09, in22);
          step1[10] = _mm256_add_epi16(in10, in21);
          step1[11] = _mm256_add_epi16(in11, in20);
          step1[20] = _mm256_sub_epi16(in11, in20);
          step1[21] = _mm256_sub_epi16(in10, in21);
          step1[22] = _mm256_sub_epi16(in09, in22);
          step1[23] = _mm256_sub_epi16(in08, in23);
        }
        {
          __m256i in12 = _mm256_loadu_si256((const __m256i *)(in + 12 * 32));
          __m256i in13 = _mm256_loadu_si256((const __m256i *)(in + 13 * 32));
          __m256i in14 = _mm256_loadu_si256((const __m256i *)(in + 14 * 32));
          __m256i in15 = _mm256_loadu_si256((const __m256i *)(in + 15 * 32));
          __m256i in16 = _mm256_loadu_si256((const __m256i *)(in + 16 * 32));
          __m256i in17 = _mm256_loadu_si256((const __m256i *)(in + 17 * 32));
          __m256i in18 = _mm256_loadu_si256((const __m256i *)(in + 18 * 32));
          __m256i in19 = _mm256_loadu_si256((const __m256i *)(in + 19 * 32));
          step1[12] = _mm256_add_epi16(in12, in19);
          step1[13] = _mm256_add_epi16(in13, in18);
          step1[14] = _mm256_add_epi16(in14, in17);
          step1[15] = _mm256_add_epi16(in15, in16);
          step1[16] = _mm256_sub_epi16(in15, in16);
          step1[17] = _mm256_sub_epi16(in14, in17);
          step1[18] = _mm256_sub_epi16(in13, in18);
          step1[19] = _mm256_sub_epi16(in12, in19);
        }
      }
      // Stage 2
      {
        step2[0] = _mm256_add_epi16(step1[0], step1[15]);
        step2[1] = _mm256_add_epi16(step1[1], step1[14]);
        step2[2] = _mm256_add_epi16(step1[2], step1[13]);
        step2[3] = _mm256_add_epi16(step1[3], step1[12]);
        step2[4] = _mm256_add_epi16(step1[4], step1[11]);
        step2[5] = _mm256_add_epi16(step1[5], step1[10]);
        step2[6] = _mm256_add_epi16(step1[6], step1[9]);
        step2[7] = _mm256_add_epi16(step1[7], step1[8]);
        step2[8] = _mm256_sub_epi16(step1[7], step1[8]);
        step2[9] = _mm256_sub_epi16(step1[6], step1[9]);
        step2[10] = _mm256_sub_epi16(step1[5], step1[10]);
        step2[11] = _mm256_sub_epi16(step1[4], step1[11]);
        step2[12] = _mm256_sub_epi16(step1[3], step1[12]);
        step2[13] = _mm256_sub_epi16(step1[2], step1[13]);
        step2[14] = _mm256_sub_epi16(step1[1], step1[14]);
        step2[15] = _mm256_sub_epi16(step1[0], step1[15]);
      }
      {
        const __m256i s2_20_0 = _mm256_unpacklo_epi16(step1[27], step1[20]);
        const __m256i s2_20_1 = _mm256_unpackhi_epi16(step1[27], step1[20]);
        const __m256i s2_21_0 = _mm256_unpacklo_epi16(step1[26], step1[21]);
        const __m256i s2_21_1 = _mm256_unpackhi_epi16(step1[26], step1[21]);
        const __m256i s2_22_0 = _mm256_unpacklo_epi16(step1[25], step1[22]);
        const __m256i s2_22_1 = _mm256_unpackhi_epi16(step1[25], step1[22]);
        const __m256i s2_23_0 = _mm256_unpacklo_epi16(step1[24], step1[23]);
        const __m256i s2_23_1 = _mm256_unpackhi_epi16(step1[24], step1[23]);
        const __m256i s2_20_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_m16);
        const __m256i s2_20_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_m16);
        const __m256i s2_21_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_m16);
        const __m256i s2_21_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_m16);
        const __m256i s2_22_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_m16);
        const __m256i s2_22_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_m16);
        const __m256i s2_23_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_m16);
        const __m256i s2_23_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_m16);
        const __m256i s2_24_2 = _mm256_madd_epi16(s2_23_0, k__cospi_p16_p16);
        const __m256i s2_24_3 = _mm256_madd_epi16(s2_23_1, k__cospi_p16_p16);
        const __m256i s2_25_2 = _mm256_madd_epi16(s2_22_0, k__cospi_p16_p16);
        const __m256i s2_25_3 = _mm256_madd_epi16(s2_22_1, k__cospi_p16_p16);
        const __m256i s2_26_2 = _mm256_madd_epi16(s2_21_0, k__cospi_p16_p16);
        const __m256i s2_26_3 = _mm256_madd_epi16(s2_21_1, k__cospi_p16_p16);
        const __m256i s2_27_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_p16);
        const __m256i s2_27_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_p16);
        // dct_const_round_shift
        const __m256i s2_20_4 =
            _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_20_5 =
            _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_21_4 =
            _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_21_5 =
            _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_22_4 =
            _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_22_5 =
            _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_23_4 =
            _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_23_5 =
            _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_24_4 =
            _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_24_5 =
            _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_25_4 =
            _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_25_5 =
            _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_26_4 =
            _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_26_5 =
            _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_27_4 =
            _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
        const __m256i s2_27_5 =
            _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
        const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS);
        const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS);
        const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS);
        const __m256i s2_21_7 = _mm256_srai_epi32(s2_21_5, DCT_CONST_BITS);
        const __m256i s2_22_6 = _mm256_srai_epi32(s2_22_4, DCT_CONST_BITS);
        const __m256i s2_22_7 = _mm256_srai_epi32(s2_22_5, DCT_CONST_BITS);
        const __m256i s2_23_6 = _mm256_srai_epi32(s2_23_4, DCT_CONST_BITS);
        const __m256i s2_23_7 = _mm256_srai_epi32(s2_23_5, DCT_CONST_BITS);
        const __m256i s2_24_6 = _mm256_srai_epi32(s2_24_4, DCT_CONST_BITS);
        const __m256i s2_24_7 = _mm256_srai_epi32(s2_24_5, DCT_CONST_BITS);
        const __m256i s2_25_6 = _mm256_srai_epi32(s2_25_4, DCT_CONST_BITS);
        const __m256i s2_25_7 = _mm256_srai_epi32(s2_25_5, DCT_CONST_BITS);
        const __m256i s2_26_6 = _mm256_srai_epi32(s2_26_4, DCT_CONST_BITS);
        const __m256i s2_26_7 = _mm256_srai_epi32(s2_26_5, DCT_CONST_BITS);
        const __m256i s2_27_6 = _mm256_srai_epi32(s2_27_4, DCT_CONST_BITS);
        const __m256i s2_27_7 = _mm256_srai_epi32(s2_27_5, DCT_CONST_BITS);
        // Combine
        step2[20] = _mm256_packs_epi32(s2_20_6, s2_20_7);
        step2[21] = _mm256_packs_epi32(s2_21_6, s2_21_7);
        step2[22] = _mm256_packs_epi32(s2_22_6, s2_22_7);
        step2[23] = _mm256_packs_epi32(s2_23_6, s2_23_7);
        step2[24] = _mm256_packs_epi32(s2_24_6, s2_24_7);
        step2[25] = _mm256_packs_epi32(s2_25_6, s2_25_7);
        step2[26] = _mm256_packs_epi32(s2_26_6, s2_26_7);
        step2[27] = _mm256_packs_epi32(s2_27_6, s2_27_7);
      }
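      // In scalar terms (cf. the C reference fdct32), the block above
      // computes, for i in 20..23 with j = 47 - i:
      //   step2[i] = dct_const_round_shift((step1[j] - step1[i]) * cospi_16_64);
      //   step2[j] = dct_const_round_shift((step1[j] + step1[i]) * cospi_16_64);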
#if !FDCT32x32_HIGH_PRECISION
      // Scale down the magnitude (with rounding) so that the intermediate
      // values stay within the range of 16 bits.
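      // In scalar form this block is the reference rounding
      //   x = (x + 1 + (x < 0)) >> 2;
      // the compares below produce the (x < 0) term as -1 (hence the
      // subtracts), kOne supplies the +1, and the arithmetic shift by 2
      // performs the division.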
      if (1 == pass) {
        __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero, step2[0]);
        __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero, step2[1]);
        __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero, step2[2]);
        __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero, step2[3]);
        __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero, step2[4]);
        __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero, step2[5]);
        __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero, step2[6]);
        __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero, step2[7]);
        __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero, step2[8]);
        __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero, step2[9]);
        __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero, step2[10]);
        __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero, step2[11]);
        __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero, step2[12]);
        __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero, step2[13]);
        __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero, step2[14]);
        __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero, step2[15]);
        __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero, step1[16]);
        __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero, step1[17]);
        __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero, step1[18]);
        __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero, step1[19]);
        __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero, step2[20]);
        __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero, step2[21]);
        __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero, step2[22]);
        __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero, step2[23]);
        __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero, step2[24]);
        __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero, step2[25]);
        __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero, step2[26]);
        __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero, step2[27]);
        __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero, step1[28]);
        __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero, step1[29]);
        __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero, step1[30]);
        __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero, step1[31]);
        step2[0] = _mm256_sub_epi16(step2[0], s3_00_0);
        step2[1] = _mm256_sub_epi16(step2[1], s3_01_0);
        step2[2] = _mm256_sub_epi16(step2[2], s3_02_0);
        step2[3] = _mm256_sub_epi16(step2[3], s3_03_0);
        step2[4] = _mm256_sub_epi16(step2[4], s3_04_0);
        step2[5] = _mm256_sub_epi16(step2[5], s3_05_0);
        step2[6] = _mm256_sub_epi16(step2[6], s3_06_0);
        step2[7] = _mm256_sub_epi16(step2[7], s3_07_0);
        step2[8] = _mm256_sub_epi16(step2[8], s2_08_0);
        step2[9] = _mm256_sub_epi16(step2[9], s2_09_0);
        step2[10] = _mm256_sub_epi16(step2[10], s3_10_0);
        step2[11] = _mm256_sub_epi16(step2[11], s3_11_0);
        step2[12] = _mm256_sub_epi16(step2[12], s3_12_0);
        step2[13] = _mm256_sub_epi16(step2[13], s3_13_0);
        step2[14] = _mm256_sub_epi16(step2[14], s2_14_0);
        step2[15] = _mm256_sub_epi16(step2[15], s2_15_0);
        step1[16] = _mm256_sub_epi16(step1[16], s3_16_0);
        step1[17] = _mm256_sub_epi16(step1[17], s3_17_0);
        step1[18] = _mm256_sub_epi16(step1[18], s3_18_0);
        step1[19] = _mm256_sub_epi16(step1[19], s3_19_0);
        step2[20] = _mm256_sub_epi16(step2[20], s3_20_0);
        step2[21] = _mm256_sub_epi16(step2[21], s3_21_0);
        step2[22] = _mm256_sub_epi16(step2[22], s3_22_0);
        step2[23] = _mm256_sub_epi16(step2[23], s3_23_0);
        step2[24] = _mm256_sub_epi16(step2[24], s3_24_0);
        step2[25] = _mm256_sub_epi16(step2[25], s3_25_0);
        step2[26] = _mm256_sub_epi16(step2[26], s3_26_0);
        step2[27] = _mm256_sub_epi16(step2[27], s3_27_0);
        step1[28] = _mm256_sub_epi16(step1[28], s3_28_0);
        step1[29] = _mm256_sub_epi16(step1[29], s3_29_0);
        step1[30] = _mm256_sub_epi16(step1[30], s3_30_0);
        step1[31] = _mm256_sub_epi16(step1[31], s3_31_0);
        step2[0] = _mm256_add_epi16(step2[0], kOne);
        step2[1] = _mm256_add_epi16(step2[1], kOne);
        step2[2] = _mm256_add_epi16(step2[2], kOne);
        step2[3] = _mm256_add_epi16(step2[3], kOne);
        step2[4] = _mm256_add_epi16(step2[4], kOne);
        step2[5] = _mm256_add_epi16(step2[5], kOne);
        step2[6] = _mm256_add_epi16(step2[6], kOne);
        step2[7] = _mm256_add_epi16(step2[7], kOne);
        step2[8] = _mm256_add_epi16(step2[8], kOne);
        step2[9] = _mm256_add_epi16(step2[9], kOne);
        step2[10] = _mm256_add_epi16(step2[10], kOne);
        step2[11] = _mm256_add_epi16(step2[11], kOne);
        step2[12] = _mm256_add_epi16(step2[12], kOne);
        step2[13] = _mm256_add_epi16(step2[13], kOne);
        step2[14] = _mm256_add_epi16(step2[14], kOne);
        step2[15] = _mm256_add_epi16(step2[15], kOne);
        step1[16] = _mm256_add_epi16(step1[16], kOne);
        step1[17] = _mm256_add_epi16(step1[17], kOne);
        step1[18] = _mm256_add_epi16(step1[18], kOne);
        step1[19] = _mm256_add_epi16(step1[19], kOne);
        step2[20] = _mm256_add_epi16(step2[20], kOne);
        step2[21] = _mm256_add_epi16(step2[21], kOne);
        step2[22] = _mm256_add_epi16(step2[22], kOne);
        step2[23] = _mm256_add_epi16(step2[23], kOne);
        step2[24] = _mm256_add_epi16(step2[24], kOne);
        step2[25] = _mm256_add_epi16(step2[25], kOne);
        step2[26] = _mm256_add_epi16(step2[26], kOne);
        step2[27] = _mm256_add_epi16(step2[27], kOne);
        step1[28] = _mm256_add_epi16(step1[28], kOne);
        step1[29] = _mm256_add_epi16(step1[29], kOne);
        step1[30] = _mm256_add_epi16(step1[30], kOne);
        step1[31] = _mm256_add_epi16(step1[31], kOne);
        step2[0] = _mm256_srai_epi16(step2[0], 2);
        step2[1] = _mm256_srai_epi16(step2[1], 2);
        step2[2] = _mm256_srai_epi16(step2[2], 2);
        step2[3] = _mm256_srai_epi16(step2[3], 2);
        step2[4] = _mm256_srai_epi16(step2[4], 2);
        step2[5] = _mm256_srai_epi16(step2[5], 2);
        step2[6] = _mm256_srai_epi16(step2[6], 2);
        step2[7] = _mm256_srai_epi16(step2[7], 2);
        step2[8] = _mm256_srai_epi16(step2[8], 2);
        step2[9] = _mm256_srai_epi16(step2[9], 2);
        step2[10] = _mm256_srai_epi16(step2[10], 2);
        step2[11] = _mm256_srai_epi16(step2[11], 2);
        step2[12] = _mm256_srai_epi16(step2[12], 2);
        step2[13] = _mm256_srai_epi16(step2[13], 2);
        step2[14] = _mm256_srai_epi16(step2[14], 2);
        step2[15] = _mm256_srai_epi16(step2[15], 2);
        step1[16] = _mm256_srai_epi16(step1[16], 2);
        step1[17] = _mm256_srai_epi16(step1[17], 2);
        step1[18] = _mm256_srai_epi16(step1[18], 2);
        step1[19] = _mm256_srai_epi16(step1[19], 2);
        step2[20] = _mm256_srai_epi16(step2[20], 2);
        step2[21] = _mm256_srai_epi16(step2[21], 2);
        step2[22] = _mm256_srai_epi16(step2[22], 2);
        step2[23] = _mm256_srai_epi16(step2[23], 2);
        step2[24] = _mm256_srai_epi16(step2[24], 2);
        step2[25] = _mm256_srai_epi16(step2[25], 2);
        step2[26] = _mm256_srai_epi16(step2[26], 2);
        step2[27] = _mm256_srai_epi16(step2[27], 2);
        step1[28] = _mm256_srai_epi16(step1[28], 2);
        step1[29] = _mm256_srai_epi16(step1[29], 2);
        step1[30] = _mm256_srai_epi16(step1[30], 2);
        step1[31] = _mm256_srai_epi16(step1[31], 2);
      }
#endif  // !FDCT32x32_HIGH_PRECISION
#if FDCT32x32_HIGH_PRECISION
      if (pass == 0) {
#endif
        // Stage 3
        {
          step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]);
          step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]);
          step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]);
          step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]);
          step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]);
          step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]);
          step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]);
          step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]);
        }
        {
          const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
          const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
          const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
          const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
          const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
          const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
          const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
          const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
          const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
          const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
          const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
          const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
          // dct_const_round_shift
          const __m256i s3_10_4 =
              _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
          const __m256i s3_10_5 =
              _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
          const __m256i s3_11_4 =
              _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
          const __m256i s3_11_5 =
              _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
          const __m256i s3_12_4 =
              _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
          const __m256i s3_12_5 =
              _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
          const __m256i s3_13_4 =
              _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
          const __m256i s3_13_5 =
              _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
          const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
          const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
          const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
          const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
          const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
          const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
          const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
          const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
          // Combine
          step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7);
          step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7);
          step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7);
          step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7);
        }
        {
          step3[16] = _mm256_add_epi16(step2[23], step1[16]);
          step3[17] = _mm256_add_epi16(step2[22], step1[17]);
          step3[18] = _mm256_add_epi16(step2[21], step1[18]);
          step3[19] = _mm256_add_epi16(step2[20], step1[19]);
          step3[20] = _mm256_sub_epi16(step1[19], step2[20]);
          step3[21] = _mm256_sub_epi16(step1[18], step2[21]);
          step3[22] = _mm256_sub_epi16(step1[17], step2[22]);
          step3[23] = _mm256_sub_epi16(step1[16], step2[23]);
          step3[24] = _mm256_sub_epi16(step1[31], step2[24]);
          step3[25] = _mm256_sub_epi16(step1[30], step2[25]);
          step3[26] = _mm256_sub_epi16(step1[29], step2[26]);
          step3[27] = _mm256_sub_epi16(step1[28], step2[27]);
          step3[28] = _mm256_add_epi16(step2[27], step1[28]);
          step3[29] = _mm256_add_epi16(step2[26], step1[29]);
          step3[30] = _mm256_add_epi16(step2[25], step1[30]);
          step3[31] = _mm256_add_epi16(step2[24], step1[31]);
        }
        // Stage 4
        {
          step1[0] = _mm256_add_epi16(step3[3], step3[0]);
          step1[1] = _mm256_add_epi16(step3[2], step3[1]);
          step1[2] = _mm256_sub_epi16(step3[1], step3[2]);
          step1[3] = _mm256_sub_epi16(step3[0], step3[3]);
          step1[8] = _mm256_add_epi16(step3[11], step2[8]);
          step1[9] = _mm256_add_epi16(step3[10], step2[9]);
          step1[10] = _mm256_sub_epi16(step2[9], step3[10]);
          step1[11] = _mm256_sub_epi16(step2[8], step3[11]);
          step1[12] = _mm256_sub_epi16(step2[15], step3[12]);
          step1[13] = _mm256_sub_epi16(step2[14], step3[13]);
          step1[14] = _mm256_add_epi16(step3[13], step2[14]);
          step1[15] = _mm256_add_epi16(step3[12], step2[15]);
        }
        {
          const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]);
          const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]);
          const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16);
          const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16);
          const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16);
          const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16);
          // dct_const_round_shift
          const __m256i s1_05_4 =
              _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_05_5 =
              _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_06_4 =
              _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_06_5 =
              _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS);
          const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS);
          const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS);
          const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS);
          // Combine
          step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7);
          step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7);
        }
        {
          const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]);
          const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]);
          const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]);
          const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]);
          const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]);
          const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]);
          const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]);
          const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]);
          const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24);
          const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24);
          const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24);
          const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24);
          const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08);
          const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08);
          const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08);
          const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08);
          const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24);
          const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24);
          const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24);
          const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24);
          const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08);
          const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08);
          const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08);
          const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08);
          // dct_const_round_shift
          const __m256i s1_18_4 =
              _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_18_5 =
              _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_19_4 =
              _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_19_5 =
              _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_20_4 =
              _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_20_5 =
              _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_21_4 =
              _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_21_5 =
              _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_26_4 =
              _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_26_5 =
              _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_27_4 =
              _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_27_5 =
              _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_28_4 =
              _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_28_5 =
              _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_29_4 =
              _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
          const __m256i s1_29_5 =
              _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
          const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS);
          const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS);
          const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS);
          const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS);
          const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS);
          const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS);
          const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS);
          const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS);
          const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS);
          const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS);
          const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS);
          const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS);
          const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS);
          const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS);
          const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS);
          const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS);
          // Combine
          step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7);
          step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7);
          step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7);
          step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7);
          step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7);
          step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7);
          step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7);
          step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7);
        }
        // Stage 5
        {
          step2[4] = _mm256_add_epi16(step1[5], step3[4]);
          step2[5] = _mm256_sub_epi16(step3[4], step1[5]);
          step2[6] = _mm256_sub_epi16(step3[7], step1[6]);
          step2[7] = _mm256_add_epi16(step1[6], step3[7]);
        }
        {
          const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]);
          const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]);
          const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]);
          const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]);
          const __m256i out_00_2 =
              _mm256_madd_epi16(out_00_0, k__cospi_p16_p16);
          const __m256i out_00_3 =
              _mm256_madd_epi16(out_00_1, k__cospi_p16_p16);
          const __m256i out_16_2 =
              _mm256_madd_epi16(out_00_0, k__cospi_p16_m16);
          const __m256i out_16_3 =
              _mm256_madd_epi16(out_00_1, k__cospi_p16_m16);
          const __m256i out_08_2 =
              _mm256_madd_epi16(out_08_0, k__cospi_p24_p08);
          const __m256i out_08_3 =
              _mm256_madd_epi16(out_08_1, k__cospi_p24_p08);
          const __m256i out_24_2 =
              _mm256_madd_epi16(out_08_0, k__cospi_m08_p24);
          const __m256i out_24_3 =
              _mm256_madd_epi16(out_08_1, k__cospi_m08_p24);
          // dct_const_round_shift
          const __m256i out_00_4 =
              _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
          const __m256i out_00_5 =
              _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
          const __m256i out_16_4 =
              _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
          const __m256i out_16_5 =
              _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
          const __m256i out_08_4 =
              _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
          const __m256i out_08_5 =
              _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
          const __m256i out_24_4 =
              _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
          const __m256i out_24_5 =
              _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
          const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS);
          const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS);
          const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS);
          const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS);
          const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS);
          const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS);
          const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS);
          const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS);
          // Combine
          out[0] = _mm256_packs_epi32(out_00_6, out_00_7);
          out[16] = _mm256_packs_epi32(out_16_6, out_16_7);
          out[8] = _mm256_packs_epi32(out_08_6, out_08_7);
          out[24] = _mm256_packs_epi32(out_24_6, out_24_7);
        }
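        // Scalar view of the four outputs just written:
        //   out[0]  = dct_const_round_shift((step1[0] + step1[1]) * cospi_16_64);
        //   out[16] = dct_const_round_shift((step1[0] - step1[1]) * cospi_16_64);
        //   out[8]  = dct_const_round_shift(step1[2] * cospi_24_64 +
        //                                   step1[3] * cospi_8_64);
        //   out[24] = dct_const_round_shift(step1[3] * cospi_24_64 -
        //                                   step1[2] * cospi_8_64);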
  825. {
  826. const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[9], step1[14]);
  827. const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[9], step1[14]);
  828. const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]);
  829. const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]);
  830. const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24);
  831. const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24);
  832. const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08);
  833. const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08);
  834. const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24);
  835. const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24);
  836. const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08);
  837. const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08);
  838. // dct_const_round_shift
  839. const __m256i s2_09_4 =
  840. _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
  841. const __m256i s2_09_5 =
  842. _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
  843. const __m256i s2_10_4 =
  844. _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
  845. const __m256i s2_10_5 =
  846. _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
  847. const __m256i s2_13_4 =
  848. _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
  849. const __m256i s2_13_5 =
  850. _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
  851. const __m256i s2_14_4 =
  852. _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
  853. const __m256i s2_14_5 =
  854. _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
  855. const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS);
  856. const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS);
  857. const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS);
  858. const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS);
  859. const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS);
  860. const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS);
  861. const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS);
  862. const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS);
  863. // Combine
  864. step2[9] = _mm256_packs_epi32(s2_09_6, s2_09_7);
  865. step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7);
  866. step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7);
  867. step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7);
  868. }
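      // The block above is the interleave-and-madd rotation used throughout
      // this file: unpacklo/unpackhi pair the 16-bit lanes of two rows a and
      // b, and _mm256_madd_epi16 against a (c0, c1) cosine-pair constant
      // yields 32-bit a * c0 + b * c1 per lane. With the naming convention
      // used for the constants, step2[9] above is therefore
      // dct_const_round_shift(step1[9] * -cospi_8_64 +
      // step1[14] * cospi_24_64), and step2[14] is its counterpart computed
      // from the same interleavings with k__cospi_p24_p08.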
      {
        step2[16] = _mm256_add_epi16(step1[19], step3[16]);
        step2[17] = _mm256_add_epi16(step1[18], step3[17]);
        step2[18] = _mm256_sub_epi16(step3[17], step1[18]);
        step2[19] = _mm256_sub_epi16(step3[16], step1[19]);
        step2[20] = _mm256_sub_epi16(step3[23], step1[20]);
        step2[21] = _mm256_sub_epi16(step3[22], step1[21]);
        step2[22] = _mm256_add_epi16(step1[21], step3[22]);
        step2[23] = _mm256_add_epi16(step1[20], step3[23]);
        step2[24] = _mm256_add_epi16(step1[27], step3[24]);
        step2[25] = _mm256_add_epi16(step1[26], step3[25]);
        step2[26] = _mm256_sub_epi16(step3[25], step1[26]);
        step2[27] = _mm256_sub_epi16(step3[24], step1[27]);
        step2[28] = _mm256_sub_epi16(step3[31], step1[28]);
        step2[29] = _mm256_sub_epi16(step3[30], step1[29]);
        step2[30] = _mm256_add_epi16(step1[29], step3[30]);
        step2[31] = _mm256_add_epi16(step1[28], step3[31]);
      }
      // Stage 6
      {
        const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
        const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
        const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
        const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
        const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]);
        const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]);
        const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]);
        const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]);
        const __m256i out_04_2 =
            _mm256_madd_epi16(out_04_0, k__cospi_p28_p04);
        const __m256i out_04_3 =
            _mm256_madd_epi16(out_04_1, k__cospi_p28_p04);
        const __m256i out_20_2 =
            _mm256_madd_epi16(out_20_0, k__cospi_p12_p20);
        const __m256i out_20_3 =
            _mm256_madd_epi16(out_20_1, k__cospi_p12_p20);
        const __m256i out_12_2 =
            _mm256_madd_epi16(out_12_0, k__cospi_m20_p12);
        const __m256i out_12_3 =
            _mm256_madd_epi16(out_12_1, k__cospi_m20_p12);
        const __m256i out_28_2 =
            _mm256_madd_epi16(out_28_0, k__cospi_m04_p28);
        const __m256i out_28_3 =
            _mm256_madd_epi16(out_28_1, k__cospi_m04_p28);
        // dct_const_round_shift
        const __m256i out_04_4 =
            _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
        const __m256i out_04_5 =
            _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
        const __m256i out_20_4 =
            _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
        const __m256i out_20_5 =
            _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
        const __m256i out_12_4 =
            _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
        const __m256i out_12_5 =
            _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
        const __m256i out_28_4 =
            _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
        const __m256i out_28_5 =
            _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
        const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS);
        const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS);
        const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS);
        const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS);
        const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS);
        const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS);
        const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS);
        const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS);
        // Combine
        out[4] = _mm256_packs_epi32(out_04_6, out_04_7);
        out[20] = _mm256_packs_epi32(out_20_6, out_20_7);
        out[12] = _mm256_packs_epi32(out_12_6, out_12_7);
        out[28] = _mm256_packs_epi32(out_28_6, out_28_7);
      }
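      // Note that out_12_0/1 and out_28_0/1 above are the same interleavings
      // as out_20_0/1 and out_04_0/1; only the cosine constants fed to the
      // madd differ, so a compiler will typically coalesce the duplicate
      // unpacks.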
      {
        step3[8] = _mm256_add_epi16(step2[9], step1[8]);
        step3[9] = _mm256_sub_epi16(step1[8], step2[9]);
        step3[10] = _mm256_sub_epi16(step1[11], step2[10]);
        step3[11] = _mm256_add_epi16(step2[10], step1[11]);
        step3[12] = _mm256_add_epi16(step2[13], step1[12]);
        step3[13] = _mm256_sub_epi16(step1[12], step2[13]);
        step3[14] = _mm256_sub_epi16(step1[15], step2[14]);
        step3[15] = _mm256_add_epi16(step2[14], step1[15]);
      }
      {
        const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]);
        const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]);
        const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]);
        const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]);
        const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]);
        const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]);
        const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]);
        const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]);
        const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28);
        const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28);
        const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04);
        const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04);
        const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12);
        const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12);
        const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20);
        const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20);
        const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12);
        const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12);
        const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20);
        const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20);
        const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28);
        const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28);
        const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04);
        const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04);
        // dct_const_round_shift
        const __m256i s3_17_4 =
            _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_17_5 =
            _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_18_4 =
            _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_18_5 =
            _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_21_4 =
            _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_21_5 =
            _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_22_4 =
            _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_22_5 =
            _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS);
        const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS);
        const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS);
        const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS);
        const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS);
        const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS);
        const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS);
        const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS);
        const __m256i s3_25_4 =
            _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_25_5 =
            _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_26_4 =
            _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_26_5 =
            _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_29_4 =
            _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_29_5 =
            _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_30_4 =
            _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_30_5 =
            _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS);
        const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS);
        const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS);
        const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS);
        const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS);
        const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS);
        const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS);
        const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS);
        // Combine
        step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7);
        step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7);
        step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7);
        step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7);
        // Combine
        step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7);
        step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7);
        step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7);
        step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7);
      }
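      // Each interleaved pair is rotated in place: (step2[17], step2[30]) and
      // (step2[18], step2[29]) with the cospi_4/cospi_28 pair, (step2[21],
      // step2[26]) and (step2[22], step2[25]) with the cospi_20/cospi_12
      // pair. The second output of each pair (step3[25/26/29/30]) reuses the
      // already-interleaved s3_*_0/1 registers with the complementary
      // constants.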
      // Stage 7
      {
        const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[8], step3[15]);
        const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[8], step3[15]);
        const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[9], step3[14]);
        const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[9], step3[14]);
        const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]);
        const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]);
        const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]);
        const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]);
        const __m256i out_02_2 =
            _mm256_madd_epi16(out_02_0, k__cospi_p30_p02);
        const __m256i out_02_3 =
            _mm256_madd_epi16(out_02_1, k__cospi_p30_p02);
        const __m256i out_18_2 =
            _mm256_madd_epi16(out_18_0, k__cospi_p14_p18);
        const __m256i out_18_3 =
            _mm256_madd_epi16(out_18_1, k__cospi_p14_p18);
        const __m256i out_10_2 =
            _mm256_madd_epi16(out_10_0, k__cospi_p22_p10);
        const __m256i out_10_3 =
            _mm256_madd_epi16(out_10_1, k__cospi_p22_p10);
        const __m256i out_26_2 =
            _mm256_madd_epi16(out_26_0, k__cospi_p06_p26);
        const __m256i out_26_3 =
            _mm256_madd_epi16(out_26_1, k__cospi_p06_p26);
        const __m256i out_06_2 =
            _mm256_madd_epi16(out_26_0, k__cospi_m26_p06);
        const __m256i out_06_3 =
            _mm256_madd_epi16(out_26_1, k__cospi_m26_p06);
        const __m256i out_22_2 =
            _mm256_madd_epi16(out_10_0, k__cospi_m10_p22);
        const __m256i out_22_3 =
            _mm256_madd_epi16(out_10_1, k__cospi_m10_p22);
        const __m256i out_14_2 =
            _mm256_madd_epi16(out_18_0, k__cospi_m18_p14);
        const __m256i out_14_3 =
            _mm256_madd_epi16(out_18_1, k__cospi_m18_p14);
        const __m256i out_30_2 =
            _mm256_madd_epi16(out_02_0, k__cospi_m02_p30);
        const __m256i out_30_3 =
            _mm256_madd_epi16(out_02_1, k__cospi_m02_p30);
        // dct_const_round_shift
        const __m256i out_02_4 =
            _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
        const __m256i out_02_5 =
            _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
        const __m256i out_18_4 =
            _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
        const __m256i out_18_5 =
            _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
        const __m256i out_10_4 =
            _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
        const __m256i out_10_5 =
            _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
        const __m256i out_26_4 =
            _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
        const __m256i out_26_5 =
            _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
        const __m256i out_06_4 =
            _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
        const __m256i out_06_5 =
            _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
        const __m256i out_22_4 =
            _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
        const __m256i out_22_5 =
            _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
        const __m256i out_14_4 =
            _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
        const __m256i out_14_5 =
            _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
        const __m256i out_30_4 =
            _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
        const __m256i out_30_5 =
            _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
        const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS);
        const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS);
        const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS);
        const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS);
        const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS);
        const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS);
        const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS);
        const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS);
        const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS);
        const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS);
        const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS);
        const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS);
        const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS);
        const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS);
        const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS);
        const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS);
        // Combine
        out[2] = _mm256_packs_epi32(out_02_6, out_02_7);
        out[18] = _mm256_packs_epi32(out_18_6, out_18_7);
        out[10] = _mm256_packs_epi32(out_10_6, out_10_7);
        out[26] = _mm256_packs_epi32(out_26_6, out_26_7);
        out[6] = _mm256_packs_epi32(out_06_6, out_06_7);
        out[22] = _mm256_packs_epi32(out_22_6, out_22_7);
        out[14] = _mm256_packs_epi32(out_14_6, out_14_7);
        out[30] = _mm256_packs_epi32(out_30_6, out_30_7);
      }
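      // Stage 7 emits the eight outputs with index congruent to 2 mod 4:
      // out[2], out[18], out[10], out[26], out[6], out[22], out[14] and
      // out[30], each a rotation of one (step3[8 + k], step3[15 - k]) pair.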
      {
        step1[16] = _mm256_add_epi16(step3[17], step2[16]);
        step1[17] = _mm256_sub_epi16(step2[16], step3[17]);
        step1[18] = _mm256_sub_epi16(step2[19], step3[18]);
        step1[19] = _mm256_add_epi16(step3[18], step2[19]);
        step1[20] = _mm256_add_epi16(step3[21], step2[20]);
        step1[21] = _mm256_sub_epi16(step2[20], step3[21]);
        step1[22] = _mm256_sub_epi16(step2[23], step3[22]);
        step1[23] = _mm256_add_epi16(step3[22], step2[23]);
        step1[24] = _mm256_add_epi16(step3[25], step2[24]);
        step1[25] = _mm256_sub_epi16(step2[24], step3[25]);
        step1[26] = _mm256_sub_epi16(step2[27], step3[26]);
        step1[27] = _mm256_add_epi16(step3[26], step2[27]);
        step1[28] = _mm256_add_epi16(step3[29], step2[28]);
        step1[29] = _mm256_sub_epi16(step2[28], step3[29]);
        step1[30] = _mm256_sub_epi16(step2[31], step3[30]);
        step1[31] = _mm256_add_epi16(step3[30], step2[31]);
      }
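      // Last butterfly on the 16..31 chain; step1[16..31] feed the
      // final-stage rotations below.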
      // Final stage --- output indices are bit-reversed.
      {
        const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]);
        const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]);
        const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]);
        const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]);
        const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]);
        const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]);
        const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]);
        const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]);
        const __m256i out_01_2 =
            _mm256_madd_epi16(out_01_0, k__cospi_p31_p01);
        const __m256i out_01_3 =
            _mm256_madd_epi16(out_01_1, k__cospi_p31_p01);
        const __m256i out_17_2 =
            _mm256_madd_epi16(out_17_0, k__cospi_p15_p17);
        const __m256i out_17_3 =
            _mm256_madd_epi16(out_17_1, k__cospi_p15_p17);
        const __m256i out_09_2 =
            _mm256_madd_epi16(out_09_0, k__cospi_p23_p09);
        const __m256i out_09_3 =
            _mm256_madd_epi16(out_09_1, k__cospi_p23_p09);
        const __m256i out_25_2 =
            _mm256_madd_epi16(out_25_0, k__cospi_p07_p25);
        const __m256i out_25_3 =
            _mm256_madd_epi16(out_25_1, k__cospi_p07_p25);
        const __m256i out_07_2 =
            _mm256_madd_epi16(out_25_0, k__cospi_m25_p07);
        const __m256i out_07_3 =
            _mm256_madd_epi16(out_25_1, k__cospi_m25_p07);
        const __m256i out_23_2 =
            _mm256_madd_epi16(out_09_0, k__cospi_m09_p23);
        const __m256i out_23_3 =
            _mm256_madd_epi16(out_09_1, k__cospi_m09_p23);
        const __m256i out_15_2 =
            _mm256_madd_epi16(out_17_0, k__cospi_m17_p15);
        const __m256i out_15_3 =
            _mm256_madd_epi16(out_17_1, k__cospi_m17_p15);
        const __m256i out_31_2 =
            _mm256_madd_epi16(out_01_0, k__cospi_m01_p31);
        const __m256i out_31_3 =
            _mm256_madd_epi16(out_01_1, k__cospi_m01_p31);
        // dct_const_round_shift
        const __m256i out_01_4 =
            _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
        const __m256i out_01_5 =
            _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
        const __m256i out_17_4 =
            _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
        const __m256i out_17_5 =
            _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
        const __m256i out_09_4 =
            _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
        const __m256i out_09_5 =
            _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
        const __m256i out_25_4 =
            _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
        const __m256i out_25_5 =
            _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
        const __m256i out_07_4 =
            _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
        const __m256i out_07_5 =
            _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
        const __m256i out_23_4 =
            _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
        const __m256i out_23_5 =
            _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
        const __m256i out_15_4 =
            _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
        const __m256i out_15_5 =
            _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
        const __m256i out_31_4 =
            _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
        const __m256i out_31_5 =
            _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
        const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS);
        const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS);
        const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS);
        const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS);
        const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS);
        const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS);
        const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS);
        const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS);
        const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS);
        const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS);
        const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS);
        const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS);
        const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS);
        const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS);
        const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS);
        const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS);
        // Combine
        out[1] = _mm256_packs_epi32(out_01_6, out_01_7);
        out[17] = _mm256_packs_epi32(out_17_6, out_17_7);
        out[9] = _mm256_packs_epi32(out_09_6, out_09_7);
        out[25] = _mm256_packs_epi32(out_25_6, out_25_7);
        out[7] = _mm256_packs_epi32(out_07_6, out_07_7);
        out[23] = _mm256_packs_epi32(out_23_6, out_23_7);
        out[15] = _mm256_packs_epi32(out_15_6, out_15_7);
        out[31] = _mm256_packs_epi32(out_31_6, out_31_7);
      }
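      // Each interleaving above yields two of the odd outputs: (out[1],
      // out[31]) from (step1[16], step1[31]), (out[17], out[15]) from
      // (step1[17], step1[30]), (out[9], out[23]) from (step1[18],
      // step1[29]) and (out[25], out[7]) from (step1[19], step1[28]). The
      // block below does the same for step1[20..27].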
      {
        const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]);
        const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]);
        const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]);
        const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]);
        const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]);
        const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]);
        const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]);
        const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]);
        const __m256i out_05_2 =
            _mm256_madd_epi16(out_05_0, k__cospi_p27_p05);
        const __m256i out_05_3 =
            _mm256_madd_epi16(out_05_1, k__cospi_p27_p05);
        const __m256i out_21_2 =
            _mm256_madd_epi16(out_21_0, k__cospi_p11_p21);
        const __m256i out_21_3 =
            _mm256_madd_epi16(out_21_1, k__cospi_p11_p21);
        const __m256i out_13_2 =
            _mm256_madd_epi16(out_13_0, k__cospi_p19_p13);
        const __m256i out_13_3 =
            _mm256_madd_epi16(out_13_1, k__cospi_p19_p13);
        const __m256i out_29_2 =
            _mm256_madd_epi16(out_29_0, k__cospi_p03_p29);
        const __m256i out_29_3 =
            _mm256_madd_epi16(out_29_1, k__cospi_p03_p29);
        const __m256i out_03_2 =
            _mm256_madd_epi16(out_29_0, k__cospi_m29_p03);
        const __m256i out_03_3 =
            _mm256_madd_epi16(out_29_1, k__cospi_m29_p03);
        const __m256i out_19_2 =
            _mm256_madd_epi16(out_13_0, k__cospi_m13_p19);
        const __m256i out_19_3 =
            _mm256_madd_epi16(out_13_1, k__cospi_m13_p19);
        const __m256i out_11_2 =
            _mm256_madd_epi16(out_21_0, k__cospi_m21_p11);
        const __m256i out_11_3 =
            _mm256_madd_epi16(out_21_1, k__cospi_m21_p11);
        const __m256i out_27_2 =
            _mm256_madd_epi16(out_05_0, k__cospi_m05_p27);
        const __m256i out_27_3 =
            _mm256_madd_epi16(out_05_1, k__cospi_m05_p27);
        // dct_const_round_shift
        const __m256i out_05_4 =
            _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
        const __m256i out_05_5 =
            _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
        const __m256i out_21_4 =
            _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
        const __m256i out_21_5 =
            _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
        const __m256i out_13_4 =
            _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
        const __m256i out_13_5 =
            _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
        const __m256i out_29_4 =
            _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
        const __m256i out_29_5 =
            _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
        const __m256i out_03_4 =
            _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
        const __m256i out_03_5 =
            _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
        const __m256i out_19_4 =
            _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
        const __m256i out_19_5 =
            _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
        const __m256i out_11_4 =
            _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
        const __m256i out_11_5 =
            _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
        const __m256i out_27_4 =
            _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
        const __m256i out_27_5 =
            _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
        const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS);
        const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS);
        const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS);
        const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS);
        const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS);
        const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS);
        const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS);
        const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS);
        const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS);
        const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS);
        const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS);
        const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS);
        const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS);
        const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS);
        const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS);
        const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS);
        // Combine
        out[5] = _mm256_packs_epi32(out_05_6, out_05_7);
        out[21] = _mm256_packs_epi32(out_21_6, out_21_7);
        out[13] = _mm256_packs_epi32(out_13_6, out_13_7);
        out[29] = _mm256_packs_epi32(out_29_6, out_29_7);
        out[3] = _mm256_packs_epi32(out_03_6, out_03_7);
        out[19] = _mm256_packs_epi32(out_19_6, out_19_7);
        out[11] = _mm256_packs_epi32(out_11_6, out_11_7);
        out[27] = _mm256_packs_epi32(out_27_6, out_27_7);
      }
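      // End of the 16-bit path. In the FDCT32x32_HIGH_PRECISION build the
      // second pass is redone below with 32-bit intermediates (the lstep
      // arrays): the 16-bit adds above risk overflow once the coefficients
      // have grown after the first pass, which the widened path avoids.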
#if FDCT32x32_HIGH_PRECISION
    } else {
      __m256i lstep1[64], lstep2[64], lstep3[64];
      __m256i u[32], v[32], sign[16];
      const __m256i K32One = _mm256_set_epi32(1, 1, 1, 1, 1, 1, 1, 1);
      const __m256i k__pOne_mOne = pair256_set_epi16(1, -1);
      // start using 32-bit operations
      // stage 3
      {
        // expanding to 32-bit length while adding and subtracting
        lstep2[0] = _mm256_unpacklo_epi16(step2[0], step2[7]);
        lstep2[1] = _mm256_unpackhi_epi16(step2[0], step2[7]);
        lstep2[2] = _mm256_unpacklo_epi16(step2[1], step2[6]);
        lstep2[3] = _mm256_unpackhi_epi16(step2[1], step2[6]);
        lstep2[4] = _mm256_unpacklo_epi16(step2[2], step2[5]);
        lstep2[5] = _mm256_unpackhi_epi16(step2[2], step2[5]);
        lstep2[6] = _mm256_unpacklo_epi16(step2[3], step2[4]);
        lstep2[7] = _mm256_unpackhi_epi16(step2[3], step2[4]);
        lstep3[0] = _mm256_madd_epi16(lstep2[0], kOne);
        lstep3[1] = _mm256_madd_epi16(lstep2[1], kOne);
        lstep3[2] = _mm256_madd_epi16(lstep2[2], kOne);
        lstep3[3] = _mm256_madd_epi16(lstep2[3], kOne);
        lstep3[4] = _mm256_madd_epi16(lstep2[4], kOne);
        lstep3[5] = _mm256_madd_epi16(lstep2[5], kOne);
        lstep3[6] = _mm256_madd_epi16(lstep2[6], kOne);
        lstep3[7] = _mm256_madd_epi16(lstep2[7], kOne);
        lstep3[8] = _mm256_madd_epi16(lstep2[6], k__pOne_mOne);
        lstep3[9] = _mm256_madd_epi16(lstep2[7], k__pOne_mOne);
        lstep3[10] = _mm256_madd_epi16(lstep2[4], k__pOne_mOne);
        lstep3[11] = _mm256_madd_epi16(lstep2[5], k__pOne_mOne);
        lstep3[12] = _mm256_madd_epi16(lstep2[2], k__pOne_mOne);
        lstep3[13] = _mm256_madd_epi16(lstep2[3], k__pOne_mOne);
        lstep3[14] = _mm256_madd_epi16(lstep2[0], k__pOne_mOne);
        lstep3[15] = _mm256_madd_epi16(lstep2[1], k__pOne_mOne);
      }
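      // The kOne / k__pOne_mOne madds above implement widening add/sub in a
      // single instruction each: with two rows interleaved 16-bit lane by
      // lane, madd against (1, 1) yields the 32-bit sums a + b, and madd
      // against (1, -1) yields the 32-bit differences a - b, with sign
      // extension included.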
      {
        const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]);
        const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]);
        const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]);
        const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]);
        const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16);
        const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16);
        const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16);
        const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16);
        const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16);
        const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16);
        const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16);
        const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16);
        // dct_const_round_shift
        const __m256i s3_10_4 =
            _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_10_5 =
            _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_11_4 =
            _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_11_5 =
            _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_12_4 =
            _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_12_5 =
            _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
        const __m256i s3_13_4 =
            _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
        const __m256i s3_13_5 =
            _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
        lstep3[20] = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS);
        lstep3[21] = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS);
        lstep3[22] = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS);
        lstep3[23] = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS);
        lstep3[24] = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS);
        lstep3[25] = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS);
        lstep3[26] = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS);
        lstep3[27] = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS);
      }
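      // This rotation still uses the 16-bit madd form (its inputs are the
      // 16-bit step2 values), but the rounded results are kept as 32-bit
      // lanes in lstep3 instead of being packed back to 16 bits.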
      {
        lstep1[32] = _mm256_unpacklo_epi16(step1[16], step2[23]);
        lstep1[33] = _mm256_unpackhi_epi16(step1[16], step2[23]);
        lstep1[34] = _mm256_unpacklo_epi16(step1[17], step2[22]);
        lstep1[35] = _mm256_unpackhi_epi16(step1[17], step2[22]);
        lstep1[36] = _mm256_unpacklo_epi16(step1[18], step2[21]);
        lstep1[37] = _mm256_unpackhi_epi16(step1[18], step2[21]);
        lstep1[38] = _mm256_unpacklo_epi16(step1[19], step2[20]);
        lstep1[39] = _mm256_unpackhi_epi16(step1[19], step2[20]);
        lstep1[56] = _mm256_unpacklo_epi16(step1[28], step2[27]);
        lstep1[57] = _mm256_unpackhi_epi16(step1[28], step2[27]);
        lstep1[58] = _mm256_unpacklo_epi16(step1[29], step2[26]);
        lstep1[59] = _mm256_unpackhi_epi16(step1[29], step2[26]);
        lstep1[60] = _mm256_unpacklo_epi16(step1[30], step2[25]);
        lstep1[61] = _mm256_unpackhi_epi16(step1[30], step2[25]);
        lstep1[62] = _mm256_unpacklo_epi16(step1[31], step2[24]);
        lstep1[63] = _mm256_unpackhi_epi16(step1[31], step2[24]);
        lstep3[32] = _mm256_madd_epi16(lstep1[32], kOne);
        lstep3[33] = _mm256_madd_epi16(lstep1[33], kOne);
        lstep3[34] = _mm256_madd_epi16(lstep1[34], kOne);
        lstep3[35] = _mm256_madd_epi16(lstep1[35], kOne);
        lstep3[36] = _mm256_madd_epi16(lstep1[36], kOne);
        lstep3[37] = _mm256_madd_epi16(lstep1[37], kOne);
        lstep3[38] = _mm256_madd_epi16(lstep1[38], kOne);
        lstep3[39] = _mm256_madd_epi16(lstep1[39], kOne);
        lstep3[40] = _mm256_madd_epi16(lstep1[38], k__pOne_mOne);
        lstep3[41] = _mm256_madd_epi16(lstep1[39], k__pOne_mOne);
        lstep3[42] = _mm256_madd_epi16(lstep1[36], k__pOne_mOne);
        lstep3[43] = _mm256_madd_epi16(lstep1[37], k__pOne_mOne);
        lstep3[44] = _mm256_madd_epi16(lstep1[34], k__pOne_mOne);
        lstep3[45] = _mm256_madd_epi16(lstep1[35], k__pOne_mOne);
        lstep3[46] = _mm256_madd_epi16(lstep1[32], k__pOne_mOne);
        lstep3[47] = _mm256_madd_epi16(lstep1[33], k__pOne_mOne);
        lstep3[48] = _mm256_madd_epi16(lstep1[62], k__pOne_mOne);
        lstep3[49] = _mm256_madd_epi16(lstep1[63], k__pOne_mOne);
        lstep3[50] = _mm256_madd_epi16(lstep1[60], k__pOne_mOne);
        lstep3[51] = _mm256_madd_epi16(lstep1[61], k__pOne_mOne);
        lstep3[52] = _mm256_madd_epi16(lstep1[58], k__pOne_mOne);
        lstep3[53] = _mm256_madd_epi16(lstep1[59], k__pOne_mOne);
        lstep3[54] = _mm256_madd_epi16(lstep1[56], k__pOne_mOne);
        lstep3[55] = _mm256_madd_epi16(lstep1[57], k__pOne_mOne);
        lstep3[56] = _mm256_madd_epi16(lstep1[56], kOne);
        lstep3[57] = _mm256_madd_epi16(lstep1[57], kOne);
        lstep3[58] = _mm256_madd_epi16(lstep1[58], kOne);
        lstep3[59] = _mm256_madd_epi16(lstep1[59], kOne);
        lstep3[60] = _mm256_madd_epi16(lstep1[60], kOne);
        lstep3[61] = _mm256_madd_epi16(lstep1[61], kOne);
        lstep3[62] = _mm256_madd_epi16(lstep1[62], kOne);
        lstep3[63] = _mm256_madd_epi16(lstep1[63], kOne);
      }
      // stage 4
      {
        // expanding to 32-bit length prior to addition operations
        sign[0] = _mm256_cmpgt_epi16(kZero, step2[8]);
        sign[1] = _mm256_cmpgt_epi16(kZero, step2[9]);
        sign[2] = _mm256_cmpgt_epi16(kZero, step2[14]);
        sign[3] = _mm256_cmpgt_epi16(kZero, step2[15]);
        lstep2[16] = _mm256_unpacklo_epi16(step2[8], sign[0]);
        lstep2[17] = _mm256_unpackhi_epi16(step2[8], sign[0]);
        lstep2[18] = _mm256_unpacklo_epi16(step2[9], sign[1]);
        lstep2[19] = _mm256_unpackhi_epi16(step2[9], sign[1]);
        lstep2[28] = _mm256_unpacklo_epi16(step2[14], sign[2]);
        lstep2[29] = _mm256_unpackhi_epi16(step2[14], sign[2]);
        lstep2[30] = _mm256_unpacklo_epi16(step2[15], sign[3]);
        lstep2[31] = _mm256_unpackhi_epi16(step2[15], sign[3]);
        lstep1[0] = _mm256_add_epi32(lstep3[6], lstep3[0]);
        lstep1[1] = _mm256_add_epi32(lstep3[7], lstep3[1]);
        lstep1[2] = _mm256_add_epi32(lstep3[4], lstep3[2]);
        lstep1[3] = _mm256_add_epi32(lstep3[5], lstep3[3]);
        lstep1[4] = _mm256_sub_epi32(lstep3[2], lstep3[4]);
        lstep1[5] = _mm256_sub_epi32(lstep3[3], lstep3[5]);
        lstep1[6] = _mm256_sub_epi32(lstep3[0], lstep3[6]);
        lstep1[7] = _mm256_sub_epi32(lstep3[1], lstep3[7]);
        lstep1[16] = _mm256_add_epi32(lstep3[22], lstep2[16]);
        lstep1[17] = _mm256_add_epi32(lstep3[23], lstep2[17]);
        lstep1[18] = _mm256_add_epi32(lstep3[20], lstep2[18]);
        lstep1[19] = _mm256_add_epi32(lstep3[21], lstep2[19]);
        lstep1[20] = _mm256_sub_epi32(lstep2[18], lstep3[20]);
        lstep1[21] = _mm256_sub_epi32(lstep2[19], lstep3[21]);
        lstep1[22] = _mm256_sub_epi32(lstep2[16], lstep3[22]);
        lstep1[23] = _mm256_sub_epi32(lstep2[17], lstep3[23]);
        lstep1[24] = _mm256_sub_epi32(lstep2[30], lstep3[24]);
        lstep1[25] = _mm256_sub_epi32(lstep2[31], lstep3[25]);
        lstep1[26] = _mm256_sub_epi32(lstep2[28], lstep3[26]);
        lstep1[27] = _mm256_sub_epi32(lstep2[29], lstep3[27]);
        lstep1[28] = _mm256_add_epi32(lstep3[26], lstep2[28]);
        lstep1[29] = _mm256_add_epi32(lstep3[27], lstep2[29]);
        lstep1[30] = _mm256_add_epi32(lstep3[24], lstep2[30]);
        lstep1[31] = _mm256_add_epi32(lstep3[25], lstep2[31]);
      }
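      // _mm256_cmpgt_epi16(kZero, x) materializes the sign mask of x, so
      // interleaving x with that mask sign-extends the 16-bit lanes to 32
      // bits; similar in effect to _mm256_cvtepi16_epi32, but keeping both
      // halves in their existing registers.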
      {
        // to be continued...
        //
        const __m256i k32_p16_p16 =
            pair256_set_epi32(cospi_16_64, cospi_16_64);
        const __m256i k32_p16_m16 =
            pair256_set_epi32(cospi_16_64, -cospi_16_64);
        u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]);
        u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]);
        u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]);
        u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]);
        // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide
        // instruction latency.
        v[0] = k_madd_epi32_avx2(u[0], k32_p16_m16);
        v[1] = k_madd_epi32_avx2(u[1], k32_p16_m16);
        v[2] = k_madd_epi32_avx2(u[2], k32_p16_m16);
        v[3] = k_madd_epi32_avx2(u[3], k32_p16_m16);
        v[4] = k_madd_epi32_avx2(u[0], k32_p16_p16);
        v[5] = k_madd_epi32_avx2(u[1], k32_p16_p16);
        v[6] = k_madd_epi32_avx2(u[2], k32_p16_p16);
        v[7] = k_madd_epi32_avx2(u[3], k32_p16_p16);
        u[0] = k_packs_epi64_avx2(v[0], v[1]);
        u[1] = k_packs_epi64_avx2(v[2], v[3]);
        u[2] = k_packs_epi64_avx2(v[4], v[5]);
        u[3] = k_packs_epi64_avx2(v[6], v[7]);
        v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
        lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
        lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
        lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
        lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
      }
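      // k_madd_epi32_avx2() (a helper defined earlier in this file) is the
      // 32-bit analogue of _mm256_madd_epi16: it multiplies the interleaved
      // 32-bit lanes by the cosine pair and sums each pair into a 64-bit
      // lane, and k_packs_epi64_avx2() narrows two such registers back to
      // packed 32-bit results before the usual round/shift.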
      {
        const __m256i k32_m08_p24 =
            pair256_set_epi32(-cospi_8_64, cospi_24_64);
        const __m256i k32_m24_m08 =
            pair256_set_epi32(-cospi_24_64, -cospi_8_64);
        const __m256i k32_p24_p08 =
            pair256_set_epi32(cospi_24_64, cospi_8_64);
        u[0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]);
        u[1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]);
        u[2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]);
        u[3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]);
        u[4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]);
        u[5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]);
        u[6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]);
        u[7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]);
        u[8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]);
        u[9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]);
        u[10] = _mm256_unpacklo_epi32(lstep3[41], lstep3[55]);
        u[11] = _mm256_unpackhi_epi32(lstep3[41], lstep3[55]);
        u[12] = _mm256_unpacklo_epi32(lstep3[42], lstep3[52]);
        u[13] = _mm256_unpackhi_epi32(lstep3[42], lstep3[52]);
        u[14] = _mm256_unpacklo_epi32(lstep3[43], lstep3[53]);
        u[15] = _mm256_unpackhi_epi32(lstep3[43], lstep3[53]);
        v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24);
        v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24);
        v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24);
        v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24);
        v[4] = k_madd_epi32_avx2(u[4], k32_m08_p24);
        v[5] = k_madd_epi32_avx2(u[5], k32_m08_p24);
        v[6] = k_madd_epi32_avx2(u[6], k32_m08_p24);
        v[7] = k_madd_epi32_avx2(u[7], k32_m08_p24);
        v[8] = k_madd_epi32_avx2(u[8], k32_m24_m08);
        v[9] = k_madd_epi32_avx2(u[9], k32_m24_m08);
        v[10] = k_madd_epi32_avx2(u[10], k32_m24_m08);
        v[11] = k_madd_epi32_avx2(u[11], k32_m24_m08);
        v[12] = k_madd_epi32_avx2(u[12], k32_m24_m08);
        v[13] = k_madd_epi32_avx2(u[13], k32_m24_m08);
        v[14] = k_madd_epi32_avx2(u[14], k32_m24_m08);
        v[15] = k_madd_epi32_avx2(u[15], k32_m24_m08);
        v[16] = k_madd_epi32_avx2(u[12], k32_m08_p24);
        v[17] = k_madd_epi32_avx2(u[13], k32_m08_p24);
        v[18] = k_madd_epi32_avx2(u[14], k32_m08_p24);
        v[19] = k_madd_epi32_avx2(u[15], k32_m08_p24);
        v[20] = k_madd_epi32_avx2(u[8], k32_m08_p24);
        v[21] = k_madd_epi32_avx2(u[9], k32_m08_p24);
        v[22] = k_madd_epi32_avx2(u[10], k32_m08_p24);
        v[23] = k_madd_epi32_avx2(u[11], k32_m08_p24);
        v[24] = k_madd_epi32_avx2(u[4], k32_p24_p08);
        v[25] = k_madd_epi32_avx2(u[5], k32_p24_p08);
        v[26] = k_madd_epi32_avx2(u[6], k32_p24_p08);
        v[27] = k_madd_epi32_avx2(u[7], k32_p24_p08);
        v[28] = k_madd_epi32_avx2(u[0], k32_p24_p08);
        v[29] = k_madd_epi32_avx2(u[1], k32_p24_p08);
        v[30] = k_madd_epi32_avx2(u[2], k32_p24_p08);
        v[31] = k_madd_epi32_avx2(u[3], k32_p24_p08);
        u[0] = k_packs_epi64_avx2(v[0], v[1]);
        u[1] = k_packs_epi64_avx2(v[2], v[3]);
        u[2] = k_packs_epi64_avx2(v[4], v[5]);
        u[3] = k_packs_epi64_avx2(v[6], v[7]);
        u[4] = k_packs_epi64_avx2(v[8], v[9]);
        u[5] = k_packs_epi64_avx2(v[10], v[11]);
        u[6] = k_packs_epi64_avx2(v[12], v[13]);
        u[7] = k_packs_epi64_avx2(v[14], v[15]);
        u[8] = k_packs_epi64_avx2(v[16], v[17]);
        u[9] = k_packs_epi64_avx2(v[18], v[19]);
        u[10] = k_packs_epi64_avx2(v[20], v[21]);
        u[11] = k_packs_epi64_avx2(v[22], v[23]);
        u[12] = k_packs_epi64_avx2(v[24], v[25]);
        u[13] = k_packs_epi64_avx2(v[26], v[27]);
        u[14] = k_packs_epi64_avx2(v[28], v[29]);
        u[15] = k_packs_epi64_avx2(v[30], v[31]);
        v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
        v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
        v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
        v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
        v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
        v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
        v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
        v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
        v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
        v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
        v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
        v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
        v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
        lstep1[36] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
        lstep1[37] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
        lstep1[38] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
        lstep1[39] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
        lstep1[40] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
        lstep1[41] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
        lstep1[42] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
        lstep1[43] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
        lstep1[52] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
        lstep1[53] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
        lstep1[54] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
        lstep1[55] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
        lstep1[56] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
        lstep1[57] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
        lstep1[58] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
        lstep1[59] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
      }
      // stage 5
      {
        lstep2[8] = _mm256_add_epi32(lstep1[10], lstep3[8]);
        lstep2[9] = _mm256_add_epi32(lstep1[11], lstep3[9]);
        lstep2[10] = _mm256_sub_epi32(lstep3[8], lstep1[10]);
        lstep2[11] = _mm256_sub_epi32(lstep3[9], lstep1[11]);
        lstep2[12] = _mm256_sub_epi32(lstep3[14], lstep1[12]);
        lstep2[13] = _mm256_sub_epi32(lstep3[15], lstep1[13]);
        lstep2[14] = _mm256_add_epi32(lstep1[12], lstep3[14]);
        lstep2[15] = _mm256_add_epi32(lstep1[13], lstep3[15]);
      }
      {
        const __m256i k32_p16_p16 =
            pair256_set_epi32(cospi_16_64, cospi_16_64);
        const __m256i k32_p16_m16 =
            pair256_set_epi32(cospi_16_64, -cospi_16_64);
        const __m256i k32_p24_p08 =
            pair256_set_epi32(cospi_24_64, cospi_8_64);
        const __m256i k32_m08_p24 =
            pair256_set_epi32(-cospi_8_64, cospi_24_64);
        u[0] = _mm256_unpacklo_epi32(lstep1[0], lstep1[2]);
        u[1] = _mm256_unpackhi_epi32(lstep1[0], lstep1[2]);
        u[2] = _mm256_unpacklo_epi32(lstep1[1], lstep1[3]);
        u[3] = _mm256_unpackhi_epi32(lstep1[1], lstep1[3]);
        u[4] = _mm256_unpacklo_epi32(lstep1[4], lstep1[6]);
        u[5] = _mm256_unpackhi_epi32(lstep1[4], lstep1[6]);
        u[6] = _mm256_unpacklo_epi32(lstep1[5], lstep1[7]);
        u[7] = _mm256_unpackhi_epi32(lstep1[5], lstep1[7]);
        // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide
        // instruction latency.
        v[0] = k_madd_epi32_avx2(u[0], k32_p16_p16);
        v[1] = k_madd_epi32_avx2(u[1], k32_p16_p16);
        v[2] = k_madd_epi32_avx2(u[2], k32_p16_p16);
        v[3] = k_madd_epi32_avx2(u[3], k32_p16_p16);
        v[4] = k_madd_epi32_avx2(u[0], k32_p16_m16);
        v[5] = k_madd_epi32_avx2(u[1], k32_p16_m16);
        v[6] = k_madd_epi32_avx2(u[2], k32_p16_m16);
        v[7] = k_madd_epi32_avx2(u[3], k32_p16_m16);
        v[8] = k_madd_epi32_avx2(u[4], k32_p24_p08);
        v[9] = k_madd_epi32_avx2(u[5], k32_p24_p08);
        v[10] = k_madd_epi32_avx2(u[6], k32_p24_p08);
        v[11] = k_madd_epi32_avx2(u[7], k32_p24_p08);
        v[12] = k_madd_epi32_avx2(u[4], k32_m08_p24);
        v[13] = k_madd_epi32_avx2(u[5], k32_m08_p24);
        v[14] = k_madd_epi32_avx2(u[6], k32_m08_p24);
        v[15] = k_madd_epi32_avx2(u[7], k32_m08_p24);
        u[0] = k_packs_epi64_avx2(v[0], v[1]);
        u[1] = k_packs_epi64_avx2(v[2], v[3]);
        u[2] = k_packs_epi64_avx2(v[4], v[5]);
        u[3] = k_packs_epi64_avx2(v[6], v[7]);
        u[4] = k_packs_epi64_avx2(v[8], v[9]);
        u[5] = k_packs_epi64_avx2(v[10], v[11]);
        u[6] = k_packs_epi64_avx2(v[12], v[13]);
        u[7] = k_packs_epi64_avx2(v[14], v[15]);
        v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
        v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
        v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
        v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
        v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
        u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
        u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
        u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
        u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
        u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
        u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
        u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
        u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
        sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
        sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
        sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
        sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
        sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
        sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
        sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
        sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
        u[0] = _mm256_sub_epi32(u[0], sign[0]);
        u[1] = _mm256_sub_epi32(u[1], sign[1]);
        u[2] = _mm256_sub_epi32(u[2], sign[2]);
        u[3] = _mm256_sub_epi32(u[3], sign[3]);
        u[4] = _mm256_sub_epi32(u[4], sign[4]);
        u[5] = _mm256_sub_epi32(u[5], sign[5]);
        u[6] = _mm256_sub_epi32(u[6], sign[6]);
        u[7] = _mm256_sub_epi32(u[7], sign[7]);
        u[0] = _mm256_add_epi32(u[0], K32One);
        u[1] = _mm256_add_epi32(u[1], K32One);
        u[2] = _mm256_add_epi32(u[2], K32One);
        u[3] = _mm256_add_epi32(u[3], K32One);
        u[4] = _mm256_add_epi32(u[4], K32One);
        u[5] = _mm256_add_epi32(u[5], K32One);
        u[6] = _mm256_add_epi32(u[6], K32One);
        u[7] = _mm256_add_epi32(u[7], K32One);
        u[0] = _mm256_srai_epi32(u[0], 2);
        u[1] = _mm256_srai_epi32(u[1], 2);
        u[2] = _mm256_srai_epi32(u[2], 2);
        u[3] = _mm256_srai_epi32(u[3], 2);
        u[4] = _mm256_srai_epi32(u[4], 2);
        u[5] = _mm256_srai_epi32(u[5], 2);
        u[6] = _mm256_srai_epi32(u[6], 2);
        u[7] = _mm256_srai_epi32(u[7], 2);
        // Combine
        out[0] = _mm256_packs_epi32(u[0], u[1]);
        out[16] = _mm256_packs_epi32(u[2], u[3]);
        out[8] = _mm256_packs_epi32(u[4], u[5]);
        out[24] = _mm256_packs_epi32(u[6], u[7]);
      }
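      // The cmpgt/sub/add/srai sequence above implements the scalar
      // second-pass rounding (x + 1 + (x < 0)) >> 2: subtracting the
      // all-ones sign mask adds 1 to negative lanes, then the +K32One and
      // arithmetic shift by 2 complete the rounded divide by 4.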
      {
        const __m256i k32_m08_p24 =
            pair256_set_epi32(-cospi_8_64, cospi_24_64);
        const __m256i k32_m24_m08 =
            pair256_set_epi32(-cospi_24_64, -cospi_8_64);
        const __m256i k32_p24_p08 =
            pair256_set_epi32(cospi_24_64, cospi_8_64);
        u[0] = _mm256_unpacklo_epi32(lstep1[18], lstep1[28]);
        u[1] = _mm256_unpackhi_epi32(lstep1[18], lstep1[28]);
        u[2] = _mm256_unpacklo_epi32(lstep1[19], lstep1[29]);
        u[3] = _mm256_unpackhi_epi32(lstep1[19], lstep1[29]);
        u[4] = _mm256_unpacklo_epi32(lstep1[20], lstep1[26]);
        u[5] = _mm256_unpackhi_epi32(lstep1[20], lstep1[26]);
        u[6] = _mm256_unpacklo_epi32(lstep1[21], lstep1[27]);
        u[7] = _mm256_unpackhi_epi32(lstep1[21], lstep1[27]);
        v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24);
        v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24);
        v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24);
        v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24);
        v[4] = k_madd_epi32_avx2(u[4], k32_m24_m08);
        v[5] = k_madd_epi32_avx2(u[5], k32_m24_m08);
        v[6] = k_madd_epi32_avx2(u[6], k32_m24_m08);
        v[7] = k_madd_epi32_avx2(u[7], k32_m24_m08);
        v[8] = k_madd_epi32_avx2(u[4], k32_m08_p24);
        v[9] = k_madd_epi32_avx2(u[5], k32_m08_p24);
        v[10] = k_madd_epi32_avx2(u[6], k32_m08_p24);
        v[11] = k_madd_epi32_avx2(u[7], k32_m08_p24);
        v[12] = k_madd_epi32_avx2(u[0], k32_p24_p08);
        v[13] = k_madd_epi32_avx2(u[1], k32_p24_p08);
        v[14] = k_madd_epi32_avx2(u[2], k32_p24_p08);
        v[15] = k_madd_epi32_avx2(u[3], k32_p24_p08);
        u[0] = k_packs_epi64_avx2(v[0], v[1]);
        u[1] = k_packs_epi64_avx2(v[2], v[3]);
        u[2] = k_packs_epi64_avx2(v[4], v[5]);
        u[3] = k_packs_epi64_avx2(v[6], v[7]);
        u[4] = k_packs_epi64_avx2(v[8], v[9]);
        u[5] = k_packs_epi64_avx2(v[10], v[11]);
        u[6] = k_packs_epi64_avx2(v[12], v[13]);
        u[7] = k_packs_epi64_avx2(v[14], v[15]);
        u[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        u[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        u[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        u[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
        u[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
        u[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
        u[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
        u[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
        lstep2[18] = _mm256_srai_epi32(u[0], DCT_CONST_BITS);
        lstep2[19] = _mm256_srai_epi32(u[1], DCT_CONST_BITS);
        lstep2[20] = _mm256_srai_epi32(u[2], DCT_CONST_BITS);
        lstep2[21] = _mm256_srai_epi32(u[3], DCT_CONST_BITS);
        lstep2[26] = _mm256_srai_epi32(u[4], DCT_CONST_BITS);
        lstep2[27] = _mm256_srai_epi32(u[5], DCT_CONST_BITS);
        lstep2[28] = _mm256_srai_epi32(u[6], DCT_CONST_BITS);
        lstep2[29] = _mm256_srai_epi32(u[7], DCT_CONST_BITS);
      }
      {
        lstep2[32] = _mm256_add_epi32(lstep1[38], lstep3[32]);
        lstep2[33] = _mm256_add_epi32(lstep1[39], lstep3[33]);
        lstep2[34] = _mm256_add_epi32(lstep1[36], lstep3[34]);
        lstep2[35] = _mm256_add_epi32(lstep1[37], lstep3[35]);
        lstep2[36] = _mm256_sub_epi32(lstep3[34], lstep1[36]);
        lstep2[37] = _mm256_sub_epi32(lstep3[35], lstep1[37]);
        lstep2[38] = _mm256_sub_epi32(lstep3[32], lstep1[38]);
        lstep2[39] = _mm256_sub_epi32(lstep3[33], lstep1[39]);
        lstep2[40] = _mm256_sub_epi32(lstep3[46], lstep1[40]);
        lstep2[41] = _mm256_sub_epi32(lstep3[47], lstep1[41]);
        lstep2[42] = _mm256_sub_epi32(lstep3[44], lstep1[42]);
        lstep2[43] = _mm256_sub_epi32(lstep3[45], lstep1[43]);
        lstep2[44] = _mm256_add_epi32(lstep1[42], lstep3[44]);
        lstep2[45] = _mm256_add_epi32(lstep1[43], lstep3[45]);
        lstep2[46] = _mm256_add_epi32(lstep1[40], lstep3[46]);
        lstep2[47] = _mm256_add_epi32(lstep1[41], lstep3[47]);
        lstep2[48] = _mm256_add_epi32(lstep1[54], lstep3[48]);
        lstep2[49] = _mm256_add_epi32(lstep1[55], lstep3[49]);
        lstep2[50] = _mm256_add_epi32(lstep1[52], lstep3[50]);
        lstep2[51] = _mm256_add_epi32(lstep1[53], lstep3[51]);
        lstep2[52] = _mm256_sub_epi32(lstep3[50], lstep1[52]);
        lstep2[53] = _mm256_sub_epi32(lstep3[51], lstep1[53]);
        lstep2[54] = _mm256_sub_epi32(lstep3[48], lstep1[54]);
        lstep2[55] = _mm256_sub_epi32(lstep3[49], lstep1[55]);
        lstep2[56] = _mm256_sub_epi32(lstep3[62], lstep1[56]);
        lstep2[57] = _mm256_sub_epi32(lstep3[63], lstep1[57]);
        lstep2[58] = _mm256_sub_epi32(lstep3[60], lstep1[58]);
        lstep2[59] = _mm256_sub_epi32(lstep3[61], lstep1[59]);
        lstep2[60] = _mm256_add_epi32(lstep1[58], lstep3[60]);
        lstep2[61] = _mm256_add_epi32(lstep1[59], lstep3[61]);
        lstep2[62] = _mm256_add_epi32(lstep1[56], lstep3[62]);
        lstep2[63] = _mm256_add_epi32(lstep1[57], lstep3[63]);
      }
  1859. // stage 6
        {
          const __m256i k32_p28_p04 =
              pair256_set_epi32(cospi_28_64, cospi_4_64);
          const __m256i k32_p12_p20 =
              pair256_set_epi32(cospi_12_64, cospi_20_64);
          const __m256i k32_m20_p12 =
              pair256_set_epi32(-cospi_20_64, cospi_12_64);
          const __m256i k32_m04_p28 =
              pair256_set_epi32(-cospi_4_64, cospi_28_64);
          u[0] = _mm256_unpacklo_epi32(lstep2[8], lstep2[14]);
          u[1] = _mm256_unpackhi_epi32(lstep2[8], lstep2[14]);
          u[2] = _mm256_unpacklo_epi32(lstep2[9], lstep2[15]);
          u[3] = _mm256_unpackhi_epi32(lstep2[9], lstep2[15]);
          u[4] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
          u[5] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
          u[6] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
          u[7] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
          u[8] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]);
          u[9] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]);
          u[10] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]);
          u[11] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]);
          u[12] = _mm256_unpacklo_epi32(lstep2[8], lstep2[14]);
          u[13] = _mm256_unpackhi_epi32(lstep2[8], lstep2[14]);
          u[14] = _mm256_unpacklo_epi32(lstep2[9], lstep2[15]);
          u[15] = _mm256_unpackhi_epi32(lstep2[9], lstep2[15]);
          v[0] = k_madd_epi32_avx2(u[0], k32_p28_p04);
          v[1] = k_madd_epi32_avx2(u[1], k32_p28_p04);
          v[2] = k_madd_epi32_avx2(u[2], k32_p28_p04);
          v[3] = k_madd_epi32_avx2(u[3], k32_p28_p04);
          v[4] = k_madd_epi32_avx2(u[4], k32_p12_p20);
          v[5] = k_madd_epi32_avx2(u[5], k32_p12_p20);
          v[6] = k_madd_epi32_avx2(u[6], k32_p12_p20);
          v[7] = k_madd_epi32_avx2(u[7], k32_p12_p20);
          v[8] = k_madd_epi32_avx2(u[8], k32_m20_p12);
          v[9] = k_madd_epi32_avx2(u[9], k32_m20_p12);
          v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
          v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
          v[12] = k_madd_epi32_avx2(u[12], k32_m04_p28);
          v[13] = k_madd_epi32_avx2(u[13], k32_m04_p28);
          v[14] = k_madd_epi32_avx2(u[14], k32_m04_p28);
          v[15] = k_madd_epi32_avx2(u[15], k32_m04_p28);
          u[0] = k_packs_epi64_avx2(v[0], v[1]);
          u[1] = k_packs_epi64_avx2(v[2], v[3]);
          u[2] = k_packs_epi64_avx2(v[4], v[5]);
          u[3] = k_packs_epi64_avx2(v[6], v[7]);
          u[4] = k_packs_epi64_avx2(v[8], v[9]);
          u[5] = k_packs_epi64_avx2(v[10], v[11]);
          u[6] = k_packs_epi64_avx2(v[12], v[13]);
          u[7] = k_packs_epi64_avx2(v[14], v[15]);
          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
          sign[0] = _mm256_cmpgt_epi32(kZero, u[0]);
          sign[1] = _mm256_cmpgt_epi32(kZero, u[1]);
          sign[2] = _mm256_cmpgt_epi32(kZero, u[2]);
          sign[3] = _mm256_cmpgt_epi32(kZero, u[3]);
          sign[4] = _mm256_cmpgt_epi32(kZero, u[4]);
          sign[5] = _mm256_cmpgt_epi32(kZero, u[5]);
          sign[6] = _mm256_cmpgt_epi32(kZero, u[6]);
          sign[7] = _mm256_cmpgt_epi32(kZero, u[7]);
          u[0] = _mm256_sub_epi32(u[0], sign[0]);
          u[1] = _mm256_sub_epi32(u[1], sign[1]);
          u[2] = _mm256_sub_epi32(u[2], sign[2]);
          u[3] = _mm256_sub_epi32(u[3], sign[3]);
          u[4] = _mm256_sub_epi32(u[4], sign[4]);
          u[5] = _mm256_sub_epi32(u[5], sign[5]);
          u[6] = _mm256_sub_epi32(u[6], sign[6]);
          u[7] = _mm256_sub_epi32(u[7], sign[7]);
          u[0] = _mm256_add_epi32(u[0], K32One);
          u[1] = _mm256_add_epi32(u[1], K32One);
          u[2] = _mm256_add_epi32(u[2], K32One);
          u[3] = _mm256_add_epi32(u[3], K32One);
          u[4] = _mm256_add_epi32(u[4], K32One);
          u[5] = _mm256_add_epi32(u[5], K32One);
          u[6] = _mm256_add_epi32(u[6], K32One);
          u[7] = _mm256_add_epi32(u[7], K32One);
          u[0] = _mm256_srai_epi32(u[0], 2);
          u[1] = _mm256_srai_epi32(u[1], 2);
          u[2] = _mm256_srai_epi32(u[2], 2);
          u[3] = _mm256_srai_epi32(u[3], 2);
          u[4] = _mm256_srai_epi32(u[4], 2);
          u[5] = _mm256_srai_epi32(u[5], 2);
          u[6] = _mm256_srai_epi32(u[6], 2);
          u[7] = _mm256_srai_epi32(u[7], 2);
          out[4] = _mm256_packs_epi32(u[0], u[1]);
          out[20] = _mm256_packs_epi32(u[2], u[3]);
          out[12] = _mm256_packs_epi32(u[4], u[5]);
          out[28] = _mm256_packs_epi32(u[6], u[7]);
        }
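        // Stage-6 add/sub butterflies: lstep3[16..31] combine lstep1[16..31]
        // with the rotated lstep2[18..29] values computed above.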
        {
          lstep3[16] = _mm256_add_epi32(lstep2[18], lstep1[16]);
          lstep3[17] = _mm256_add_epi32(lstep2[19], lstep1[17]);
          lstep3[18] = _mm256_sub_epi32(lstep1[16], lstep2[18]);
          lstep3[19] = _mm256_sub_epi32(lstep1[17], lstep2[19]);
          lstep3[20] = _mm256_sub_epi32(lstep1[22], lstep2[20]);
          lstep3[21] = _mm256_sub_epi32(lstep1[23], lstep2[21]);
          lstep3[22] = _mm256_add_epi32(lstep2[20], lstep1[22]);
          lstep3[23] = _mm256_add_epi32(lstep2[21], lstep1[23]);
          lstep3[24] = _mm256_add_epi32(lstep2[26], lstep1[24]);
          lstep3[25] = _mm256_add_epi32(lstep2[27], lstep1[25]);
          lstep3[26] = _mm256_sub_epi32(lstep1[24], lstep2[26]);
          lstep3[27] = _mm256_sub_epi32(lstep1[25], lstep2[27]);
          lstep3[28] = _mm256_sub_epi32(lstep1[30], lstep2[28]);
          lstep3[29] = _mm256_sub_epi32(lstep1[31], lstep2[29]);
          lstep3[30] = _mm256_add_epi32(lstep2[28], lstep1[30]);
          lstep3[31] = _mm256_add_epi32(lstep2[29], lstep1[31]);
        }
        {
          const __m256i k32_m04_p28 =
              pair256_set_epi32(-cospi_4_64, cospi_28_64);
          const __m256i k32_m28_m04 =
              pair256_set_epi32(-cospi_28_64, -cospi_4_64);
          const __m256i k32_m20_p12 =
              pair256_set_epi32(-cospi_20_64, cospi_12_64);
          const __m256i k32_m12_m20 =
              pair256_set_epi32(-cospi_12_64, -cospi_20_64);
          const __m256i k32_p12_p20 =
              pair256_set_epi32(cospi_12_64, cospi_20_64);
          const __m256i k32_p28_p04 =
              pair256_set_epi32(cospi_28_64, cospi_4_64);
          u[0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]);
          u[1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]);
          u[2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]);
          u[3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]);
          u[4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]);
          u[5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]);
          u[6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]);
          u[7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]);
          u[8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]);
          u[9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]);
          u[10] = _mm256_unpacklo_epi32(lstep2[43], lstep2[53]);
          u[11] = _mm256_unpackhi_epi32(lstep2[43], lstep2[53]);
          u[12] = _mm256_unpacklo_epi32(lstep2[44], lstep2[50]);
          u[13] = _mm256_unpackhi_epi32(lstep2[44], lstep2[50]);
          u[14] = _mm256_unpacklo_epi32(lstep2[45], lstep2[51]);
          u[15] = _mm256_unpackhi_epi32(lstep2[45], lstep2[51]);
          v[0] = k_madd_epi32_avx2(u[0], k32_m04_p28);
          v[1] = k_madd_epi32_avx2(u[1], k32_m04_p28);
          v[2] = k_madd_epi32_avx2(u[2], k32_m04_p28);
          v[3] = k_madd_epi32_avx2(u[3], k32_m04_p28);
          v[4] = k_madd_epi32_avx2(u[4], k32_m28_m04);
          v[5] = k_madd_epi32_avx2(u[5], k32_m28_m04);
          v[6] = k_madd_epi32_avx2(u[6], k32_m28_m04);
          v[7] = k_madd_epi32_avx2(u[7], k32_m28_m04);
          v[8] = k_madd_epi32_avx2(u[8], k32_m20_p12);
          v[9] = k_madd_epi32_avx2(u[9], k32_m20_p12);
          v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12);
          v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12);
          v[12] = k_madd_epi32_avx2(u[12], k32_m12_m20);
          v[13] = k_madd_epi32_avx2(u[13], k32_m12_m20);
          v[14] = k_madd_epi32_avx2(u[14], k32_m12_m20);
          v[15] = k_madd_epi32_avx2(u[15], k32_m12_m20);
          v[16] = k_madd_epi32_avx2(u[12], k32_m20_p12);
          v[17] = k_madd_epi32_avx2(u[13], k32_m20_p12);
          v[18] = k_madd_epi32_avx2(u[14], k32_m20_p12);
          v[19] = k_madd_epi32_avx2(u[15], k32_m20_p12);
          v[20] = k_madd_epi32_avx2(u[8], k32_p12_p20);
          v[21] = k_madd_epi32_avx2(u[9], k32_p12_p20);
          v[22] = k_madd_epi32_avx2(u[10], k32_p12_p20);
          v[23] = k_madd_epi32_avx2(u[11], k32_p12_p20);
          v[24] = k_madd_epi32_avx2(u[4], k32_m04_p28);
          v[25] = k_madd_epi32_avx2(u[5], k32_m04_p28);
          v[26] = k_madd_epi32_avx2(u[6], k32_m04_p28);
          v[27] = k_madd_epi32_avx2(u[7], k32_m04_p28);
          v[28] = k_madd_epi32_avx2(u[0], k32_p28_p04);
          v[29] = k_madd_epi32_avx2(u[1], k32_p28_p04);
          v[30] = k_madd_epi32_avx2(u[2], k32_p28_p04);
          v[31] = k_madd_epi32_avx2(u[3], k32_p28_p04);
          u[0] = k_packs_epi64_avx2(v[0], v[1]);
          u[1] = k_packs_epi64_avx2(v[2], v[3]);
          u[2] = k_packs_epi64_avx2(v[4], v[5]);
          u[3] = k_packs_epi64_avx2(v[6], v[7]);
          u[4] = k_packs_epi64_avx2(v[8], v[9]);
          u[5] = k_packs_epi64_avx2(v[10], v[11]);
          u[6] = k_packs_epi64_avx2(v[12], v[13]);
          u[7] = k_packs_epi64_avx2(v[14], v[15]);
          u[8] = k_packs_epi64_avx2(v[16], v[17]);
          u[9] = k_packs_epi64_avx2(v[18], v[19]);
          u[10] = k_packs_epi64_avx2(v[20], v[21]);
          u[11] = k_packs_epi64_avx2(v[22], v[23]);
          u[12] = k_packs_epi64_avx2(v[24], v[25]);
          u[13] = k_packs_epi64_avx2(v[26], v[27]);
          u[14] = k_packs_epi64_avx2(v[28], v[29]);
          u[15] = k_packs_epi64_avx2(v[30], v[31]);
          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
          lstep3[34] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
          lstep3[35] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
          lstep3[36] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
          lstep3[37] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
          lstep3[42] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
          lstep3[43] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
          lstep3[44] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
          lstep3[45] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
          lstep3[50] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
          lstep3[51] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
          lstep3[52] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
          lstep3[53] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
          lstep3[58] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
          lstep3[59] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
          lstep3[60] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
          lstep3[61] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
        }
        // stage 7
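        // Stage 7 rotates the lstep3[16..31] terms and emits eight of the odd
        // outputs (out[2], out[18], out[10], out[26], out[6], out[22],
        // out[14], out[30]); the add/sub scope after it builds lstep1[32..63]
        // for stage 8.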
        {
          const __m256i k32_p30_p02 =
              pair256_set_epi32(cospi_30_64, cospi_2_64);
          const __m256i k32_p14_p18 =
              pair256_set_epi32(cospi_14_64, cospi_18_64);
          const __m256i k32_p22_p10 =
              pair256_set_epi32(cospi_22_64, cospi_10_64);
          const __m256i k32_p06_p26 =
              pair256_set_epi32(cospi_6_64, cospi_26_64);
          const __m256i k32_m26_p06 =
              pair256_set_epi32(-cospi_26_64, cospi_6_64);
          const __m256i k32_m10_p22 =
              pair256_set_epi32(-cospi_10_64, cospi_22_64);
          const __m256i k32_m18_p14 =
              pair256_set_epi32(-cospi_18_64, cospi_14_64);
          const __m256i k32_m02_p30 =
              pair256_set_epi32(-cospi_2_64, cospi_30_64);
          u[0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]);
          u[1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]);
          u[2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]);
          u[3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]);
          u[4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]);
          u[5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]);
          u[6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]);
          u[7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]);
          u[8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]);
          u[9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]);
          u[10] = _mm256_unpacklo_epi32(lstep3[21], lstep3[27]);
          u[11] = _mm256_unpackhi_epi32(lstep3[21], lstep3[27]);
          u[12] = _mm256_unpacklo_epi32(lstep3[22], lstep3[24]);
          u[13] = _mm256_unpackhi_epi32(lstep3[22], lstep3[24]);
          u[14] = _mm256_unpacklo_epi32(lstep3[23], lstep3[25]);
          u[15] = _mm256_unpackhi_epi32(lstep3[23], lstep3[25]);
          v[0] = k_madd_epi32_avx2(u[0], k32_p30_p02);
          v[1] = k_madd_epi32_avx2(u[1], k32_p30_p02);
          v[2] = k_madd_epi32_avx2(u[2], k32_p30_p02);
          v[3] = k_madd_epi32_avx2(u[3], k32_p30_p02);
          v[4] = k_madd_epi32_avx2(u[4], k32_p14_p18);
          v[5] = k_madd_epi32_avx2(u[5], k32_p14_p18);
          v[6] = k_madd_epi32_avx2(u[6], k32_p14_p18);
          v[7] = k_madd_epi32_avx2(u[7], k32_p14_p18);
          v[8] = k_madd_epi32_avx2(u[8], k32_p22_p10);
          v[9] = k_madd_epi32_avx2(u[9], k32_p22_p10);
          v[10] = k_madd_epi32_avx2(u[10], k32_p22_p10);
          v[11] = k_madd_epi32_avx2(u[11], k32_p22_p10);
          v[12] = k_madd_epi32_avx2(u[12], k32_p06_p26);
          v[13] = k_madd_epi32_avx2(u[13], k32_p06_p26);
          v[14] = k_madd_epi32_avx2(u[14], k32_p06_p26);
          v[15] = k_madd_epi32_avx2(u[15], k32_p06_p26);
          v[16] = k_madd_epi32_avx2(u[12], k32_m26_p06);
          v[17] = k_madd_epi32_avx2(u[13], k32_m26_p06);
          v[18] = k_madd_epi32_avx2(u[14], k32_m26_p06);
          v[19] = k_madd_epi32_avx2(u[15], k32_m26_p06);
          v[20] = k_madd_epi32_avx2(u[8], k32_m10_p22);
          v[21] = k_madd_epi32_avx2(u[9], k32_m10_p22);
          v[22] = k_madd_epi32_avx2(u[10], k32_m10_p22);
          v[23] = k_madd_epi32_avx2(u[11], k32_m10_p22);
          v[24] = k_madd_epi32_avx2(u[4], k32_m18_p14);
          v[25] = k_madd_epi32_avx2(u[5], k32_m18_p14);
          v[26] = k_madd_epi32_avx2(u[6], k32_m18_p14);
          v[27] = k_madd_epi32_avx2(u[7], k32_m18_p14);
          v[28] = k_madd_epi32_avx2(u[0], k32_m02_p30);
          v[29] = k_madd_epi32_avx2(u[1], k32_m02_p30);
          v[30] = k_madd_epi32_avx2(u[2], k32_m02_p30);
          v[31] = k_madd_epi32_avx2(u[3], k32_m02_p30);
          u[0] = k_packs_epi64_avx2(v[0], v[1]);
          u[1] = k_packs_epi64_avx2(v[2], v[3]);
          u[2] = k_packs_epi64_avx2(v[4], v[5]);
          u[3] = k_packs_epi64_avx2(v[6], v[7]);
          u[4] = k_packs_epi64_avx2(v[8], v[9]);
          u[5] = k_packs_epi64_avx2(v[10], v[11]);
          u[6] = k_packs_epi64_avx2(v[12], v[13]);
          u[7] = k_packs_epi64_avx2(v[14], v[15]);
          u[8] = k_packs_epi64_avx2(v[16], v[17]);
          u[9] = k_packs_epi64_avx2(v[18], v[19]);
          u[10] = k_packs_epi64_avx2(v[20], v[21]);
          u[11] = k_packs_epi64_avx2(v[22], v[23]);
          u[12] = k_packs_epi64_avx2(v[24], v[25]);
          u[13] = k_packs_epi64_avx2(v[26], v[27]);
          u[14] = k_packs_epi64_avx2(v[28], v[29]);
          u[15] = k_packs_epi64_avx2(v[30], v[31]);
          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
          u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
          u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
          v[0] = _mm256_cmpgt_epi32(kZero, u[0]);
          v[1] = _mm256_cmpgt_epi32(kZero, u[1]);
          v[2] = _mm256_cmpgt_epi32(kZero, u[2]);
          v[3] = _mm256_cmpgt_epi32(kZero, u[3]);
          v[4] = _mm256_cmpgt_epi32(kZero, u[4]);
          v[5] = _mm256_cmpgt_epi32(kZero, u[5]);
          v[6] = _mm256_cmpgt_epi32(kZero, u[6]);
          v[7] = _mm256_cmpgt_epi32(kZero, u[7]);
          v[8] = _mm256_cmpgt_epi32(kZero, u[8]);
          v[9] = _mm256_cmpgt_epi32(kZero, u[9]);
          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
          u[0] = _mm256_sub_epi32(u[0], v[0]);
          u[1] = _mm256_sub_epi32(u[1], v[1]);
          u[2] = _mm256_sub_epi32(u[2], v[2]);
          u[3] = _mm256_sub_epi32(u[3], v[3]);
          u[4] = _mm256_sub_epi32(u[4], v[4]);
          u[5] = _mm256_sub_epi32(u[5], v[5]);
          u[6] = _mm256_sub_epi32(u[6], v[6]);
          u[7] = _mm256_sub_epi32(u[7], v[7]);
          u[8] = _mm256_sub_epi32(u[8], v[8]);
          u[9] = _mm256_sub_epi32(u[9], v[9]);
          u[10] = _mm256_sub_epi32(u[10], v[10]);
          u[11] = _mm256_sub_epi32(u[11], v[11]);
          u[12] = _mm256_sub_epi32(u[12], v[12]);
          u[13] = _mm256_sub_epi32(u[13], v[13]);
          u[14] = _mm256_sub_epi32(u[14], v[14]);
          u[15] = _mm256_sub_epi32(u[15], v[15]);
          v[0] = _mm256_add_epi32(u[0], K32One);
          v[1] = _mm256_add_epi32(u[1], K32One);
          v[2] = _mm256_add_epi32(u[2], K32One);
          v[3] = _mm256_add_epi32(u[3], K32One);
          v[4] = _mm256_add_epi32(u[4], K32One);
          v[5] = _mm256_add_epi32(u[5], K32One);
          v[6] = _mm256_add_epi32(u[6], K32One);
          v[7] = _mm256_add_epi32(u[7], K32One);
          v[8] = _mm256_add_epi32(u[8], K32One);
          v[9] = _mm256_add_epi32(u[9], K32One);
          v[10] = _mm256_add_epi32(u[10], K32One);
          v[11] = _mm256_add_epi32(u[11], K32One);
          v[12] = _mm256_add_epi32(u[12], K32One);
          v[13] = _mm256_add_epi32(u[13], K32One);
          v[14] = _mm256_add_epi32(u[14], K32One);
          v[15] = _mm256_add_epi32(u[15], K32One);
          u[0] = _mm256_srai_epi32(v[0], 2);
          u[1] = _mm256_srai_epi32(v[1], 2);
          u[2] = _mm256_srai_epi32(v[2], 2);
          u[3] = _mm256_srai_epi32(v[3], 2);
          u[4] = _mm256_srai_epi32(v[4], 2);
          u[5] = _mm256_srai_epi32(v[5], 2);
          u[6] = _mm256_srai_epi32(v[6], 2);
          u[7] = _mm256_srai_epi32(v[7], 2);
          u[8] = _mm256_srai_epi32(v[8], 2);
          u[9] = _mm256_srai_epi32(v[9], 2);
          u[10] = _mm256_srai_epi32(v[10], 2);
          u[11] = _mm256_srai_epi32(v[11], 2);
          u[12] = _mm256_srai_epi32(v[12], 2);
          u[13] = _mm256_srai_epi32(v[13], 2);
          u[14] = _mm256_srai_epi32(v[14], 2);
          u[15] = _mm256_srai_epi32(v[15], 2);
          out[2] = _mm256_packs_epi32(u[0], u[1]);
          out[18] = _mm256_packs_epi32(u[2], u[3]);
          out[10] = _mm256_packs_epi32(u[4], u[5]);
          out[26] = _mm256_packs_epi32(u[6], u[7]);
          out[6] = _mm256_packs_epi32(u[8], u[9]);
          out[22] = _mm256_packs_epi32(u[10], u[11]);
          out[14] = _mm256_packs_epi32(u[12], u[13]);
          out[30] = _mm256_packs_epi32(u[14], u[15]);
        }
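        // Same sum/difference construction one level up the tree: each lstep1
        // pair combines an lstep2 input with a rotated lstep3 value from the
        // scope above.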
        {
          lstep1[32] = _mm256_add_epi32(lstep3[34], lstep2[32]);
          lstep1[33] = _mm256_add_epi32(lstep3[35], lstep2[33]);
          lstep1[34] = _mm256_sub_epi32(lstep2[32], lstep3[34]);
          lstep1[35] = _mm256_sub_epi32(lstep2[33], lstep3[35]);
          lstep1[36] = _mm256_sub_epi32(lstep2[38], lstep3[36]);
          lstep1[37] = _mm256_sub_epi32(lstep2[39], lstep3[37]);
          lstep1[38] = _mm256_add_epi32(lstep3[36], lstep2[38]);
          lstep1[39] = _mm256_add_epi32(lstep3[37], lstep2[39]);
          lstep1[40] = _mm256_add_epi32(lstep3[42], lstep2[40]);
          lstep1[41] = _mm256_add_epi32(lstep3[43], lstep2[41]);
          lstep1[42] = _mm256_sub_epi32(lstep2[40], lstep3[42]);
          lstep1[43] = _mm256_sub_epi32(lstep2[41], lstep3[43]);
          lstep1[44] = _mm256_sub_epi32(lstep2[46], lstep3[44]);
          lstep1[45] = _mm256_sub_epi32(lstep2[47], lstep3[45]);
          lstep1[46] = _mm256_add_epi32(lstep3[44], lstep2[46]);
          lstep1[47] = _mm256_add_epi32(lstep3[45], lstep2[47]);
          lstep1[48] = _mm256_add_epi32(lstep3[50], lstep2[48]);
          lstep1[49] = _mm256_add_epi32(lstep3[51], lstep2[49]);
          lstep1[50] = _mm256_sub_epi32(lstep2[48], lstep3[50]);
          lstep1[51] = _mm256_sub_epi32(lstep2[49], lstep3[51]);
          lstep1[52] = _mm256_sub_epi32(lstep2[54], lstep3[52]);
          lstep1[53] = _mm256_sub_epi32(lstep2[55], lstep3[53]);
          lstep1[54] = _mm256_add_epi32(lstep3[52], lstep2[54]);
          lstep1[55] = _mm256_add_epi32(lstep3[53], lstep2[55]);
          lstep1[56] = _mm256_add_epi32(lstep3[58], lstep2[56]);
          lstep1[57] = _mm256_add_epi32(lstep3[59], lstep2[57]);
          lstep1[58] = _mm256_sub_epi32(lstep2[56], lstep3[58]);
          lstep1[59] = _mm256_sub_epi32(lstep2[57], lstep3[59]);
          lstep1[60] = _mm256_sub_epi32(lstep2[62], lstep3[60]);
          lstep1[61] = _mm256_sub_epi32(lstep2[63], lstep3[61]);
          lstep1[62] = _mm256_add_epi32(lstep3[60], lstep2[62]);
          lstep1[63] = _mm256_add_epi32(lstep3[61], lstep2[63]);
        }
        // stage 8
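        // Stage 8 applies the final rotations to lstep1[32..63] and emits the
        // sixteen remaining odd outputs: out[1], out[17], out[9], out[25],
        // out[7], out[23], out[15], out[31] here, then out[5], out[21],
        // out[13], out[29], out[3], out[19], out[11], out[27] in the next
        // scope. The cmpgt/sub/add/srai tail rounds each result, effectively
        // (x + 1 + (x < 0)) >> 2, before packing back to 16 bits.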
        {
          const __m256i k32_p31_p01 =
              pair256_set_epi32(cospi_31_64, cospi_1_64);
          const __m256i k32_p15_p17 =
              pair256_set_epi32(cospi_15_64, cospi_17_64);
          const __m256i k32_p23_p09 =
              pair256_set_epi32(cospi_23_64, cospi_9_64);
          const __m256i k32_p07_p25 =
              pair256_set_epi32(cospi_7_64, cospi_25_64);
          const __m256i k32_m25_p07 =
              pair256_set_epi32(-cospi_25_64, cospi_7_64);
          const __m256i k32_m09_p23 =
              pair256_set_epi32(-cospi_9_64, cospi_23_64);
          const __m256i k32_m17_p15 =
              pair256_set_epi32(-cospi_17_64, cospi_15_64);
          const __m256i k32_m01_p31 =
              pair256_set_epi32(-cospi_1_64, cospi_31_64);
          u[0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]);
          u[1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]);
          u[2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]);
          u[3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]);
          u[4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]);
          u[5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]);
          u[6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]);
          u[7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]);
          u[8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]);
          u[9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]);
          u[10] = _mm256_unpacklo_epi32(lstep1[37], lstep1[59]);
          u[11] = _mm256_unpackhi_epi32(lstep1[37], lstep1[59]);
          u[12] = _mm256_unpacklo_epi32(lstep1[38], lstep1[56]);
          u[13] = _mm256_unpackhi_epi32(lstep1[38], lstep1[56]);
          u[14] = _mm256_unpacklo_epi32(lstep1[39], lstep1[57]);
          u[15] = _mm256_unpackhi_epi32(lstep1[39], lstep1[57]);
          v[0] = k_madd_epi32_avx2(u[0], k32_p31_p01);
          v[1] = k_madd_epi32_avx2(u[1], k32_p31_p01);
          v[2] = k_madd_epi32_avx2(u[2], k32_p31_p01);
          v[3] = k_madd_epi32_avx2(u[3], k32_p31_p01);
          v[4] = k_madd_epi32_avx2(u[4], k32_p15_p17);
          v[5] = k_madd_epi32_avx2(u[5], k32_p15_p17);
          v[6] = k_madd_epi32_avx2(u[6], k32_p15_p17);
          v[7] = k_madd_epi32_avx2(u[7], k32_p15_p17);
          v[8] = k_madd_epi32_avx2(u[8], k32_p23_p09);
          v[9] = k_madd_epi32_avx2(u[9], k32_p23_p09);
          v[10] = k_madd_epi32_avx2(u[10], k32_p23_p09);
          v[11] = k_madd_epi32_avx2(u[11], k32_p23_p09);
          v[12] = k_madd_epi32_avx2(u[12], k32_p07_p25);
          v[13] = k_madd_epi32_avx2(u[13], k32_p07_p25);
          v[14] = k_madd_epi32_avx2(u[14], k32_p07_p25);
          v[15] = k_madd_epi32_avx2(u[15], k32_p07_p25);
          v[16] = k_madd_epi32_avx2(u[12], k32_m25_p07);
          v[17] = k_madd_epi32_avx2(u[13], k32_m25_p07);
          v[18] = k_madd_epi32_avx2(u[14], k32_m25_p07);
          v[19] = k_madd_epi32_avx2(u[15], k32_m25_p07);
          v[20] = k_madd_epi32_avx2(u[8], k32_m09_p23);
          v[21] = k_madd_epi32_avx2(u[9], k32_m09_p23);
          v[22] = k_madd_epi32_avx2(u[10], k32_m09_p23);
          v[23] = k_madd_epi32_avx2(u[11], k32_m09_p23);
          v[24] = k_madd_epi32_avx2(u[4], k32_m17_p15);
          v[25] = k_madd_epi32_avx2(u[5], k32_m17_p15);
          v[26] = k_madd_epi32_avx2(u[6], k32_m17_p15);
          v[27] = k_madd_epi32_avx2(u[7], k32_m17_p15);
          v[28] = k_madd_epi32_avx2(u[0], k32_m01_p31);
          v[29] = k_madd_epi32_avx2(u[1], k32_m01_p31);
          v[30] = k_madd_epi32_avx2(u[2], k32_m01_p31);
          v[31] = k_madd_epi32_avx2(u[3], k32_m01_p31);
          u[0] = k_packs_epi64_avx2(v[0], v[1]);
          u[1] = k_packs_epi64_avx2(v[2], v[3]);
          u[2] = k_packs_epi64_avx2(v[4], v[5]);
          u[3] = k_packs_epi64_avx2(v[6], v[7]);
          u[4] = k_packs_epi64_avx2(v[8], v[9]);
          u[5] = k_packs_epi64_avx2(v[10], v[11]);
          u[6] = k_packs_epi64_avx2(v[12], v[13]);
          u[7] = k_packs_epi64_avx2(v[14], v[15]);
          u[8] = k_packs_epi64_avx2(v[16], v[17]);
          u[9] = k_packs_epi64_avx2(v[18], v[19]);
          u[10] = k_packs_epi64_avx2(v[20], v[21]);
          u[11] = k_packs_epi64_avx2(v[22], v[23]);
          u[12] = k_packs_epi64_avx2(v[24], v[25]);
          u[13] = k_packs_epi64_avx2(v[26], v[27]);
          u[14] = k_packs_epi64_avx2(v[28], v[29]);
          u[15] = k_packs_epi64_avx2(v[30], v[31]);
          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
          u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
          u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
          v[0] = _mm256_cmpgt_epi32(kZero, u[0]);
          v[1] = _mm256_cmpgt_epi32(kZero, u[1]);
          v[2] = _mm256_cmpgt_epi32(kZero, u[2]);
          v[3] = _mm256_cmpgt_epi32(kZero, u[3]);
          v[4] = _mm256_cmpgt_epi32(kZero, u[4]);
          v[5] = _mm256_cmpgt_epi32(kZero, u[5]);
          v[6] = _mm256_cmpgt_epi32(kZero, u[6]);
          v[7] = _mm256_cmpgt_epi32(kZero, u[7]);
          v[8] = _mm256_cmpgt_epi32(kZero, u[8]);
          v[9] = _mm256_cmpgt_epi32(kZero, u[9]);
          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
          u[0] = _mm256_sub_epi32(u[0], v[0]);
          u[1] = _mm256_sub_epi32(u[1], v[1]);
          u[2] = _mm256_sub_epi32(u[2], v[2]);
          u[3] = _mm256_sub_epi32(u[3], v[3]);
          u[4] = _mm256_sub_epi32(u[4], v[4]);
          u[5] = _mm256_sub_epi32(u[5], v[5]);
          u[6] = _mm256_sub_epi32(u[6], v[6]);
          u[7] = _mm256_sub_epi32(u[7], v[7]);
          u[8] = _mm256_sub_epi32(u[8], v[8]);
          u[9] = _mm256_sub_epi32(u[9], v[9]);
          u[10] = _mm256_sub_epi32(u[10], v[10]);
          u[11] = _mm256_sub_epi32(u[11], v[11]);
          u[12] = _mm256_sub_epi32(u[12], v[12]);
          u[13] = _mm256_sub_epi32(u[13], v[13]);
          u[14] = _mm256_sub_epi32(u[14], v[14]);
          u[15] = _mm256_sub_epi32(u[15], v[15]);
          v[0] = _mm256_add_epi32(u[0], K32One);
          v[1] = _mm256_add_epi32(u[1], K32One);
          v[2] = _mm256_add_epi32(u[2], K32One);
          v[3] = _mm256_add_epi32(u[3], K32One);
          v[4] = _mm256_add_epi32(u[4], K32One);
          v[5] = _mm256_add_epi32(u[5], K32One);
          v[6] = _mm256_add_epi32(u[6], K32One);
          v[7] = _mm256_add_epi32(u[7], K32One);
          v[8] = _mm256_add_epi32(u[8], K32One);
          v[9] = _mm256_add_epi32(u[9], K32One);
          v[10] = _mm256_add_epi32(u[10], K32One);
          v[11] = _mm256_add_epi32(u[11], K32One);
          v[12] = _mm256_add_epi32(u[12], K32One);
          v[13] = _mm256_add_epi32(u[13], K32One);
          v[14] = _mm256_add_epi32(u[14], K32One);
          v[15] = _mm256_add_epi32(u[15], K32One);
          u[0] = _mm256_srai_epi32(v[0], 2);
          u[1] = _mm256_srai_epi32(v[1], 2);
          u[2] = _mm256_srai_epi32(v[2], 2);
          u[3] = _mm256_srai_epi32(v[3], 2);
          u[4] = _mm256_srai_epi32(v[4], 2);
          u[5] = _mm256_srai_epi32(v[5], 2);
          u[6] = _mm256_srai_epi32(v[6], 2);
          u[7] = _mm256_srai_epi32(v[7], 2);
          u[8] = _mm256_srai_epi32(v[8], 2);
          u[9] = _mm256_srai_epi32(v[9], 2);
          u[10] = _mm256_srai_epi32(v[10], 2);
          u[11] = _mm256_srai_epi32(v[11], 2);
          u[12] = _mm256_srai_epi32(v[12], 2);
          u[13] = _mm256_srai_epi32(v[13], 2);
          u[14] = _mm256_srai_epi32(v[14], 2);
          u[15] = _mm256_srai_epi32(v[15], 2);
          out[1] = _mm256_packs_epi32(u[0], u[1]);
          out[17] = _mm256_packs_epi32(u[2], u[3]);
          out[9] = _mm256_packs_epi32(u[4], u[5]);
          out[25] = _mm256_packs_epi32(u[6], u[7]);
          out[7] = _mm256_packs_epi32(u[8], u[9]);
          out[23] = _mm256_packs_epi32(u[10], u[11]);
          out[15] = _mm256_packs_epi32(u[12], u[13]);
          out[31] = _mm256_packs_epi32(u[14], u[15]);
        }
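        // Second stage-8 rotation block: the same madd/round/shift pipeline
        // over lstep1[40..55] with the cospi_{27,11,19,3,29,13,21,5}_64
        // constants, producing out[5], out[21], out[13], out[29], out[3],
        // out[19], out[11] and out[27].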
        {
          const __m256i k32_p27_p05 =
              pair256_set_epi32(cospi_27_64, cospi_5_64);
          const __m256i k32_p11_p21 =
              pair256_set_epi32(cospi_11_64, cospi_21_64);
          const __m256i k32_p19_p13 =
              pair256_set_epi32(cospi_19_64, cospi_13_64);
          const __m256i k32_p03_p29 =
              pair256_set_epi32(cospi_3_64, cospi_29_64);
          const __m256i k32_m29_p03 =
              pair256_set_epi32(-cospi_29_64, cospi_3_64);
          const __m256i k32_m13_p19 =
              pair256_set_epi32(-cospi_13_64, cospi_19_64);
          const __m256i k32_m21_p11 =
              pair256_set_epi32(-cospi_21_64, cospi_11_64);
          const __m256i k32_m05_p27 =
              pair256_set_epi32(-cospi_5_64, cospi_27_64);
          u[0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]);
          u[1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]);
          u[2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]);
          u[3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]);
          u[4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]);
          u[5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]);
          u[6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]);
          u[7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]);
          u[8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]);
          u[9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]);
          u[10] = _mm256_unpacklo_epi32(lstep1[45], lstep1[51]);
          u[11] = _mm256_unpackhi_epi32(lstep1[45], lstep1[51]);
          u[12] = _mm256_unpacklo_epi32(lstep1[46], lstep1[48]);
          u[13] = _mm256_unpackhi_epi32(lstep1[46], lstep1[48]);
          u[14] = _mm256_unpacklo_epi32(lstep1[47], lstep1[49]);
          u[15] = _mm256_unpackhi_epi32(lstep1[47], lstep1[49]);
          v[0] = k_madd_epi32_avx2(u[0], k32_p27_p05);
          v[1] = k_madd_epi32_avx2(u[1], k32_p27_p05);
          v[2] = k_madd_epi32_avx2(u[2], k32_p27_p05);
          v[3] = k_madd_epi32_avx2(u[3], k32_p27_p05);
          v[4] = k_madd_epi32_avx2(u[4], k32_p11_p21);
          v[5] = k_madd_epi32_avx2(u[5], k32_p11_p21);
          v[6] = k_madd_epi32_avx2(u[6], k32_p11_p21);
          v[7] = k_madd_epi32_avx2(u[7], k32_p11_p21);
          v[8] = k_madd_epi32_avx2(u[8], k32_p19_p13);
          v[9] = k_madd_epi32_avx2(u[9], k32_p19_p13);
          v[10] = k_madd_epi32_avx2(u[10], k32_p19_p13);
          v[11] = k_madd_epi32_avx2(u[11], k32_p19_p13);
          v[12] = k_madd_epi32_avx2(u[12], k32_p03_p29);
          v[13] = k_madd_epi32_avx2(u[13], k32_p03_p29);
          v[14] = k_madd_epi32_avx2(u[14], k32_p03_p29);
          v[15] = k_madd_epi32_avx2(u[15], k32_p03_p29);
          v[16] = k_madd_epi32_avx2(u[12], k32_m29_p03);
          v[17] = k_madd_epi32_avx2(u[13], k32_m29_p03);
          v[18] = k_madd_epi32_avx2(u[14], k32_m29_p03);
          v[19] = k_madd_epi32_avx2(u[15], k32_m29_p03);
          v[20] = k_madd_epi32_avx2(u[8], k32_m13_p19);
          v[21] = k_madd_epi32_avx2(u[9], k32_m13_p19);
          v[22] = k_madd_epi32_avx2(u[10], k32_m13_p19);
          v[23] = k_madd_epi32_avx2(u[11], k32_m13_p19);
          v[24] = k_madd_epi32_avx2(u[4], k32_m21_p11);
          v[25] = k_madd_epi32_avx2(u[5], k32_m21_p11);
          v[26] = k_madd_epi32_avx2(u[6], k32_m21_p11);
          v[27] = k_madd_epi32_avx2(u[7], k32_m21_p11);
          v[28] = k_madd_epi32_avx2(u[0], k32_m05_p27);
          v[29] = k_madd_epi32_avx2(u[1], k32_m05_p27);
          v[30] = k_madd_epi32_avx2(u[2], k32_m05_p27);
          v[31] = k_madd_epi32_avx2(u[3], k32_m05_p27);
          u[0] = k_packs_epi64_avx2(v[0], v[1]);
          u[1] = k_packs_epi64_avx2(v[2], v[3]);
          u[2] = k_packs_epi64_avx2(v[4], v[5]);
          u[3] = k_packs_epi64_avx2(v[6], v[7]);
          u[4] = k_packs_epi64_avx2(v[8], v[9]);
          u[5] = k_packs_epi64_avx2(v[10], v[11]);
          u[6] = k_packs_epi64_avx2(v[12], v[13]);
          u[7] = k_packs_epi64_avx2(v[14], v[15]);
          u[8] = k_packs_epi64_avx2(v[16], v[17]);
          u[9] = k_packs_epi64_avx2(v[18], v[19]);
          u[10] = k_packs_epi64_avx2(v[20], v[21]);
          u[11] = k_packs_epi64_avx2(v[22], v[23]);
          u[12] = k_packs_epi64_avx2(v[24], v[25]);
          u[13] = k_packs_epi64_avx2(v[26], v[27]);
          u[14] = k_packs_epi64_avx2(v[28], v[29]);
          u[15] = k_packs_epi64_avx2(v[30], v[31]);
          v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING);
          v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING);
          v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING);
          v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING);
          v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING);
          v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING);
          v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING);
          v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING);
          v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING);
          v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING);
          v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm256_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING);
          u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS);
          u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS);
          u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS);
          u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS);
          u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS);
          u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS);
          u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS);
          u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS);
          u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS);
          u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS);
          u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS);
          u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS);
          u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS);
          u[13] = _mm256_srai_epi32(v[13], DCT_CONST_BITS);
          u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS);
          u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS);
          v[0] = _mm256_cmpgt_epi32(kZero, u[0]);
          v[1] = _mm256_cmpgt_epi32(kZero, u[1]);
          v[2] = _mm256_cmpgt_epi32(kZero, u[2]);
          v[3] = _mm256_cmpgt_epi32(kZero, u[3]);
          v[4] = _mm256_cmpgt_epi32(kZero, u[4]);
          v[5] = _mm256_cmpgt_epi32(kZero, u[5]);
          v[6] = _mm256_cmpgt_epi32(kZero, u[6]);
          v[7] = _mm256_cmpgt_epi32(kZero, u[7]);
          v[8] = _mm256_cmpgt_epi32(kZero, u[8]);
          v[9] = _mm256_cmpgt_epi32(kZero, u[9]);
          v[10] = _mm256_cmpgt_epi32(kZero, u[10]);
          v[11] = _mm256_cmpgt_epi32(kZero, u[11]);
          v[12] = _mm256_cmpgt_epi32(kZero, u[12]);
          v[13] = _mm256_cmpgt_epi32(kZero, u[13]);
          v[14] = _mm256_cmpgt_epi32(kZero, u[14]);
          v[15] = _mm256_cmpgt_epi32(kZero, u[15]);
          u[0] = _mm256_sub_epi32(u[0], v[0]);
          u[1] = _mm256_sub_epi32(u[1], v[1]);
          u[2] = _mm256_sub_epi32(u[2], v[2]);
          u[3] = _mm256_sub_epi32(u[3], v[3]);
          u[4] = _mm256_sub_epi32(u[4], v[4]);
          u[5] = _mm256_sub_epi32(u[5], v[5]);
          u[6] = _mm256_sub_epi32(u[6], v[6]);
          u[7] = _mm256_sub_epi32(u[7], v[7]);
          u[8] = _mm256_sub_epi32(u[8], v[8]);
          u[9] = _mm256_sub_epi32(u[9], v[9]);
          u[10] = _mm256_sub_epi32(u[10], v[10]);
          u[11] = _mm256_sub_epi32(u[11], v[11]);
          u[12] = _mm256_sub_epi32(u[12], v[12]);
          u[13] = _mm256_sub_epi32(u[13], v[13]);
          u[14] = _mm256_sub_epi32(u[14], v[14]);
          u[15] = _mm256_sub_epi32(u[15], v[15]);
          v[0] = _mm256_add_epi32(u[0], K32One);
          v[1] = _mm256_add_epi32(u[1], K32One);
          v[2] = _mm256_add_epi32(u[2], K32One);
          v[3] = _mm256_add_epi32(u[3], K32One);
          v[4] = _mm256_add_epi32(u[4], K32One);
          v[5] = _mm256_add_epi32(u[5], K32One);
          v[6] = _mm256_add_epi32(u[6], K32One);
          v[7] = _mm256_add_epi32(u[7], K32One);
          v[8] = _mm256_add_epi32(u[8], K32One);
          v[9] = _mm256_add_epi32(u[9], K32One);
          v[10] = _mm256_add_epi32(u[10], K32One);
          v[11] = _mm256_add_epi32(u[11], K32One);
          v[12] = _mm256_add_epi32(u[12], K32One);
          v[13] = _mm256_add_epi32(u[13], K32One);
          v[14] = _mm256_add_epi32(u[14], K32One);
          v[15] = _mm256_add_epi32(u[15], K32One);
          u[0] = _mm256_srai_epi32(v[0], 2);
          u[1] = _mm256_srai_epi32(v[1], 2);
          u[2] = _mm256_srai_epi32(v[2], 2);
          u[3] = _mm256_srai_epi32(v[3], 2);
          u[4] = _mm256_srai_epi32(v[4], 2);
          u[5] = _mm256_srai_epi32(v[5], 2);
          u[6] = _mm256_srai_epi32(v[6], 2);
          u[7] = _mm256_srai_epi32(v[7], 2);
          u[8] = _mm256_srai_epi32(v[8], 2);
          u[9] = _mm256_srai_epi32(v[9], 2);
          u[10] = _mm256_srai_epi32(v[10], 2);
          u[11] = _mm256_srai_epi32(v[11], 2);
          u[12] = _mm256_srai_epi32(v[12], 2);
          u[13] = _mm256_srai_epi32(v[13], 2);
          u[14] = _mm256_srai_epi32(v[14], 2);
          u[15] = _mm256_srai_epi32(v[15], 2);
          out[5] = _mm256_packs_epi32(u[0], u[1]);
          out[21] = _mm256_packs_epi32(u[2], u[3]);
          out[13] = _mm256_packs_epi32(u[4], u[5]);
          out[29] = _mm256_packs_epi32(u[6], u[7]);
          out[3] = _mm256_packs_epi32(u[8], u[9]);
          out[19] = _mm256_packs_epi32(u[10], u[11]);
          out[11] = _mm256_packs_epi32(u[12], u[13]);
          out[27] = _mm256_packs_epi32(u[14], u[15]);
        }
      }
#endif
      // Transpose the results, do it as four 8x8 transposes.
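      // Each __m256i in out[] carries sixteen int16 coefficients: the low
      // 128-bit lane covers columns column_start..column_start + 7 and the
      // high lane the next eight, so below the low lanes are stored through
      // output_currStep and the high lanes through output_nextStep.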
      {
        int transpose_block;
        int16_t *output_currStep, *output_nextStep;
        if (0 == pass) {
          output_currStep = &intermediate[column_start * 32];
          output_nextStep = &intermediate[(column_start + 8) * 32];
        } else {
          output_currStep = &output_org[column_start * 32];
          output_nextStep = &output_org[(column_start + 8) * 32];
        }
        for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
          __m256i *this_out = &out[8 * transpose_block];
          // 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
          // 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
          // 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
          // 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75
          // 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
          // 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115
          // 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
          // 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
          const __m256i tr0_0 = _mm256_unpacklo_epi16(this_out[0], this_out[1]);
          const __m256i tr0_1 = _mm256_unpacklo_epi16(this_out[2], this_out[3]);
          const __m256i tr0_2 = _mm256_unpackhi_epi16(this_out[0], this_out[1]);
          const __m256i tr0_3 = _mm256_unpackhi_epi16(this_out[2], this_out[3]);
          const __m256i tr0_4 = _mm256_unpacklo_epi16(this_out[4], this_out[5]);
          const __m256i tr0_5 = _mm256_unpacklo_epi16(this_out[6], this_out[7]);
          const __m256i tr0_6 = _mm256_unpackhi_epi16(this_out[4], this_out[5]);
          const __m256i tr0_7 = _mm256_unpackhi_epi16(this_out[6], this_out[7]);
          // 00 20 01 21 02 22 03 23 08 28 09 29 10 30 11 31
          // 40 60 41 61 42 62 43 63 48 68 49 69 50 70 51 71
          // 04 24 05 25 06 26 07 27 12 32 13 33 14 34 15 35
          // 44 64 45 65 46 66 47 67 52 72 53 73 54 74 55 75
          // 80 100 81 101 82 102 83 103 88 108 89 109 90 110 91 111
          // 120 140 121 141 122 142 123 143 128 148 129 149 130 150 131 151
          // 84 104 85 105 86 106 87 107 92 112 93 113 94 114 95 115
          // 124 144 125 145 126 146 127 147 132 152 133 153 134 154 135 155
          const __m256i tr1_0 = _mm256_unpacklo_epi32(tr0_0, tr0_1);
          const __m256i tr1_1 = _mm256_unpacklo_epi32(tr0_2, tr0_3);
          const __m256i tr1_2 = _mm256_unpackhi_epi32(tr0_0, tr0_1);
          const __m256i tr1_3 = _mm256_unpackhi_epi32(tr0_2, tr0_3);
          const __m256i tr1_4 = _mm256_unpacklo_epi32(tr0_4, tr0_5);
          const __m256i tr1_5 = _mm256_unpacklo_epi32(tr0_6, tr0_7);
          const __m256i tr1_6 = _mm256_unpackhi_epi32(tr0_4, tr0_5);
          const __m256i tr1_7 = _mm256_unpackhi_epi32(tr0_6, tr0_7);
          // 00 20 40 60 01 21 41 61 08 28 48 68 09 29 49 69
          // 04 24 44 64 05 25 45 65 12 32 52 72 13 33 53 73
          // 02 22 42 62 03 23 43 63 10 30 50 70 11 31 51 71
          // 06 26 46 66 07 27 47 67 14 34 54 74 15 35 55 75
          // 80 100 120 140 81 101 121 141 88 108 128 148 89 109 129 149
          // 84 104 124 144 85 105 125 145 92 112 132 152 93 113 133 153
          // 82 102 122 142 83 103 123 143 90 110 130 150 91 111 131 151
          // 86 106 126 146 87 107 127 147 94 114 134 154 95 115 135 155
          __m256i tr2_0 = _mm256_unpacklo_epi64(tr1_0, tr1_4);
          __m256i tr2_1 = _mm256_unpackhi_epi64(tr1_0, tr1_4);
          __m256i tr2_2 = _mm256_unpacklo_epi64(tr1_2, tr1_6);
          __m256i tr2_3 = _mm256_unpackhi_epi64(tr1_2, tr1_6);
          __m256i tr2_4 = _mm256_unpacklo_epi64(tr1_1, tr1_5);
          __m256i tr2_5 = _mm256_unpackhi_epi64(tr1_1, tr1_5);
          __m256i tr2_6 = _mm256_unpacklo_epi64(tr1_3, tr1_7);
          __m256i tr2_7 = _mm256_unpackhi_epi64(tr1_3, tr1_7);
          // 00 20 40 60 80 100 120 140 08 28 48 68 88 108 128 148
          // 01 21 41 61 81 101 121 141 09 29 49 69 89 109 129 149
          // 02 22 42 62 82 102 122 142 10 30 50 70 90 110 130 150
          // 03 23 43 63 83 103 123 143 11 31 51 71 91 111 131 151
          // 04 24 44 64 84 104 124 144 12 32 52 72 92 112 132 152
          // 05 25 45 65 85 105 125 145 13 33 53 73 93 113 133 153
          // 06 26 46 66 86 106 126 146 14 34 54 74 94 114 134 154
          // 07 27 47 67 87 107 127 147 15 35 55 75 95 115 135 155
          if (0 == pass) {
            // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
            // TODO(cd): see quality impact of only doing
            //           output[j] = (output[j] + 1) >> 2;
            // which would remove the code between here ...
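            // (The cmpgt masks below are all ones (-1) exactly where a
            // coefficient is positive, so subtracting them adds the (x > 0)
            // term before the +1 and the arithmetic shift by 2.)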
            __m256i tr2_0_0 = _mm256_cmpgt_epi16(tr2_0, kZero);
            __m256i tr2_1_0 = _mm256_cmpgt_epi16(tr2_1, kZero);
            __m256i tr2_2_0 = _mm256_cmpgt_epi16(tr2_2, kZero);
            __m256i tr2_3_0 = _mm256_cmpgt_epi16(tr2_3, kZero);
            __m256i tr2_4_0 = _mm256_cmpgt_epi16(tr2_4, kZero);
            __m256i tr2_5_0 = _mm256_cmpgt_epi16(tr2_5, kZero);
            __m256i tr2_6_0 = _mm256_cmpgt_epi16(tr2_6, kZero);
            __m256i tr2_7_0 = _mm256_cmpgt_epi16(tr2_7, kZero);
            tr2_0 = _mm256_sub_epi16(tr2_0, tr2_0_0);
            tr2_1 = _mm256_sub_epi16(tr2_1, tr2_1_0);
            tr2_2 = _mm256_sub_epi16(tr2_2, tr2_2_0);
            tr2_3 = _mm256_sub_epi16(tr2_3, tr2_3_0);
            tr2_4 = _mm256_sub_epi16(tr2_4, tr2_4_0);
            tr2_5 = _mm256_sub_epi16(tr2_5, tr2_5_0);
            tr2_6 = _mm256_sub_epi16(tr2_6, tr2_6_0);
            tr2_7 = _mm256_sub_epi16(tr2_7, tr2_7_0);
            // ... and here.
            // PS: also change code in vp9/encoder/vp9_dct.c
            tr2_0 = _mm256_add_epi16(tr2_0, kOne);
            tr2_1 = _mm256_add_epi16(tr2_1, kOne);
            tr2_2 = _mm256_add_epi16(tr2_2, kOne);
            tr2_3 = _mm256_add_epi16(tr2_3, kOne);
            tr2_4 = _mm256_add_epi16(tr2_4, kOne);
            tr2_5 = _mm256_add_epi16(tr2_5, kOne);
            tr2_6 = _mm256_add_epi16(tr2_6, kOne);
            tr2_7 = _mm256_add_epi16(tr2_7, kOne);
            tr2_0 = _mm256_srai_epi16(tr2_0, 2);
            tr2_1 = _mm256_srai_epi16(tr2_1, 2);
            tr2_2 = _mm256_srai_epi16(tr2_2, 2);
            tr2_3 = _mm256_srai_epi16(tr2_3, 2);
            tr2_4 = _mm256_srai_epi16(tr2_4, 2);
            tr2_5 = _mm256_srai_epi16(tr2_5, 2);
            tr2_6 = _mm256_srai_epi16(tr2_6, 2);
            tr2_7 = _mm256_srai_epi16(tr2_7, 2);
          }
          // Note: even though all these stores are aligned, using the aligned
          // intrinsic makes the code slightly slower.
          _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32),
                           _mm256_castsi256_si128(tr2_0));
          _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32),
                           _mm256_castsi256_si128(tr2_1));
          _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32),
                           _mm256_castsi256_si128(tr2_2));
          _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32),
                           _mm256_castsi256_si128(tr2_3));
          _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32),
                           _mm256_castsi256_si128(tr2_4));
          _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32),
                           _mm256_castsi256_si128(tr2_5));
          _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32),
                           _mm256_castsi256_si128(tr2_6));
          _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32),
                           _mm256_castsi256_si128(tr2_7));
          _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32),
                           _mm256_extractf128_si256(tr2_0, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32),
                           _mm256_extractf128_si256(tr2_1, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32),
                           _mm256_extractf128_si256(tr2_2, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32),
                           _mm256_extractf128_si256(tr2_3, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32),
                           _mm256_extractf128_si256(tr2_4, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32),
                           _mm256_extractf128_si256(tr2_5, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32),
                           _mm256_extractf128_si256(tr2_6, 1));
          _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32),
                           _mm256_extractf128_si256(tr2_7, 1));
          // Process next 8x8
          output_currStep += 8;
          output_nextStep += 8;
        }
      }
    }
  }
}  // NOLINT