convolve8_dspr2.c
/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
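/*
 * All routines below run an 8-tap horizontal filter but write their output
 * transposed: the destination pointer advances by dst_stride per output
 * pixel and by one byte per input row, so vpx_convolve8_dspr2() can reuse
 * the same horizontal code for the vertical pass.  The eight int16 taps are
 * reinterpreted as four packed halfword pairs (vector1b..vector4b below),
 * letting each dpa.w.ph instruction accumulate two taps at once.  A minimal
 * C model of one dpa.w.ph step, a reference sketch only (not part of the
 * build; pixel halfwords are zero-extended bytes, taps are signed):
 *
 *   int64_t dpa_w_ph(int64_t acc, uint32_t px_pair, uint32_t tap_pair) {
 *     acc += (int16_t)(px_pair & 0xffff) * (int16_t)(tap_pair & 0xffff);
 *     acc += (int16_t)(px_pair >> 16) * (int16_t)(tap_pair >> 16);
 *     return acc;
 *   }
 */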
static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              const int16_t *filter_x0,
                                              int32_t h) {
  int32_t y;
  uint8_t *cm = vpx_ff_cropTbl;
  uint8_t *dst_ptr;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2, Temp3, Temp4;
  uint32_t vector4a = 64;
  uint32_t tp1, tp2;
  uint32_t p1, p2, p3, p4;
  uint32_t tn1, tn2;

  vector1b = ((const int32_t *)filter_x0)[0];
  vector2b = ((const int32_t *)filter_x0)[1];
  vector3b = ((const int32_t *)filter_x0)[2];
  vector4b = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    dst_ptr = dst;
    /* prefetch data to cache memory */
    prefetch_load(src + src_stride);
    prefetch_load(src + src_stride + 32);

    __asm__ __volatile__(
        "ulw %[tp1], 0(%[src]) \n\t"
        "ulw %[tp2], 4(%[src]) \n\t"

        /* even 1. pixel */
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "preceu.ph.qbr %[p1], %[tp1] \n\t"
        "preceu.ph.qbl %[p2], %[tp1] \n\t"
        "preceu.ph.qbr %[p3], %[tp2] \n\t"
        "preceu.ph.qbl %[p4], %[tp2] \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "ulw %[tn2], 8(%[src]) \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp1], $ac3, 31 \n\t"

        /* even 2. pixel */
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tn2] \n\t"
        "balign %[tn1], %[tn2], 3 \n\t"
        "balign %[tn2], %[tp2], 3 \n\t"
        "balign %[tp2], %[tp1], 3 \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
        "extp %[Temp3], $ac2, 31 \n\t"

        /* odd 1. pixel */
        "lbux %[tp1], %[Temp1](%[cm]) \n\t"
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "preceu.ph.qbr %[p1], %[tp2] \n\t"
        "preceu.ph.qbl %[p2], %[tp2] \n\t"
        "preceu.ph.qbr %[p3], %[tn2] \n\t"
        "preceu.ph.qbl %[p4], %[tn2] \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp2], $ac3, 31 \n\t"

        /* odd 2. pixel */
        "lbux %[tp2], %[Temp3](%[cm]) \n\t"
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tn1] \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
        "extp %[Temp4], $ac2, 31 \n\t"

        /* clamp */
        "lbux %[tn1], %[Temp2](%[cm]) \n\t"
        "lbux %[p2], %[Temp4](%[cm]) \n\t"

        /* store bytes */
        "sb %[tp1], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
        "sb %[tn1], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
        "sb %[tp2], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
        "sb %[p2], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"

        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
          [p4] "=&r"(p4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4), [dst_ptr] "+r"(dst_ptr)
        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
          [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src),
          [dst_stride] "r"(dst_stride));

    /* Next row... */
    src += src_stride;
    dst += 1;
  }
}
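/*
 * Per output pixel, the block above is the scalar equivalent of the sketch
 * below (assuming FILTER_BITS == 7: the rounding term 64 is seeded into the
 * accumulator with mtlo, "extp ..., 31" performs the shift, and cm points
 * at vpx_ff_cropTbl, which clamps the result to [0, 255]):
 *
 *   int32_t sum = 64;
 *   int k;
 *   for (k = 0; k < 8; ++k) sum += src[k] * filter_x0[k];
 *   *dst_ptr = cm[sum >> 7];
 */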
static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              const int16_t *filter_x0,
                                              int32_t h) {
  int32_t y;
  uint8_t *cm = vpx_ff_cropTbl;
  uint8_t *dst_ptr;
  uint32_t vector4a = 64;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2, Temp3;
  uint32_t tp1, tp2, tp3;
  uint32_t p1, p2, p3, p4, n1;
  uint8_t *odd_dst;
  uint32_t dst_pitch_2 = (dst_stride << 1);

  vector1b = ((const int32_t *)filter_x0)[0];
  vector2b = ((const int32_t *)filter_x0)[1];
  vector3b = ((const int32_t *)filter_x0)[2];
  vector4b = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src + src_stride);
    prefetch_load(src + src_stride + 32);

    dst_ptr = dst;
    odd_dst = (dst_ptr + dst_stride);

    __asm__ __volatile__(
        "ulw %[tp2], 0(%[src]) \n\t"
        "ulw %[tp1], 4(%[src]) \n\t"

        /* even 1. pixel */
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tp2] \n\t"
        "preceu.ph.qbl %[p2], %[tp2] \n\t"
        "preceu.ph.qbr %[p3], %[tp1] \n\t"
        "preceu.ph.qbl %[p4], %[tp1] \n\t"
        "ulw %[tp3], 8(%[src]) \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp1], $ac3, 31 \n\t"

        /* even 2. pixel */
        "preceu.ph.qbr %[p1], %[tp3] \n\t"
        "preceu.ph.qbl %[n1], %[tp3] \n\t"
        "ulw %[tp2], 12(%[src]) \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
        "extp %[Temp3], $ac2, 31 \n\t"

        /* even 3. pixel */
        "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
        "mtlo %[vector4a], $ac1 \n\t"
        "mthi $zero, $ac1 \n\t"
        "preceu.ph.qbr %[p2], %[tp2] \n\t"
        "dpa.w.ph $ac1, %[p3], %[vector1b] \n\t"
        "dpa.w.ph $ac1, %[p4], %[vector2b] \n\t"
        "dpa.w.ph $ac1, %[p1], %[vector3b] \n\t"
        "lbux %[tp3], %[Temp3](%[cm]) \n\t"
        "dpa.w.ph $ac1, %[n1], %[vector4b] \n\t"
        "extp %[p3], $ac1, 31 \n\t"

        /* even 4. pixel */
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "sb %[Temp2], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
        "sb %[tp3], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
        "ulw %[tp1], 1(%[src]) \n\t"
        "ulw %[tp3], 5(%[src]) \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
        "extp %[Temp3], $ac2, 31 \n\t"
        "lbux %[tp2], %[p3](%[cm]) \n\t"

        /* odd 1. pixel */
        "mtlo %[vector4a], $ac1 \n\t"
        "mthi $zero, $ac1 \n\t"
        "preceu.ph.qbr %[p1], %[tp1] \n\t"
        "preceu.ph.qbl %[p2], %[tp1] \n\t"
        "preceu.ph.qbr %[p3], %[tp3] \n\t"
        "preceu.ph.qbl %[p4], %[tp3] \n\t"
        "sb %[tp2], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
        "ulw %[tp2], 9(%[src]) \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp2], $ac3, 31 \n\t"

        /* odd 2. pixel */
        "lbux %[tp1], %[Temp3](%[cm]) \n\t"
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tp2] \n\t"
        "preceu.ph.qbl %[n1], %[tp2] \n\t"
        "ulw %[Temp1], 13(%[src]) \n\t"
        "dpa.w.ph $ac1, %[p2], %[vector1b] \n\t"
        "sb %[tp1], 0(%[dst_ptr]) \n\t"
        "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
        "dpa.w.ph $ac1, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac1, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac1, %[p1], %[vector4b] \n\t"
        "extp %[Temp3], $ac1, 31 \n\t"

        /* odd 3. pixel */
        "lbux %[tp3], %[Temp2](%[cm]) \n\t"
        "preceu.ph.qbr %[p2], %[Temp1] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[n1], %[vector4b] \n\t"
        "extp %[Temp2], $ac3, 31 \n\t"

        /* odd 4. pixel */
        "sb %[tp3], 0(%[odd_dst]) \n\t"
        "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
        "extp %[Temp1], $ac2, 31 \n\t"

        /* clamp */
        "lbux %[p4], %[Temp3](%[cm]) \n\t"
        "lbux %[p2], %[Temp2](%[cm]) \n\t"
        "lbux %[n1], %[Temp1](%[cm]) \n\t"

        /* store bytes */
        "sb %[p4], 0(%[odd_dst]) \n\t"
        "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
        "sb %[p2], 0(%[odd_dst]) \n\t"
        "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
        "sb %[n1], 0(%[odd_dst]) \n\t"

        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1),
          [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1),
          [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
          [dst_ptr] "+r"(dst_ptr), [odd_dst] "+r"(odd_dst)
        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
          [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src),
          [dst_pitch_2] "r"(dst_pitch_2));

    /* Next row... */
    src += src_stride;
    dst += 1;
  }
}
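/*
 * In the 8-wide version above, even output columns come from the aligned
 * loads at src + 0/4/8/12 and odd columns from the unaligned loads at
 * src + 1/5/9/13.  In the transposed destination this means dst_ptr walks
 * output rows 0, 2, 4, 6 and odd_dst walks rows 1, 3, 5, 7, both stepping
 * by dst_pitch_2 == 2 * dst_stride.
 */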
static void convolve_horiz_16_transposed_dspr2(
    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
    int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) {
  int32_t c, y;
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector_64 = 64;
  int32_t filter12, filter34, filter56, filter78;
  int32_t Temp1, Temp2, Temp3;
  uint32_t qload1, qload2;
  uint32_t p1, p2, p3, p4, p5;
  uint32_t st1, st2, st3;
  uint32_t dst_pitch_2 = (dst_stride << 1);
  uint8_t *odd_dst;

  filter12 = ((const int32_t *)filter_x0)[0];
  filter34 = ((const int32_t *)filter_x0)[1];
  filter56 = ((const int32_t *)filter_x0)[2];
  filter78 = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src_ptr + src_stride);
    prefetch_load(src_ptr + src_stride + 32);

    src = src_ptr;
    dst = dst_ptr;
    odd_dst = (dst + dst_stride);

    for (c = 0; c < count; c++) {
      __asm__ __volatile__(
          "ulw %[qload1], 0(%[src]) \n\t"
          "ulw %[qload2], 4(%[src]) \n\t"

          /* even 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 1 */
          "mthi $zero, $ac1 \n\t"
          "mtlo %[vector_64], $ac2 \n\t" /* even 2 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "ulw %[qload2], 8(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p2], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p3], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p4], %[filter78] \n\t" /* even 1 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */

          /* even 2. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 3 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p1], %[qload2] \n\t"
          "preceu.ph.qbl %[p5], %[qload2] \n\t"
          "ulw %[qload1], 12(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p2], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p3], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p4], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p1], %[filter78] \n\t" /* even 1 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */

          /* even 3. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 4 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "sb %[st1], 0(%[dst]) \n\t" /* even 1 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac3, %[p3], %[filter12] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p4], %[filter34] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p1], %[filter56] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p5], %[filter78] \n\t" /* even 3 */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */

          /* even 4. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 5 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* even 2 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "ulw %[qload2], 16(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p4], %[filter12] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p1], %[filter34] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p5], %[filter56] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p2], %[filter78] \n\t" /* even 4 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */

          /* even 5. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 6 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[st3], 0(%[dst]) \n\t" /* even 3 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac2, %[p1], %[filter12] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p5], %[filter34] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p2], %[filter56] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p3], %[filter78] \n\t" /* even 5 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */

          /* even 6. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 7 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[st1], 0(%[dst]) \n\t" /* even 4 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "ulw %[qload1], 20(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p5], %[filter12] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* even 6 */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */

          /* even 7. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 8 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p5], %[qload1] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* even 5 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* even 7 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */

          /* even 8. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */
          "mthi $zero, $ac3 \n\t"
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* even 8 */
          "sb %[st3], 0(%[dst]) \n\t" /* even 6 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* even 8 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */

          /* ODD pixels */
          "ulw %[qload1], 1(%[src]) \n\t"
          "ulw %[qload2], 5(%[src]) \n\t"

          /* odd 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "sb %[st1], 0(%[dst]) \n\t" /* even 7 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "ulw %[qload2], 9(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p1], %[filter12] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* odd 1 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */

          /* odd 2. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p1], %[qload2] \n\t"
          "preceu.ph.qbl %[p5], %[qload2] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* even 8 */
          "ulw %[qload1], 13(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* odd 2 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */

          /* odd 3. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 1 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* odd 3 */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */

          /* odd 4. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 2 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "ulw %[qload2], 17(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p4], %[filter12] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p1], %[filter34] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p5], %[filter56] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p2], %[filter78] \n\t" /* odd 4 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */

          /* odd 5. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 3 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p5], %[filter34] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p2], %[filter56] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p3], %[filter78] \n\t" /* odd 5 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */

          /* odd 6. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 4 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "ulw %[qload1], 21(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p5], %[filter12] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p2], %[filter34] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p3], %[filter56] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p4], %[filter78] \n\t" /* odd 6 */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */

          /* odd 7. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p5], %[qload1] \n\t"
          "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 5 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac3, %[p2], %[filter12] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p3], %[filter34] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p4], %[filter56] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p1], %[filter78] \n\t" /* odd 7 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */

          /* odd 8. pixel */
          "dpa.w.ph $ac1, %[p3], %[filter12] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p4], %[filter34] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p1], %[filter56] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p5], %[filter78] \n\t" /* odd 8 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */

          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */

          "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 6 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 7 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 8 */

          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
          : [filter12] "r"(filter12), [filter34] "r"(filter34),
            [filter56] "r"(filter56), [filter78] "r"(filter78),
            [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src),
            [dst_pitch_2] "r"(dst_pitch_2));

      src += 16;
      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
      odd_dst = (dst + dst_stride);
    }

    /* Next row... */
    src_ptr += src_stride;
    dst_ptr += 1;
  }
}
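/*
 * count is the number of 16-pixel groups per input row (w / 16), so the
 * w == 32 caller runs the inner block twice.  Because the output is
 * transposed, each group restarts 16 output rows further down:
 * dst = dst_ptr + (c + 1) * 16 * dst_stride.
 */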
static void convolve_horiz_64_transposed_dspr2(
    const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
    int32_t dst_stride, const int16_t *filter_x0, int32_t h) {
  int32_t c, y;
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector_64 = 64;
  int32_t filter12, filter34, filter56, filter78;
  int32_t Temp1, Temp2, Temp3;
  uint32_t qload1, qload2;
  uint32_t p1, p2, p3, p4, p5;
  uint32_t st1, st2, st3;
  uint32_t dst_pitch_2 = (dst_stride << 1);
  uint8_t *odd_dst;

  filter12 = ((const int32_t *)filter_x0)[0];
  filter34 = ((const int32_t *)filter_x0)[1];
  filter56 = ((const int32_t *)filter_x0)[2];
  filter78 = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src_ptr + src_stride);
    prefetch_load(src_ptr + src_stride + 32);
    prefetch_load(src_ptr + src_stride + 64);

    src = src_ptr;
    dst = dst_ptr;
    odd_dst = (dst + dst_stride);

    for (c = 0; c < 4; c++) {
      __asm__ __volatile__(
          "ulw %[qload1], 0(%[src]) \n\t"
          "ulw %[qload2], 4(%[src]) \n\t"

          /* even 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 1 */
          "mthi $zero, $ac1 \n\t"
          "mtlo %[vector_64], $ac2 \n\t" /* even 2 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "ulw %[qload2], 8(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p2], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p3], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p4], %[filter78] \n\t" /* even 1 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */

          /* even 2. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 3 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p1], %[qload2] \n\t"
          "preceu.ph.qbl %[p5], %[qload2] \n\t"
          "ulw %[qload1], 12(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p2], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p3], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p4], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p1], %[filter78] \n\t" /* even 1 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */

          /* even 3. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 4 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "sb %[st1], 0(%[dst]) \n\t" /* even 1 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac3, %[p3], %[filter12] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p4], %[filter34] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p1], %[filter56] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p5], %[filter78] \n\t" /* even 3 */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */

          /* even 4. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 5 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* even 2 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "ulw %[qload2], 16(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p4], %[filter12] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p1], %[filter34] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p5], %[filter56] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p2], %[filter78] \n\t" /* even 4 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */

          /* even 5. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 6 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[st3], 0(%[dst]) \n\t" /* even 3 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac2, %[p1], %[filter12] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p5], %[filter34] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p2], %[filter56] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p3], %[filter78] \n\t" /* even 5 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */

          /* even 6. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 7 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[st1], 0(%[dst]) \n\t" /* even 4 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "ulw %[qload1], 20(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p5], %[filter12] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* even 6 */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */

          /* even 7. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 8 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p5], %[qload1] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* even 5 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* even 7 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */

          /* even 8. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */
          "mthi $zero, $ac3 \n\t"
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* even 8 */
          "sb %[st3], 0(%[dst]) \n\t" /* even 6 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* even 8 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */

          /* ODD pixels */
          "ulw %[qload1], 1(%[src]) \n\t"
          "ulw %[qload2], 5(%[src]) \n\t"

          /* odd 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "sb %[st1], 0(%[dst]) \n\t" /* even 7 */
          "addu %[dst], %[dst], %[dst_pitch_2] \n\t"
          "ulw %[qload2], 9(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p1], %[filter12] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* odd 1 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */

          /* odd 2. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p1], %[qload2] \n\t"
          "preceu.ph.qbl %[p5], %[qload2] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* even 8 */
          "ulw %[qload1], 13(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* odd 2 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */

          /* odd 3. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 1 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* odd 3 */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */

          /* odd 4. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 2 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "ulw %[qload2], 17(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p4], %[filter12] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p1], %[filter34] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p5], %[filter56] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p2], %[filter78] \n\t" /* odd 4 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */

          /* odd 5. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 3 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p5], %[filter34] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p2], %[filter56] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p3], %[filter78] \n\t" /* odd 5 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */

          /* odd 6. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 4 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "ulw %[qload1], 21(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p5], %[filter12] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p2], %[filter34] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p3], %[filter56] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p4], %[filter78] \n\t" /* odd 6 */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */

          /* odd 7. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p5], %[qload1] \n\t"
          "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 5 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "dpa.w.ph $ac3, %[p2], %[filter12] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p3], %[filter34] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p4], %[filter56] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p1], %[filter78] \n\t" /* odd 7 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */

          /* odd 8. pixel */
          "dpa.w.ph $ac1, %[p3], %[filter12] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p4], %[filter34] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p1], %[filter56] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p5], %[filter78] \n\t" /* odd 8 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */

          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */

          "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 6 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 7 */
          "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
          "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 8 */

          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
            [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
            [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
            [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
            [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
          : [filter12] "r"(filter12), [filter34] "r"(filter34),
            [filter56] "r"(filter56), [filter78] "r"(filter78),
            [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src),
            [dst_pitch_2] "r"(dst_pitch_2));

      src += 16;
      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
      odd_dst = (dst + dst_stride);
    }

    /* Next row... */
    src_ptr += src_stride;
    dst_ptr += 1;
  }
}
void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter, int w, int h) {
  int x, y, k;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int sum = 0;

      for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k];

      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
    }

    src += src_stride;
    dst += 1;
  }
}
void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
                           uint8_t *dst, ptrdiff_t dst_stride, int w, int h) {
  int x, y;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      dst[x * dst_stride] = src[x];
    }

    src += src_stride;
    dst += 1;
  }
}
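/*
 * Both C fallbacks above write transposed, i.e. for each input row y:
 *
 *   dst[x * dst_stride + y] = result of input row y at column x;
 *
 * equivalently dst(x, y) = f(src(y, x)).  Transposing on each pass is what
 * lets vpx_convolve8_dspr2() below reuse the horizontal kernels for the
 * vertical direction.
 */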
void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                         ptrdiff_t dst_stride, const InterpKernel *filter,
                         int x0_q4, int32_t x_step_q4, int y0_q4, int y_step_q4,
                         int w, int h) {
  const int16_t *const filter_x = filter[x0_q4];
  const int16_t *const filter_y = filter[y0_q4];
  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
  uint32_t pos = 38;

  assert(x_step_q4 == 16);
  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_x)[1] != 0x800000);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  (void)x_step_q4;

  /* bit position for extract from acc */
  __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
                       :
                       : [pos] "r"(pos));
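
  /*
   * pos == 38 sets the DSPControl extract position so that each
   * "extp %[...], $acN, 31" in the helpers returns accumulator bits
   * [38:7], i.e. (sum + 64) >> 7, matching ROUND_POWER_OF_TWO(sum,
   * FILTER_BITS) with FILTER_BITS == 7 and the 64 seeded via mtlo.
   */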
  if (intermediate_height < h) intermediate_height = h;

  /* horizontal pass into temp; a pass-through filter (center tap 128)
     reduces to a copy */
  if (filter_x[3] == 0x80) {
    copy_horiz_transposed(src - src_stride * 3, src_stride, temp,
                          intermediate_height, w, intermediate_height);
  } else if (vpx_get_filter_taps(filter_x) == 2) {
    vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
                        intermediate_height, filter_x, w, intermediate_height);
  } else {
    src -= (src_stride * 3 + 3);

    /* prefetch data to cache memory */
    prefetch_load(src);
    prefetch_load(src + 32);

    switch (w) {
      case 4:
        convolve_horiz_4_transposed_dspr2(src, src_stride, temp,
                                          intermediate_height, filter_x,
                                          intermediate_height);
        break;
      case 8:
        convolve_horiz_8_transposed_dspr2(src, src_stride, temp,
                                          intermediate_height, filter_x,
                                          intermediate_height);
        break;
      case 16:
      case 32:
        convolve_horiz_16_transposed_dspr2(src, src_stride, temp,
                                           intermediate_height, filter_x,
                                           intermediate_height, (w / 16));
        break;
      case 64:
        prefetch_load(src + 32);
        convolve_horiz_64_transposed_dspr2(src, src_stride, temp,
                                           intermediate_height, filter_x,
                                           intermediate_height);
        break;
      default:
        convolve_horiz_transposed(src, src_stride, temp, intermediate_height,
                                  filter_x, w, intermediate_height);
        break;
    }
  }

  /* vertical pass from temp into dst; again, a pass-through filter
     (center tap 128) reduces to a copy */
  if (filter_y[3] == 0x80) {
    copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w);
  } else if (vpx_get_filter_taps(filter_y) == 2) {
    vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
                        filter_y, h, w);
  } else {
    switch (h) {
      case 4:
        convolve_horiz_4_transposed_dspr2(temp, intermediate_height, dst,
                                          dst_stride, filter_y, w);
        break;
      case 8:
        convolve_horiz_8_transposed_dspr2(temp, intermediate_height, dst,
                                          dst_stride, filter_y, w);
        break;
      case 16:
      case 32:
        convolve_horiz_16_transposed_dspr2(temp, intermediate_height, dst,
                                           dst_stride, filter_y, w, (h / 16));
        break;
      case 64:
        convolve_horiz_64_transposed_dspr2(temp, intermediate_height, dst,
                                           dst_stride, filter_y, w);
        break;
      default:
        convolve_horiz_transposed(temp, intermediate_height, dst, dst_stride,
                                  filter_y, h, w);
        break;
    }
  }
}
void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel *filter, int x0_q4,
                             int x_step_q4, int y0_q4, int y_step_q4, int w,
                             int h) {
  int x, y;

  (void)filter;
  (void)x0_q4;
  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

  /* prefetch data to cache memory */
  prefetch_load(src);
  prefetch_load(src + 32);
  prefetch_store(dst);
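
  /*
   * Each sized case below copies whole rows with unaligned word loads (ulw)
   * and word stores (sw).  sw requires a word-aligned address, so this
   * assumes dst is 4-byte aligned, which holds for the frame buffers this
   * kernel is called with (assumption, not checked here).
   */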
  switch (w) {
    case 4: {
      uint32_t tp1;

      /* 1 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], (%[src]) \n\t"
            "sw %[tp1], (%[dst]) \n\t" /* store */

            : [tp1] "=&r"(tp1)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    }
    case 8: {
      uint32_t tp1, tp2;

      /* 2 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 4(%[src]) \n\t"
            "sw %[tp1], 0(%[dst]) \n\t" /* store */
            "sw %[tp2], 4(%[dst]) \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    }
    case 16: {
      uint32_t tp1, tp2, tp3, tp4;

      /* 4 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 4(%[src]) \n\t"
            "ulw %[tp3], 8(%[src]) \n\t"
            "ulw %[tp4], 12(%[src]) \n\t"
            "sw %[tp1], 0(%[dst]) \n\t" /* store */
            "sw %[tp2], 4(%[dst]) \n\t" /* store */
            "sw %[tp3], 8(%[dst]) \n\t" /* store */
            "sw %[tp4], 12(%[dst]) \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    }
    case 32: {
      uint32_t tp1, tp2, tp3, tp4;
      uint32_t tp5, tp6, tp7, tp8;

      /* 8 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 4(%[src]) \n\t"
            "ulw %[tp3], 8(%[src]) \n\t"
            "ulw %[tp4], 12(%[src]) \n\t"
            "ulw %[tp5], 16(%[src]) \n\t"
            "ulw %[tp6], 20(%[src]) \n\t"
            "ulw %[tp7], 24(%[src]) \n\t"
            "ulw %[tp8], 28(%[src]) \n\t"
            "sw %[tp1], 0(%[dst]) \n\t" /* store */
            "sw %[tp2], 4(%[dst]) \n\t" /* store */
            "sw %[tp3], 8(%[dst]) \n\t" /* store */
            "sw %[tp4], 12(%[dst]) \n\t" /* store */
            "sw %[tp5], 16(%[dst]) \n\t" /* store */
            "sw %[tp6], 20(%[dst]) \n\t" /* store */
            "sw %[tp7], 24(%[dst]) \n\t" /* store */
            "sw %[tp8], 28(%[dst]) \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
              [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    }
    case 64: {
      uint32_t tp1, tp2, tp3, tp4;
      uint32_t tp5, tp6, tp7, tp8;

      prefetch_load(src + 64);
      prefetch_store(dst + 32);

      /* 16 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_load(src + src_stride + 64);
        prefetch_store(dst + dst_stride);
        prefetch_store(dst + dst_stride + 32);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 4(%[src]) \n\t"
            "ulw %[tp3], 8(%[src]) \n\t"
            "ulw %[tp4], 12(%[src]) \n\t"
            "ulw %[tp5], 16(%[src]) \n\t"
            "ulw %[tp6], 20(%[src]) \n\t"
            "ulw %[tp7], 24(%[src]) \n\t"
            "ulw %[tp8], 28(%[src]) \n\t"
            "sw %[tp1], 0(%[dst]) \n\t" /* store */
            "sw %[tp2], 4(%[dst]) \n\t" /* store */
            "sw %[tp3], 8(%[dst]) \n\t" /* store */
            "sw %[tp4], 12(%[dst]) \n\t" /* store */
            "sw %[tp5], 16(%[dst]) \n\t" /* store */
            "sw %[tp6], 20(%[dst]) \n\t" /* store */
            "sw %[tp7], 24(%[dst]) \n\t" /* store */
            "sw %[tp8], 28(%[dst]) \n\t" /* store */
            "ulw %[tp1], 32(%[src]) \n\t"
            "ulw %[tp2], 36(%[src]) \n\t"
            "ulw %[tp3], 40(%[src]) \n\t"
            "ulw %[tp4], 44(%[src]) \n\t"
            "ulw %[tp5], 48(%[src]) \n\t"
            "ulw %[tp6], 52(%[src]) \n\t"
            "ulw %[tp7], 56(%[src]) \n\t"
            "ulw %[tp8], 60(%[src]) \n\t"
            "sw %[tp1], 32(%[dst]) \n\t" /* store */
            "sw %[tp2], 36(%[dst]) \n\t" /* store */
            "sw %[tp3], 40(%[dst]) \n\t" /* store */
            "sw %[tp4], 44(%[dst]) \n\t" /* store */
            "sw %[tp5], 48(%[dst]) \n\t" /* store */
            "sw %[tp6], 52(%[dst]) \n\t" /* store */
            "sw %[tp7], 56(%[dst]) \n\t" /* store */
            "sw %[tp8], 60(%[dst]) \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
              [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    }
    default:
      for (y = h; y--;) {
        for (x = 0; x < w; ++x) {
          dst[x] = src[x];
        }

        src += src_stride;
        dst += dst_stride;
      }
      break;
  }
}
#endif