/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
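/*
 * Each convolve_horiz_*_dspr2() routine below evaluates the 8-tap horizontal
 * filter with DSPr2 inline assembly.  The eight int16_t taps are read as four
 * packed int32_t pairs so that each dpa.w.ph accumulates two taps at once;
 * the accumulator is seeded with 64 (the FILTER_BITS rounding constant),
 * extp shifts the result down by 7 (the caller programs extract position
 * 38 = 31 + 7 via wrdsp), and the vpx_ff_cropTbl lookup clamps to [0, 255].
 * For reference only, a plain-C sketch of one output row under those
 * assumptions (src already rewound by 3 taps, as done by the caller; this
 * block is illustrative and not part of the build):
 */
#if 0
static void convolve_horiz_row_ref(const uint8_t *src, uint8_t *dst,
                                   const int16_t *filter, int w) {
  int x, k;
  for (x = 0; x < w; ++x) {
    int sum = 64; /* rounding: 1 << (FILTER_BITS - 1) */
    for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k];
    sum >>= 7; /* FILTER_BITS */
    /* same clamp that the vpx_ff_cropTbl lookup performs */
    dst[x] = (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
  }
}
#endif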
static void convolve_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   const int16_t *filter_x0, int32_t h) {
  int32_t y;
  uint8_t *cm = vpx_ff_cropTbl;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2, Temp3, Temp4;
  uint32_t vector4a = 64;
  uint32_t tp1, tp2;
  uint32_t p1, p2, p3, p4;
  uint32_t n1, n2, n3, n4;
  uint32_t tn1, tn2;

  vector1b = ((const int32_t *)filter_x0)[0];
  vector2b = ((const int32_t *)filter_x0)[1];
  vector3b = ((const int32_t *)filter_x0)[2];
  vector4b = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src + src_stride);
    prefetch_load(src + src_stride + 32);
    prefetch_store(dst + dst_stride);

    __asm__ __volatile__(
        "ulw              %[tp1],      0(%[src])                      \n\t"
        "ulw              %[tp2],      4(%[src])                      \n\t"

        /* even 1. pixel */
        "mtlo             %[vector4a], $ac3                           \n\t"
        "mthi             $zero,       $ac3                           \n\t"
        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
        "ulw              %[tn2],      8(%[src])                      \n\t"
        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
        "extp             %[Temp1],    $ac3,           31             \n\t"

        /* even 2. pixel */
        "mtlo             %[vector4a], $ac2                           \n\t"
        "mthi             $zero,       $ac2                           \n\t"
        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
        "balign           %[tn1],      %[tn2],         3              \n\t"
        "balign           %[tn2],      %[tp2],         3              \n\t"
        "balign           %[tp2],      %[tp1],         3              \n\t"
        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
        "extp             %[Temp3],    $ac2,           31             \n\t"

        /* odd 1. pixel */
        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
        "mtlo             %[vector4a], $ac3                           \n\t"
        "mthi             $zero,       $ac3                           \n\t"
        "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
        "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
        "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
        "preceu.ph.qbl    %[n4],       %[tn2]                         \n\t"
        "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac3,        %[n3],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac3,        %[n4],          %[vector4b]    \n\t"
        "extp             %[Temp2],    $ac3,           31             \n\t"

        /* odd 2. pixel */
        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
        "mtlo             %[vector4a], $ac2                           \n\t"
        "mthi             $zero,       $ac2                           \n\t"
        "preceu.ph.qbr    %[n1],       %[tn1]                         \n\t"
        "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac2,        %[n4],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac2,        %[n1],          %[vector4b]    \n\t"
        "extp             %[Temp4],    $ac2,           31             \n\t"

        /* clamp */
        "lbux             %[tn1],      %[Temp2](%[cm])                \n\t"
        "lbux             %[n2],       %[Temp4](%[cm])                \n\t"

        /* store bytes */
        "sb               %[tp1],      0(%[dst])                      \n\t"
        "sb               %[tn1],      1(%[dst])                      \n\t"
        "sb               %[tp2],      2(%[dst])                      \n\t"
        "sb               %[n2],       3(%[dst])                      \n\t"

        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
          [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
          [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
          [src] "r"(src));

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
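/* 8-wide variant: each loop iteration produces one complete 8-pixel output
 * row.  Even and odd output phases run on separate accumulators, byte stores
 * are interleaved with the multiply-accumulates to hide latency, and balign
 * realigns the already-loaded source words by one byte for the odd phase
 * instead of reloading them. */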
static void convolve_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   const int16_t *filter_x0, int32_t h) {
  int32_t y;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2, Temp3;
  uint32_t tp1, tp2;
  uint32_t p1, p2, p3, p4, n1;
  uint32_t tn1, tn2, tn3;
  uint32_t st0, st1;

  vector1b = ((const int32_t *)filter_x0)[0];
  vector2b = ((const int32_t *)filter_x0)[1];
  vector3b = ((const int32_t *)filter_x0)[2];
  vector4b = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src + src_stride);
    prefetch_load(src + src_stride + 32);
    prefetch_store(dst + dst_stride);

    __asm__ __volatile__(
        "ulw              %[tp1],      0(%[src])                      \n\t"
        "ulw              %[tp2],      4(%[src])                      \n\t"

        /* even 1. pixel */
        "mtlo             %[vector4a], $ac3                           \n\t"
        "mthi             $zero,       $ac3                           \n\t"
        "mtlo             %[vector4a], $ac2                           \n\t"
        "mthi             $zero,       $ac2                           \n\t"
        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
        "ulw              %[tn2],      8(%[src])                      \n\t"
        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
        "extp             %[Temp1],    $ac3,           31             \n\t"

        /* even 2. pixel */
        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
        "preceu.ph.qbl    %[n1],       %[tn2]                         \n\t"
        "ulw              %[tn1],      12(%[src])                     \n\t"
        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
        "extp             %[Temp3],    $ac2,           31             \n\t"

        /* even 3. pixel */
        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
        "mtlo             %[vector4a], $ac1                           \n\t"
        "mthi             $zero,       $ac1                           \n\t"
        "preceu.ph.qbr    %[p2],       %[tn1]                         \n\t"
        "dpa.w.ph         $ac1,        %[p3],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac1,        %[p4],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac1,        %[p1],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac1,        %[n1],          %[vector4b]    \n\t"
        "extp             %[Temp1],    $ac1,           31             \n\t"

        /* even 4. pixel */
        "mtlo             %[vector4a], $ac2                           \n\t"
        "mthi             $zero,       $ac2                           \n\t"
        "mtlo             %[vector4a], $ac3                           \n\t"
        "mthi             $zero,       $ac3                           \n\t"
        "sb               %[st0],      0(%[dst])                      \n\t"
        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
        "balign           %[tn3],      %[tn1],         3              \n\t"
        "balign           %[tn1],      %[tn2],         3              \n\t"
        "balign           %[tn2],      %[tp2],         3              \n\t"
        "balign           %[tp2],      %[tp1],         3              \n\t"
        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
        "extp             %[Temp3],    $ac2,           31             \n\t"
        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"

        /* odd 1. pixel */
        "mtlo             %[vector4a], $ac1                           \n\t"
        "mthi             $zero,       $ac1                           \n\t"
        "sb               %[st1],      2(%[dst])                      \n\t"
        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
        "preceu.ph.qbr    %[p3],       %[tn2]                         \n\t"
        "preceu.ph.qbl    %[p4],       %[tn2]                         \n\t"
        "sb               %[st0],      4(%[dst])                      \n\t"
        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
        "extp             %[Temp2],    $ac3,           31             \n\t"

        /* odd 2. pixel */
        "mtlo             %[vector4a], $ac3                           \n\t"
        "mthi             $zero,       $ac3                           \n\t"
        "mtlo             %[vector4a], $ac2                           \n\t"
        "mthi             $zero,       $ac2                           \n\t"
        "preceu.ph.qbr    %[p1],       %[tn1]                         \n\t"
        "preceu.ph.qbl    %[n1],       %[tn1]                         \n\t"
        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
        "dpa.w.ph         $ac1,        %[p2],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac1,        %[p3],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac1,        %[p4],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac1,        %[p1],          %[vector4b]    \n\t"
        "extp             %[Temp3],    $ac1,           31             \n\t"

        /* odd 3. pixel */
        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
        "preceu.ph.qbr    %[p2],       %[tn3]                         \n\t"
        "dpa.w.ph         $ac3,        %[p3],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac3,        %[p4],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac3,        %[p1],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac3,        %[n1],          %[vector4b]    \n\t"
        "extp             %[Temp2],    $ac3,           31             \n\t"

        /* odd 4. pixel */
        "sb               %[st1],      1(%[dst])                      \n\t"
        "sb               %[st0],      6(%[dst])                      \n\t"
        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
        "extp             %[Temp1],    $ac2,           31             \n\t"

        /* clamp */
        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
        "lbux             %[n1],       %[Temp1](%[cm])                \n\t"

        /* store bytes */
        "sb               %[p4],       3(%[dst])                      \n\t"
        "sb               %[p2],       5(%[dst])                      \n\t"
        "sb               %[n1],       7(%[dst])                      \n\t"

        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
          [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0),
          [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
          [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1),
          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
          [src] "r"(src));

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
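/* 16-wide variant: the inner loop emits 16 output pixels per iteration and
 * runs `count` times per row (count == 1 for w == 16, count == 2 for w == 32;
 * see vpx_convolve8_horiz_dspr2 below).  Even outputs are filtered from word
 * loads at byte offsets 0, 4, 8, ...; odd outputs from the same data reloaded
 * one byte later (offsets 1, 5, 9, ...).  The three accumulators are rotated
 * so that loads, MACs, table lookups and stores overlap. */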
static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, int32_t src_stride,
                                    uint8_t *dst_ptr, int32_t dst_stride,
                                    const int16_t *filter_x0, int32_t h,
                                    int32_t count) {
  int32_t y, c;
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector_64 = 64;
  int32_t filter12, filter34, filter56, filter78;
  int32_t Temp1, Temp2, Temp3;
  uint32_t qload1, qload2, qload3;
  uint32_t p1, p2, p3, p4, p5;
  uint32_t st1, st2, st3;

  filter12 = ((const int32_t *)filter_x0)[0];
  filter34 = ((const int32_t *)filter_x0)[1];
  filter56 = ((const int32_t *)filter_x0)[2];
  filter78 = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    src = src_ptr;
    dst = dst_ptr;

    /* prefetch data to cache memory */
    prefetch_load(src_ptr + src_stride);
    prefetch_load(src_ptr + src_stride + 32);
    prefetch_store(dst_ptr + dst_stride);

    for (c = 0; c < count; c++) {
      __asm__ __volatile__(
          "ulw              %[qload1],    0(%[src])                    \n\t"
          "ulw              %[qload2],    4(%[src])                    \n\t"

          /* even 1. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
          "mthi             $zero,        $ac1                         \n\t"
          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
          "ulw              %[qload3],    8(%[src])                    \n\t"
          "dpa.w.ph         $ac1,         %[p1],        %[filter12]    \n\t" /* even 1 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter34]    \n\t" /* even 1 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter56]    \n\t" /* even 1 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter78]    \n\t" /* even 1 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* even 1 */

          /* even 2. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
          "ulw              %[qload1],    12(%[src])                   \n\t"
          "dpa.w.ph         $ac2,         %[p2],        %[filter12]    \n\t" /* even 1 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter34]    \n\t" /* even 1 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter56]    \n\t" /* even 1 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter78]    \n\t" /* even 1 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* even 1 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */

          /* even 3. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter12]    \n\t" /* even 3 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter34]    \n\t" /* even 3 */
          "dpa.w.ph         $ac3,         %[p1],        %[filter56]    \n\t" /* even 3 */
          "dpa.w.ph         $ac3,         %[p5],        %[filter78]    \n\t" /* even 3 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* even 3 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */

          /* even 4. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
          "ulw              %[qload2],    16(%[src])                   \n\t"
          "dpa.w.ph         $ac1,         %[p4],        %[filter12]    \n\t" /* even 4 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter34]    \n\t" /* even 4 */
          "dpa.w.ph         $ac1,         %[p5],        %[filter56]    \n\t" /* even 4 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter78]    \n\t" /* even 4 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* even 4 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */

          /* even 5. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter12]    \n\t" /* even 5 */
          "dpa.w.ph         $ac2,         %[p5],        %[filter34]    \n\t" /* even 5 */
          "dpa.w.ph         $ac2,         %[p2],        %[filter56]    \n\t" /* even 5 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter78]    \n\t" /* even 5 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* even 5 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */

          /* even 6. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
          "ulw              %[qload3],    20(%[src])                   \n\t"
          "dpa.w.ph         $ac3,         %[p5],        %[filter12]    \n\t" /* even 6 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter34]    \n\t" /* even 6 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter56]    \n\t" /* even 6 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter78]    \n\t" /* even 6 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* even 6 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */

          /* even 7. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter12]    \n\t" /* even 7 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter34]    \n\t" /* even 7 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter56]    \n\t" /* even 7 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter78]    \n\t" /* even 7 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* even 7 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */

          /* even 8. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
          "mthi             $zero,        $ac3                         \n\t"
          "dpa.w.ph         $ac2,         %[p3],        %[filter12]    \n\t" /* even 8 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter34]    \n\t" /* even 8 */
          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter56]    \n\t" /* even 8 */
          "dpa.w.ph         $ac2,         %[p5],        %[filter78]    \n\t" /* even 8 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* even 8 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */

          /* ODD pixels */
          "ulw              %[qload1],    1(%[src])                    \n\t"
          "ulw              %[qload2],    5(%[src])                    \n\t"

          /* odd 1. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
          "ulw              %[qload3],    9(%[src])                    \n\t"
          "dpa.w.ph         $ac3,         %[p1],        %[filter12]    \n\t" /* odd 1 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter34]    \n\t" /* odd 1 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter56]    \n\t" /* odd 1 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter78]    \n\t" /* odd 1 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* odd 1 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */

          /* odd 2. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
          "ulw              %[qload1],    13(%[src])                   \n\t"
          "dpa.w.ph         $ac1,         %[p2],        %[filter12]    \n\t" /* odd 2 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter34]    \n\t" /* odd 2 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter56]    \n\t" /* odd 2 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter78]    \n\t" /* odd 2 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* odd 2 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */

          /* odd 3. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter12]    \n\t" /* odd 3 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter34]    \n\t" /* odd 3 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter56]    \n\t" /* odd 3 */
          "dpa.w.ph         $ac2,         %[p5],        %[filter78]    \n\t" /* odd 3 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* odd 3 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */

          /* odd 4. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
          "ulw              %[qload2],    17(%[src])                   \n\t"
          "dpa.w.ph         $ac3,         %[p4],        %[filter12]    \n\t" /* odd 4 */
          "dpa.w.ph         $ac3,         %[p1],        %[filter34]    \n\t" /* odd 4 */
          "dpa.w.ph         $ac3,         %[p5],        %[filter56]    \n\t" /* odd 4 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter78]    \n\t" /* odd 4 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* odd 4 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */

          /* odd 5. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter12]    \n\t" /* odd 5 */
          "dpa.w.ph         $ac1,         %[p5],        %[filter34]    \n\t" /* odd 5 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter56]    \n\t" /* odd 5 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter78]    \n\t" /* odd 5 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* odd 5 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */

          /* odd 6. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
          "ulw              %[qload3],    21(%[src])                   \n\t"
          "dpa.w.ph         $ac2,         %[p5],        %[filter12]    \n\t" /* odd 6 */
          "dpa.w.ph         $ac2,         %[p2],        %[filter34]    \n\t" /* odd 6 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter56]    \n\t" /* odd 6 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter78]    \n\t" /* odd 6 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* odd 6 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */

          /* odd 7. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter12]    \n\t" /* odd 7 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter34]    \n\t" /* odd 7 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter56]    \n\t" /* odd 7 */
          "dpa.w.ph         $ac3,         %[p1],        %[filter78]    \n\t" /* odd 7 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* odd 7 */

          /* odd 8. pixel */
          "dpa.w.ph         $ac1,         %[p3],        %[filter12]    \n\t" /* odd 8 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter34]    \n\t" /* odd 8 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter56]    \n\t" /* odd 8 */
          "dpa.w.ph         $ac1,         %[p5],        %[filter78]    \n\t" /* odd 8 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* odd 8 */

          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */

          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */

          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
          : [filter12] "r"(filter12), [filter34] "r"(filter34),
            [filter56] "r"(filter56), [filter78] "r"(filter78),
            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
            [src] "r"(src));

      src += 16;
      dst += 16;
    }

    /* Next row... */
    src_ptr += src_stride;
    dst_ptr += dst_stride;
  }
}
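/* 64-wide variant: same inner loop as the 16-wide routine, hard-wired to four
 * 16-pixel blocks per row, with additional prefetches to cover the wider
 * source and destination rows. */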
static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, int32_t src_stride,
                                    uint8_t *dst_ptr, int32_t dst_stride,
                                    const int16_t *filter_x0, int32_t h) {
  int32_t y, c;
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector_64 = 64;
  int32_t filter12, filter34, filter56, filter78;
  int32_t Temp1, Temp2, Temp3;
  uint32_t qload1, qload2, qload3;
  uint32_t p1, p2, p3, p4, p5;
  uint32_t st1, st2, st3;

  filter12 = ((const int32_t *)filter_x0)[0];
  filter34 = ((const int32_t *)filter_x0)[1];
  filter56 = ((const int32_t *)filter_x0)[2];
  filter78 = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    src = src_ptr;
    dst = dst_ptr;

    /* prefetch data to cache memory */
    prefetch_load(src_ptr + src_stride);
    prefetch_load(src_ptr + src_stride + 32);
    prefetch_load(src_ptr + src_stride + 64);
    prefetch_store(dst_ptr + dst_stride);
    prefetch_store(dst_ptr + dst_stride + 32);

    for (c = 0; c < 4; c++) {
      __asm__ __volatile__(
          "ulw              %[qload1],    0(%[src])                    \n\t"
          "ulw              %[qload2],    4(%[src])                    \n\t"

          /* even 1. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
          "mthi             $zero,        $ac1                         \n\t"
          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
          "ulw              %[qload3],    8(%[src])                    \n\t"
          "dpa.w.ph         $ac1,         %[p1],        %[filter12]    \n\t" /* even 1 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter34]    \n\t" /* even 1 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter56]    \n\t" /* even 1 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter78]    \n\t" /* even 1 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* even 1 */

          /* even 2. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
          "ulw              %[qload1],    12(%[src])                   \n\t"
          "dpa.w.ph         $ac2,         %[p2],        %[filter12]    \n\t" /* even 1 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter34]    \n\t" /* even 1 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter56]    \n\t" /* even 1 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter78]    \n\t" /* even 1 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* even 1 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */

          /* even 3. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter12]    \n\t" /* even 3 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter34]    \n\t" /* even 3 */
          "dpa.w.ph         $ac3,         %[p1],        %[filter56]    \n\t" /* even 3 */
          "dpa.w.ph         $ac3,         %[p5],        %[filter78]    \n\t" /* even 3 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* even 3 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */

          /* even 4. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
          "ulw              %[qload2],    16(%[src])                   \n\t"
          "dpa.w.ph         $ac1,         %[p4],        %[filter12]    \n\t" /* even 4 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter34]    \n\t" /* even 4 */
          "dpa.w.ph         $ac1,         %[p5],        %[filter56]    \n\t" /* even 4 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter78]    \n\t" /* even 4 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* even 4 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */

          /* even 5. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter12]    \n\t" /* even 5 */
          "dpa.w.ph         $ac2,         %[p5],        %[filter34]    \n\t" /* even 5 */
          "dpa.w.ph         $ac2,         %[p2],        %[filter56]    \n\t" /* even 5 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter78]    \n\t" /* even 5 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* even 5 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */

          /* even 6. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
          "ulw              %[qload3],    20(%[src])                   \n\t"
          "dpa.w.ph         $ac3,         %[p5],        %[filter12]    \n\t" /* even 6 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter34]    \n\t" /* even 6 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter56]    \n\t" /* even 6 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter78]    \n\t" /* even 6 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* even 6 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */

          /* even 7. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter12]    \n\t" /* even 7 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter34]    \n\t" /* even 7 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter56]    \n\t" /* even 7 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter78]    \n\t" /* even 7 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* even 7 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */

          /* even 8. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
          "mthi             $zero,        $ac3                         \n\t"
          "dpa.w.ph         $ac2,         %[p3],        %[filter12]    \n\t" /* even 8 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter34]    \n\t" /* even 8 */
          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter56]    \n\t" /* even 8 */
          "dpa.w.ph         $ac2,         %[p5],        %[filter78]    \n\t" /* even 8 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* even 8 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */

          /* ODD pixels */
          "ulw              %[qload1],    1(%[src])                    \n\t"
          "ulw              %[qload2],    5(%[src])                    \n\t"

          /* odd 1. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
          "ulw              %[qload3],    9(%[src])                    \n\t"
          "dpa.w.ph         $ac3,         %[p1],        %[filter12]    \n\t" /* odd 1 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter34]    \n\t" /* odd 1 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter56]    \n\t" /* odd 1 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter78]    \n\t" /* odd 1 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* odd 1 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */

          /* odd 2. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
          "ulw              %[qload1],    13(%[src])                   \n\t"
          "dpa.w.ph         $ac1,         %[p2],        %[filter12]    \n\t" /* odd 2 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter34]    \n\t" /* odd 2 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter56]    \n\t" /* odd 2 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter78]    \n\t" /* odd 2 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* odd 2 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */

          /* odd 3. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter12]    \n\t" /* odd 3 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter34]    \n\t" /* odd 3 */
          "dpa.w.ph         $ac2,         %[p1],        %[filter56]    \n\t" /* odd 3 */
          "dpa.w.ph         $ac2,         %[p5],        %[filter78]    \n\t" /* odd 3 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* odd 3 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */

          /* odd 4. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
          "ulw              %[qload2],    17(%[src])                   \n\t"
          "dpa.w.ph         $ac3,         %[p4],        %[filter12]    \n\t" /* odd 4 */
          "dpa.w.ph         $ac3,         %[p1],        %[filter34]    \n\t" /* odd 4 */
          "dpa.w.ph         $ac3,         %[p5],        %[filter56]    \n\t" /* odd 4 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter78]    \n\t" /* odd 4 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* odd 4 */
          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */

          /* odd 5. pixel */
          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
          "mthi             $zero,        $ac2                         \n\t"
          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter12]    \n\t" /* odd 5 */
          "dpa.w.ph         $ac1,         %[p5],        %[filter34]    \n\t" /* odd 5 */
          "dpa.w.ph         $ac1,         %[p2],        %[filter56]    \n\t" /* odd 5 */
          "dpa.w.ph         $ac1,         %[p3],        %[filter78]    \n\t" /* odd 5 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* odd 5 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */

          /* odd 6. pixel */
          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
          "mthi             $zero,        $ac3                         \n\t"
          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
          "ulw              %[qload3],    21(%[src])                   \n\t"
          "dpa.w.ph         $ac2,         %[p5],        %[filter12]    \n\t" /* odd 6 */
          "dpa.w.ph         $ac2,         %[p2],        %[filter34]    \n\t" /* odd 6 */
          "dpa.w.ph         $ac2,         %[p3],        %[filter56]    \n\t" /* odd 6 */
          "dpa.w.ph         $ac2,         %[p4],        %[filter78]    \n\t" /* odd 6 */
          "extp             %[Temp2],     $ac2,         31             \n\t" /* odd 6 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */

          /* odd 7. pixel */
          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
          "mthi             $zero,        $ac1                         \n\t"
          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
          "dpa.w.ph         $ac3,         %[p2],        %[filter12]    \n\t" /* odd 7 */
          "dpa.w.ph         $ac3,         %[p3],        %[filter34]    \n\t" /* odd 7 */
          "dpa.w.ph         $ac3,         %[p4],        %[filter56]    \n\t" /* odd 7 */
          "dpa.w.ph         $ac3,         %[p1],        %[filter78]    \n\t" /* odd 7 */
          "extp             %[Temp3],     $ac3,         31             \n\t" /* odd 7 */

          /* odd 8. pixel */
          "dpa.w.ph         $ac1,         %[p3],        %[filter12]    \n\t" /* odd 8 */
          "dpa.w.ph         $ac1,         %[p4],        %[filter34]    \n\t" /* odd 8 */
          "dpa.w.ph         $ac1,         %[p1],        %[filter56]    \n\t" /* odd 8 */
          "dpa.w.ph         $ac1,         %[p5],        %[filter78]    \n\t" /* odd 8 */
          "extp             %[Temp1],     $ac1,         31             \n\t" /* odd 8 */

          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */

          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */

          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2),
            [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2),
            [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
            [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
          : [filter12] "r"(filter12), [filter34] "r"(filter34),
            [filter56] "r"(filter56), [filter78] "r"(filter78),
            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
            [src] "r"(src));

      src += 16;
      dst += 16;
    }

    /* Next row... */
    src_ptr += src_stride;
    dst_ptr += dst_stride;
  }
}
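/* Entry point.  Bilinear (2-tap) kernels take the dedicated DSPr2 path and
 * unsupported widths fall back to the C implementation.  wrdsp programs the
 * DSPControl extract position to 38 so that every extp above yields
 * acc >> 7 (the FILTER_BITS shift), and src is rewound by 3 so the 8-tap
 * window is centered on the output pixel. */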
void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const InterpKernel *filter, int x0_q4,
                               int x_step_q4, int y0_q4, int y_step_q4, int w,
                               int h) {
  const int16_t *const filter_x = filter[x0_q4];
  assert(x_step_q4 == 16);
  assert(((const int32_t *)filter_x)[1] != 0x800000);

  if (vpx_get_filter_taps(filter_x) == 2) {
    vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter, x0_q4,
                              x_step_q4, y0_q4, y_step_q4, w, h);
  } else {
    uint32_t pos = 38;

    prefetch_load((const uint8_t *)filter_x);
    src -= 3;

    /* bit position for extract from acc */
    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
                         :
                         : [pos] "r"(pos));

    /* prefetch data to cache memory */
    prefetch_load(src);
    prefetch_load(src + 32);
    prefetch_store(dst);

    switch (w) {
      case 4:
        convolve_horiz_4_dspr2(src, (int32_t)src_stride, dst,
                               (int32_t)dst_stride, filter_x, (int32_t)h);
        break;
      case 8:
        convolve_horiz_8_dspr2(src, (int32_t)src_stride, dst,
                               (int32_t)dst_stride, filter_x, (int32_t)h);
        break;
      case 16:
        convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst,
                                (int32_t)dst_stride, filter_x, (int32_t)h, 1);
        break;
      case 32:
        convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst,
                                (int32_t)dst_stride, filter_x, (int32_t)h, 2);
        break;
      case 64:
        prefetch_load(src + 64);
        prefetch_store(dst + 32);

        convolve_horiz_64_dspr2(src, (int32_t)src_stride, dst,
                                (int32_t)dst_stride, filter_x, (int32_t)h);
        break;
      default:
        vpx_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter,
                              x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}
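#if 0
/* Illustrative call, for reference only (assumes `kernels` points at a valid
 * InterpKernel table such as one of the libvpx sub-pel filter sets, and that
 * x_step_q4 == 16 as asserted above; this block is not part of the build): */
vpx_convolve8_horiz_dspr2(src, src_stride, dst, dst_stride, kernels, x0_q4,
                          16 /* x_step_q4 */, y0_q4, y_step_q4, 64, 64);
#endif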
#endif