convolve8_avg_horiz_dspr2.c

/*
 * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
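/* Horizontal 8-tap filter plus averaging for 4-pixel-wide blocks.  The four
 * packed tap pairs (vector1b..vector4b) are read straight from the int16_t
 * filter array, each output is the rounded and clipped convolution of eight
 * source pixels (cm is the clipping table), and the result is averaged with
 * the byte already in dst using addqh_r.w (round-half-up), which is the
 * defining step of the _avg_ convolve variants. */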
static void convolve_avg_horiz_4_dspr2(const uint8_t *src, int32_t src_stride,
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_x0, int32_t h) {
  int32_t y;
  uint8_t *cm = vpx_ff_cropTbl;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2, Temp3, Temp4;
  uint32_t vector4a = 64;
  uint32_t tp1, tp2;
  uint32_t p1, p2, p3, p4;
  uint32_t n1, n2, n3, n4;
  uint32_t tn1, tn2;

  vector1b = ((const int32_t *)filter_x0)[0];
  vector2b = ((const int32_t *)filter_x0)[1];
  vector3b = ((const int32_t *)filter_x0)[2];
  vector4b = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src + src_stride);
    prefetch_load(src + src_stride + 32);
    prefetch_store(dst + dst_stride);

    __asm__ __volatile__(
        "ulw %[tp1], 0(%[src]) \n\t"
        "ulw %[tp2], 4(%[src]) \n\t"

        /* even 1. pixel */
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "preceu.ph.qbr %[p1], %[tp1] \n\t"
        "preceu.ph.qbl %[p2], %[tp1] \n\t"
        "preceu.ph.qbr %[p3], %[tp2] \n\t"
        "preceu.ph.qbl %[p4], %[tp2] \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "ulw %[tn2], 8(%[src]) \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp1], $ac3, 31 \n\t"

        /* even 2. pixel */
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tn2] \n\t"
        "balign %[tn1], %[tn2], 3 \n\t"
        "balign %[tn2], %[tp2], 3 \n\t"
        "balign %[tp2], %[tp1], 3 \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
        "extp %[Temp3], $ac2, 31 \n\t"
        "lbu %[p2], 3(%[dst]) \n\t" /* load odd 2 */

        /* odd 1. pixel */
        "lbux %[tp1], %[Temp1](%[cm]) \n\t" /* even 1 */
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "lbu %[Temp1], 1(%[dst]) \n\t" /* load odd 1 */
        "preceu.ph.qbr %[n1], %[tp2] \n\t"
        "preceu.ph.qbl %[n2], %[tp2] \n\t"
        "preceu.ph.qbr %[n3], %[tn2] \n\t"
        "preceu.ph.qbl %[n4], %[tn2] \n\t"
        "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[n4], %[vector4b] \n\t"
        "extp %[Temp2], $ac3, 31 \n\t"
        "lbu %[tn2], 0(%[dst]) \n\t" /* load even 1 */

        /* odd 2. pixel */
        "lbux %[tp2], %[Temp3](%[cm]) \n\t" /* even 2 */
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[n1], %[tn1] \n\t"
        "lbux %[tn1], %[Temp2](%[cm]) \n\t" /* odd 1 */
        "addqh_r.w %[tn2], %[tn2], %[tp1] \n\t" /* average even 1 */
        "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[n1], %[vector4b] \n\t"
        "extp %[Temp4], $ac2, 31 \n\t"
        "lbu %[tp1], 2(%[dst]) \n\t" /* load even 2 */
        "sb %[tn2], 0(%[dst]) \n\t" /* store even 1 */

        /* clamp */
        "addqh_r.w %[Temp1], %[Temp1], %[tn1] \n\t" /* average odd 1 */
        "lbux %[n2], %[Temp4](%[cm]) \n\t" /* odd 2 */
        "sb %[Temp1], 1(%[dst]) \n\t" /* store odd 1 */
        "addqh_r.w %[tp1], %[tp1], %[tp2] \n\t" /* average even 2 */
        "sb %[tp1], 2(%[dst]) \n\t" /* store even 2 */
        "addqh_r.w %[p2], %[p2], %[n2] \n\t" /* average odd 2 */
        "sb %[p2], 3(%[dst]) \n\t" /* store odd 2 */

        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
          [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
          [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3),
          [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
          [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4)
        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
          [src] "r"(src));

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
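/* The same filter-and-average operation for 8-pixel-wide blocks: four even
 * and four odd outputs are produced per row, rotating through the three DSP
 * accumulators ($ac1..$ac3) so loads, dot products and stores overlap. */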
static void convolve_avg_horiz_8_dspr2(const uint8_t *src, int32_t src_stride,
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_x0, int32_t h) {
  int32_t y;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2, Temp3;
  uint32_t tp1, tp2;
  uint32_t p1, p2, p3, p4, n1;
  uint32_t tn1, tn2, tn3;
  uint32_t st0, st1;

  vector1b = ((const int32_t *)filter_x0)[0];
  vector2b = ((const int32_t *)filter_x0)[1];
  vector3b = ((const int32_t *)filter_x0)[2];
  vector4b = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_load(src + src_stride);
    prefetch_load(src + src_stride + 32);
    prefetch_store(dst + dst_stride);

    __asm__ __volatile__(
        "ulw %[tp1], 0(%[src]) \n\t"
        "ulw %[tp2], 4(%[src]) \n\t"

        /* even 1. pixel */
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tp1] \n\t"
        "preceu.ph.qbl %[p2], %[tp1] \n\t"
        "preceu.ph.qbr %[p3], %[tp2] \n\t"
        "preceu.ph.qbl %[p4], %[tp2] \n\t"
        "ulw %[tn2], 8(%[src]) \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp1], $ac3, 31 \n\t"
        "lbu %[Temp2], 0(%[dst]) \n\t"
        "lbu %[tn3], 2(%[dst]) \n\t"

        /* even 2. pixel */
        "preceu.ph.qbr %[p1], %[tn2] \n\t"
        "preceu.ph.qbl %[n1], %[tn2] \n\t"
        "ulw %[tn1], 12(%[src]) \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
        "extp %[Temp3], $ac2, 31 \n\t"

        /* even 3. pixel */
        "lbux %[st0], %[Temp1](%[cm]) \n\t"
        "mtlo %[vector4a], $ac1 \n\t"
        "mthi $zero, $ac1 \n\t"
        "preceu.ph.qbr %[p2], %[tn1] \n\t"
        "lbux %[st1], %[Temp3](%[cm]) \n\t"
        "dpa.w.ph $ac1, %[p3], %[vector1b] \n\t"
        "dpa.w.ph $ac1, %[p4], %[vector2b] \n\t"
        "dpa.w.ph $ac1, %[p1], %[vector3b] \n\t"
        "dpa.w.ph $ac1, %[n1], %[vector4b] \n\t"
        "extp %[Temp1], $ac1, 31 \n\t"
        "addqh_r.w %[Temp2], %[Temp2], %[st0] \n\t"
        "addqh_r.w %[tn3], %[tn3], %[st1] \n\t"
        "sb %[Temp2], 0(%[dst]) \n\t"
        "sb %[tn3], 2(%[dst]) \n\t"

        /* even 4. pixel */
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "balign %[tn3], %[tn1], 3 \n\t"
        "balign %[tn1], %[tn2], 3 \n\t"
        "balign %[tn2], %[tp2], 3 \n\t"
        "balign %[tp2], %[tp1], 3 \n\t"
        "lbux %[st0], %[Temp1](%[cm]) \n\t"
        "lbu %[Temp2], 4(%[dst]) \n\t"
        "addqh_r.w %[Temp2], %[Temp2], %[st0] \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
        "extp %[Temp3], $ac2, 31 \n\t"

        /* odd 1. pixel */
        "mtlo %[vector4a], $ac1 \n\t"
        "mthi $zero, $ac1 \n\t"
        "sb %[Temp2], 4(%[dst]) \n\t"
        "preceu.ph.qbr %[p1], %[tp2] \n\t"
        "preceu.ph.qbl %[p2], %[tp2] \n\t"
        "preceu.ph.qbr %[p3], %[tn2] \n\t"
        "preceu.ph.qbl %[p4], %[tn2] \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
        "extp %[Temp2], $ac3, 31 \n\t"
        "lbu %[tp1], 6(%[dst]) \n\t"

        /* odd 2. pixel */
        "mtlo %[vector4a], $ac3 \n\t"
        "mthi $zero, $ac3 \n\t"
        "mtlo %[vector4a], $ac2 \n\t"
        "mthi $zero, $ac2 \n\t"
        "preceu.ph.qbr %[p1], %[tn1] \n\t"
        "preceu.ph.qbl %[n1], %[tn1] \n\t"
        "lbux %[st0], %[Temp3](%[cm]) \n\t"
        "dpa.w.ph $ac1, %[p2], %[vector1b] \n\t"
        "dpa.w.ph $ac1, %[p3], %[vector2b] \n\t"
        "dpa.w.ph $ac1, %[p4], %[vector3b] \n\t"
        "dpa.w.ph $ac1, %[p1], %[vector4b] \n\t"
        "extp %[Temp3], $ac1, 31 \n\t"
        "lbu %[tp2], 1(%[dst]) \n\t"
        "lbu %[tn2], 3(%[dst]) \n\t"
        "addqh_r.w %[tp1], %[tp1], %[st0] \n\t"

        /* odd 3. pixel */
        "lbux %[st1], %[Temp2](%[cm]) \n\t"
        "preceu.ph.qbr %[p2], %[tn3] \n\t"
        "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t"
        "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t"
        "dpa.w.ph $ac3, %[p1], %[vector3b] \n\t"
        "dpa.w.ph $ac3, %[n1], %[vector4b] \n\t"
        "addqh_r.w %[tp2], %[tp2], %[st1] \n\t"
        "extp %[Temp2], $ac3, 31 \n\t"
        "lbu %[tn3], 5(%[dst]) \n\t"

        /* odd 4. pixel */
        "sb %[tp2], 1(%[dst]) \n\t"
        "sb %[tp1], 6(%[dst]) \n\t"
        "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
        "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
        "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
        "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
        "extp %[Temp1], $ac2, 31 \n\t"
        "lbu %[tn1], 7(%[dst]) \n\t"

        /* clamp */
        "lbux %[p4], %[Temp3](%[cm]) \n\t"
        "addqh_r.w %[tn2], %[tn2], %[p4] \n\t"
        "lbux %[p2], %[Temp2](%[cm]) \n\t"
        "addqh_r.w %[tn3], %[tn3], %[p2] \n\t"
        "lbux %[n1], %[Temp1](%[cm]) \n\t"
        "addqh_r.w %[tn1], %[tn1], %[n1] \n\t"

        /* store bytes */
        "sb %[tn2], 3(%[dst]) \n\t"
        "sb %[tn3], 5(%[dst]) \n\t"
        "sb %[tn1], 7(%[dst]) \n\t"

        : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
          [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0),
          [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
          [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1),
          [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3)
        : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
          [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
          [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst),
          [src] "r"(src));

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
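/* Filter-and-average for rows that are a multiple of 16 pixels wide; count
 * is the number of 16-pixel groups per row (1 for w == 16, 2 for w == 32,
 * see the dispatch in vpx_convolve8_avg_horiz_dspr2 below). */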
static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
                                        int32_t src_stride, uint8_t *dst_ptr,
                                        int32_t dst_stride,
                                        const int16_t *filter_x0, int32_t h,
                                        int32_t count) {
  int32_t y, c;
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector_64 = 64;
  int32_t filter12, filter34, filter56, filter78;
  int32_t Temp1, Temp2, Temp3;
  uint32_t qload1, qload2, qload3;
  uint32_t p1, p2, p3, p4, p5;
  uint32_t st1, st2, st3;

  filter12 = ((const int32_t *)filter_x0)[0];
  filter34 = ((const int32_t *)filter_x0)[1];
  filter56 = ((const int32_t *)filter_x0)[2];
  filter78 = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    src = src_ptr;
    dst = dst_ptr;

    /* prefetch data to cache memory */
    prefetch_load(src_ptr + src_stride);
    prefetch_load(src_ptr + src_stride + 32);
    prefetch_store(dst_ptr + dst_stride);

    for (c = 0; c < count; c++) {
      __asm__ __volatile__(
          "ulw %[qload1], 0(%[src]) \n\t"
          "ulw %[qload2], 4(%[src]) \n\t"

          /* even 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 1 */
          "mthi $zero, $ac1 \n\t"
          "mtlo %[vector_64], $ac2 \n\t" /* even 2 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "ulw %[qload3], 8(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p2], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p3], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p4], %[filter78] \n\t" /* even 1 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */
          "lbu %[st2], 0(%[dst]) \n\t" /* load even 1 from dst */

          /* even 2. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 3 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p1], %[qload3] \n\t"
          "preceu.ph.qbl %[p5], %[qload3] \n\t"
          "ulw %[qload1], 12(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p2], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p3], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p4], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p1], %[filter78] \n\t" /* even 1 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */
          "lbu %[qload3], 2(%[dst]) \n\t" /* load even 2 from dst */

          /* even 3. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 4 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[st2], %[st2], %[st1] \n\t" /* average even 1 */
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* store even 1 to dst */
          "dpa.w.ph $ac3, %[p3], %[filter12] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p4], %[filter34] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p1], %[filter56] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p5], %[filter78] \n\t" /* even 3 */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */

          /* even 4. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 5 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st2] \n\t" /* average even 2 */
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[qload3], 2(%[dst]) \n\t" /* store even 2 to dst */
          "ulw %[qload2], 16(%[src]) \n\t"
          "lbu %[qload3], 4(%[dst]) \n\t" /* load even 3 from dst */
          "lbu %[qload1], 6(%[dst]) \n\t" /* load even 4 from dst */
          "dpa.w.ph $ac1, %[p4], %[filter12] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p1], %[filter34] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p5], %[filter56] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p2], %[filter78] \n\t" /* even 4 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */

          /* even 5. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 6 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st3] \n\t" /* average even 3 */
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[qload3], 4(%[dst]) \n\t" /* store even 3 to dst */
          "dpa.w.ph $ac2, %[p1], %[filter12] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p5], %[filter34] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p2], %[filter56] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p3], %[filter78] \n\t" /* even 5 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */

          /* even 6. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 7 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[qload1], %[qload1], %[st1] \n\t" /* average even 4 */
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[qload1], 6(%[dst]) \n\t" /* store even 4 to dst */
          "ulw %[qload3], 20(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p5], %[filter12] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* even 6 */
          "lbu %[qload2], 8(%[dst]) \n\t" /* load even 5 from dst */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */

          /* even 7. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 8 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload2], %[qload2], %[st2] \n\t" /* average even 5 */
          "preceu.ph.qbr %[p5], %[qload3] \n\t"
          "sb %[qload2], 8(%[dst]) \n\t" /* store even 5 to dst */
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* even 7 */
          "lbu %[qload3], 10(%[dst]) \n\t" /* load even 6 from dst */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */
          "lbu %[st2], 12(%[dst]) \n\t" /* load even 7 from dst */

          /* even 8. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st3] \n\t" /* average even 6 */
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* even 8 */
          "sb %[qload3], 10(%[dst]) \n\t" /* store even 6 to dst */
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* even 8 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */

          /* ODD pixels */
          "ulw %[qload1], 1(%[src]) \n\t"
          "ulw %[qload2], 5(%[src]) \n\t"
          "addqh_r.w %[st2], %[st2], %[st1] \n\t" /* average even 7 */

          /* odd 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "sb %[st2], 12(%[dst]) \n\t" /* store even 7 to dst */
          "ulw %[qload3], 9(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p1], %[filter12] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* odd 1 */
          "lbu %[qload2], 14(%[dst]) \n\t" /* load even 8 from dst */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* odd 1 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */
          "lbu %[st1], 1(%[dst]) \n\t" /* load odd 1 from dst */

          /* odd 2. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload2], %[qload2], %[st2] \n\t" /* average even 8 */
          "preceu.ph.qbr %[p1], %[qload3] \n\t"
          "preceu.ph.qbl %[p5], %[qload3] \n\t"
          "sb %[qload2], 14(%[dst]) \n\t" /* store even 8 to dst */
          "ulw %[qload1], 13(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* odd 2 */
          "lbu %[qload3], 3(%[dst]) \n\t" /* load odd 2 from dst */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */

          /* odd 3. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[st3], %[st3], %[st1] \n\t" /* average odd 1 */
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* odd 3 */
          "sb %[st3], 1(%[dst]) \n\t" /* store odd 1 to dst */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */

          /* odd 4. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st1] \n\t" /* average odd 2 */
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[qload3], 3(%[dst]) \n\t" /* store odd 2 to dst */
          "lbu %[qload1], 5(%[dst]) \n\t" /* load odd 3 from dst */
          "ulw %[qload2], 17(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p4], %[filter12] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p1], %[filter34] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p5], %[filter56] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p2], %[filter78] \n\t" /* odd 4 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */
          "lbu %[st1], 7(%[dst]) \n\t" /* load odd 4 from dst */

          /* odd 5. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload1], %[qload1], %[st2] \n\t" /* average odd 3 */
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[qload1], 5(%[dst]) \n\t" /* store odd 3 to dst */
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p5], %[filter34] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p2], %[filter56] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p3], %[filter78] \n\t" /* odd 5 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */
          "lbu %[qload1], 9(%[dst]) \n\t" /* load odd 5 from dst */

          /* odd 6. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[st1], %[st1], %[st3] \n\t" /* average odd 4 */
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[st1], 7(%[dst]) \n\t" /* store odd 4 to dst */
          "ulw %[qload3], 21(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p5], %[filter12] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p2], %[filter34] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p3], %[filter56] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p4], %[filter78] \n\t" /* odd 6 */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */

          /* odd 7. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[qload1], %[qload1], %[st1] \n\t" /* average odd 5 */
          "preceu.ph.qbr %[p5], %[qload3] \n\t"
          "sb %[qload1], 9(%[dst]) \n\t" /* store odd 5 to dst */
          "lbu %[qload2], 11(%[dst]) \n\t" /* load odd 6 from dst */
          "dpa.w.ph $ac3, %[p2], %[filter12] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p3], %[filter34] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p4], %[filter56] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p1], %[filter78] \n\t" /* odd 7 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */
          "lbu %[qload3], 13(%[dst]) \n\t" /* load odd 7 from dst */

          /* odd 8. pixel */
          "dpa.w.ph $ac1, %[p3], %[filter12] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p4], %[filter34] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p1], %[filter56] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p5], %[filter78] \n\t" /* odd 8 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */
          "lbu %[qload1], 15(%[dst]) \n\t" /* load odd 8 from dst */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */
          "addqh_r.w %[qload2], %[qload2], %[st2] \n\t" /* average odd 6 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */
          "addqh_r.w %[qload3], %[qload3], %[st3] \n\t" /* average odd 7 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */
          "addqh_r.w %[qload1], %[qload1], %[st1] \n\t" /* average odd 8 */
          "sb %[qload2], 11(%[dst]) \n\t" /* store odd 6 to dst */
          "sb %[qload3], 13(%[dst]) \n\t" /* store odd 7 to dst */
          "sb %[qload1], 15(%[dst]) \n\t" /* store odd 8 to dst */

          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
            [Temp3] "=&r"(Temp3)
          : [filter12] "r"(filter12), [filter34] "r"(filter34),
            [filter56] "r"(filter56), [filter78] "r"(filter78),
            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
            [src] "r"(src));

      src += 16;
      dst += 16;
    }

    /* Next row... */
    src_ptr += src_stride;
    dst_ptr += dst_stride;
  }
}
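/* Identical inner loop to the 16-wide version, fixed at four 16-pixel groups
 * per row for w == 64, with additional prefetches to cover the wider row. */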
static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
                                        int32_t src_stride, uint8_t *dst_ptr,
                                        int32_t dst_stride,
                                        const int16_t *filter_x0, int32_t h) {
  int32_t y, c;
  const uint8_t *src;
  uint8_t *dst;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector_64 = 64;
  int32_t filter12, filter34, filter56, filter78;
  int32_t Temp1, Temp2, Temp3;
  uint32_t qload1, qload2, qload3;
  uint32_t p1, p2, p3, p4, p5;
  uint32_t st1, st2, st3;

  filter12 = ((const int32_t *)filter_x0)[0];
  filter34 = ((const int32_t *)filter_x0)[1];
  filter56 = ((const int32_t *)filter_x0)[2];
  filter78 = ((const int32_t *)filter_x0)[3];

  for (y = h; y--;) {
    src = src_ptr;
    dst = dst_ptr;

    /* prefetch data to cache memory */
    prefetch_load(src_ptr + src_stride);
    prefetch_load(src_ptr + src_stride + 32);
    prefetch_load(src_ptr + src_stride + 64);
    prefetch_store(dst_ptr + dst_stride);
    prefetch_store(dst_ptr + dst_stride + 32);

    for (c = 0; c < 4; c++) {
      __asm__ __volatile__(
          "ulw %[qload1], 0(%[src]) \n\t"
          "ulw %[qload2], 4(%[src]) \n\t"

          /* even 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 1 */
          "mthi $zero, $ac1 \n\t"
          "mtlo %[vector_64], $ac2 \n\t" /* even 2 */
          "mthi $zero, $ac2 \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "ulw %[qload3], 8(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p2], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p3], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac1, %[p4], %[filter78] \n\t" /* even 1 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */
          "lbu %[st2], 0(%[dst]) \n\t" /* load even 1 from dst */

          /* even 2. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 3 */
          "mthi $zero, $ac3 \n\t"
          "preceu.ph.qbr %[p1], %[qload3] \n\t"
          "preceu.ph.qbl %[p5], %[qload3] \n\t"
          "ulw %[qload1], 12(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p2], %[filter12] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p3], %[filter34] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p4], %[filter56] \n\t" /* even 1 */
          "dpa.w.ph $ac2, %[p1], %[filter78] \n\t" /* even 1 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */
          "lbu %[qload3], 2(%[dst]) \n\t" /* load even 2 from dst */

          /* even 3. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 4 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[st2], %[st2], %[st1] \n\t" /* average even 1 */
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "sb %[st2], 0(%[dst]) \n\t" /* store even 1 to dst */
          "dpa.w.ph $ac3, %[p3], %[filter12] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p4], %[filter34] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p1], %[filter56] \n\t" /* even 3 */
          "dpa.w.ph $ac3, %[p5], %[filter78] \n\t" /* even 3 */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */

          /* even 4. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 5 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st2] \n\t" /* average even 2 */
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[qload3], 2(%[dst]) \n\t" /* store even 2 to dst */
          "ulw %[qload2], 16(%[src]) \n\t"
          "lbu %[qload3], 4(%[dst]) \n\t" /* load even 3 from dst */
          "lbu %[qload1], 6(%[dst]) \n\t" /* load even 4 from dst */
          "dpa.w.ph $ac1, %[p4], %[filter12] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p1], %[filter34] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p5], %[filter56] \n\t" /* even 4 */
          "dpa.w.ph $ac1, %[p2], %[filter78] \n\t" /* even 4 */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */

          /* even 5. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* even 6 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st3] \n\t" /* average even 3 */
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[qload3], 4(%[dst]) \n\t" /* store even 3 to dst */
          "dpa.w.ph $ac2, %[p1], %[filter12] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p5], %[filter34] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p2], %[filter56] \n\t" /* even 5 */
          "dpa.w.ph $ac2, %[p3], %[filter78] \n\t" /* even 5 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */

          /* even 6. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* even 7 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[qload1], %[qload1], %[st1] \n\t" /* average even 4 */
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[qload1], 6(%[dst]) \n\t" /* store even 4 to dst */
          "ulw %[qload3], 20(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p5], %[filter12] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* even 6 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* even 6 */
          "lbu %[qload2], 8(%[dst]) \n\t" /* load even 5 from dst */
          "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */

          /* even 7. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* even 8 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload2], %[qload2], %[st2] \n\t" /* average even 5 */
          "preceu.ph.qbr %[p5], %[qload3] \n\t"
          "sb %[qload2], 8(%[dst]) \n\t" /* store even 5 to dst */
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* even 7 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* even 7 */
          "lbu %[qload3], 10(%[dst]) \n\t" /* load even 6 from dst */
          "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */
          "lbu %[st2], 12(%[dst]) \n\t" /* load even 7 from dst */

          /* even 8. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st3] \n\t" /* average even 6 */
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* even 8 */
          "sb %[qload3], 10(%[dst]) \n\t" /* store even 6 to dst */
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* even 8 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* even 8 */
          "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */

          /* ODD pixels */
          "ulw %[qload1], 1(%[src]) \n\t"
          "ulw %[qload2], 5(%[src]) \n\t"
          "addqh_r.w %[st2], %[st2], %[st1] \n\t" /* average even 7 */

          /* odd 1. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */
          "mthi $zero, $ac1 \n\t"
          "preceu.ph.qbr %[p1], %[qload1] \n\t"
          "preceu.ph.qbl %[p2], %[qload1] \n\t"
          "preceu.ph.qbr %[p3], %[qload2] \n\t"
          "preceu.ph.qbl %[p4], %[qload2] \n\t"
          "sb %[st2], 12(%[dst]) \n\t" /* store even 7 to dst */
          "ulw %[qload3], 9(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p1], %[filter12] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* odd 1 */
          "lbu %[qload2], 14(%[dst]) \n\t" /* load even 8 from dst */
          "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* odd 1 */
          "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* odd 1 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */
          "lbu %[st1], 1(%[dst]) \n\t" /* load odd 1 from dst */

          /* odd 2. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload2], %[qload2], %[st2] \n\t" /* average even 8 */
          "preceu.ph.qbr %[p1], %[qload3] \n\t"
          "preceu.ph.qbl %[p5], %[qload3] \n\t"
          "sb %[qload2], 14(%[dst]) \n\t" /* store even 8 to dst */
          "ulw %[qload1], 13(%[src]) \n\t"
          "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* odd 2 */
          "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* odd 2 */
          "lbu %[qload3], 3(%[dst]) \n\t" /* load odd 2 from dst */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */

          /* odd 3. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[st3], %[st3], %[st1] \n\t" /* average odd 1 */
          "preceu.ph.qbr %[p2], %[qload1] \n\t"
          "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* odd 3 */
          "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* odd 3 */
          "sb %[st3], 1(%[dst]) \n\t" /* store odd 1 to dst */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */

          /* odd 4. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[qload3], %[qload3], %[st1] \n\t" /* average odd 2 */
          "preceu.ph.qbl %[p3], %[qload1] \n\t"
          "sb %[qload3], 3(%[dst]) \n\t" /* store odd 2 to dst */
          "lbu %[qload1], 5(%[dst]) \n\t" /* load odd 3 from dst */
          "ulw %[qload2], 17(%[src]) \n\t"
          "dpa.w.ph $ac3, %[p4], %[filter12] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p1], %[filter34] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p5], %[filter56] \n\t" /* odd 4 */
          "dpa.w.ph $ac3, %[p2], %[filter78] \n\t" /* odd 4 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */
          "lbu %[st1], 7(%[dst]) \n\t" /* load odd 4 from dst */

          /* odd 5. pixel */
          "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */
          "mthi $zero, $ac2 \n\t"
          "addqh_r.w %[qload1], %[qload1], %[st2] \n\t" /* average odd 3 */
          "preceu.ph.qbr %[p4], %[qload2] \n\t"
          "sb %[qload1], 5(%[dst]) \n\t" /* store odd 3 to dst */
          "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p5], %[filter34] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p2], %[filter56] \n\t" /* odd 5 */
          "dpa.w.ph $ac1, %[p3], %[filter78] \n\t" /* odd 5 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */
          "lbu %[qload1], 9(%[dst]) \n\t" /* load odd 5 from dst */

          /* odd 6. pixel */
          "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */
          "mthi $zero, $ac3 \n\t"
          "addqh_r.w %[st1], %[st1], %[st3] \n\t" /* average odd 4 */
          "preceu.ph.qbl %[p1], %[qload2] \n\t"
          "sb %[st1], 7(%[dst]) \n\t" /* store odd 4 to dst */
          "ulw %[qload3], 21(%[src]) \n\t"
          "dpa.w.ph $ac2, %[p5], %[filter12] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p2], %[filter34] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p3], %[filter56] \n\t" /* odd 6 */
          "dpa.w.ph $ac2, %[p4], %[filter78] \n\t" /* odd 6 */
          "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */

          /* odd 7. pixel */
          "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */
          "mthi $zero, $ac1 \n\t"
          "addqh_r.w %[qload1], %[qload1], %[st1] \n\t" /* average odd 5 */
          "preceu.ph.qbr %[p5], %[qload3] \n\t"
          "sb %[qload1], 9(%[dst]) \n\t" /* store odd 5 to dst */
          "lbu %[qload2], 11(%[dst]) \n\t" /* load odd 6 from dst */
          "dpa.w.ph $ac3, %[p2], %[filter12] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p3], %[filter34] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p4], %[filter56] \n\t" /* odd 7 */
          "dpa.w.ph $ac3, %[p1], %[filter78] \n\t" /* odd 7 */
          "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */
          "lbu %[qload3], 13(%[dst]) \n\t" /* load odd 7 from dst */

          /* odd 8. pixel */
          "dpa.w.ph $ac1, %[p3], %[filter12] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p4], %[filter34] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p1], %[filter56] \n\t" /* odd 8 */
          "dpa.w.ph $ac1, %[p5], %[filter78] \n\t" /* odd 8 */
          "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */
          "lbu %[qload1], 15(%[dst]) \n\t" /* load odd 8 from dst */
          "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */
          "addqh_r.w %[qload2], %[qload2], %[st2] \n\t" /* average odd 6 */
          "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */
          "addqh_r.w %[qload3], %[qload3], %[st3] \n\t" /* average odd 7 */
          "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */
          "addqh_r.w %[qload1], %[qload1], %[st1] \n\t" /* average odd 8 */
          "sb %[qload2], 11(%[dst]) \n\t" /* store odd 6 to dst */
          "sb %[qload3], 13(%[dst]) \n\t" /* store odd 7 to dst */
          "sb %[qload1], 15(%[dst]) \n\t" /* store odd 8 to dst */

          : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1),
            [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3),
            [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
            [Temp3] "=&r"(Temp3)
          : [filter12] "r"(filter12), [filter34] "r"(filter34),
            [filter56] "r"(filter56), [filter78] "r"(filter78),
            [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst),
            [src] "r"(src));

      src += 16;
      dst += 16;
    }

    /* Next row... */
    src_ptr += src_stride;
    dst_ptr += dst_stride;
  }
}
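/* Public entry point.  Programs the DSP control register position used by
 * the extp extractions above (wrdsp, pos = 38), prefetches the first rows,
 * and dispatches on block width; 2-tap filters and unsupported widths fall
 * back to vpx_convolve2_avg_horiz_dspr2 and the C implementation. */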
void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const InterpKernel *filter, int x0_q4,
                                   int32_t x_step_q4, int y0_q4, int y_step_q4,
                                   int w, int h) {
  const int16_t *const filter_x = filter[x0_q4];
  assert(x_step_q4 == 16);
  assert(((const int32_t *)filter_x)[1] != 0x800000);

  if (vpx_get_filter_taps(filter_x) == 2) {
    vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter,
                                  x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
  } else {
    uint32_t pos = 38;

    src -= 3;

    /* bit position for extract from acc */
    __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
                         :
                         : [pos] "r"(pos));

    /* prefetch data to cache memory */
    prefetch_load(src);
    prefetch_load(src + 32);
    prefetch_store(dst);

    switch (w) {
      case 4:
        convolve_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                   h);
        break;
      case 8:
        convolve_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                   h);
        break;
      case 16:
        convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                    h, 1);
        break;
      case 32:
        convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                    h, 2);
        break;
      case 64:
        prefetch_load(src + 64);
        prefetch_store(dst + 32);

        convolve_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                    h);
        break;
      default:
        vpx_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride, filter,
                                  x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}
#endif