/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
                                      uint8_t *dst, int32_t dst_stride,
                                      const int16_t *filter_y, int32_t w,
                                      int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

  /* Read the eight 16-bit taps as four words; each word packs two adjacent
     taps for the paired multiply-accumulates (dpa.w.ph) below. */
  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

  /* Back up to the first of the eight source rows the vertical filter reads
     for each output row. */
  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);

    for (x = 0; x < w; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      /* Accumulators $ac0..$ac3 each gather the 8-tap sum for one of four
         output pixels, seeded with 64 so the later right shift rounds; the
         clamped results are rounding-averaged with the existing dst bytes. */
      __asm__ __volatile__(
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "mtlo %[vector4a], $ac0 \n\t"
          "mtlo %[vector4a], $ac1 \n\t"
          "mtlo %[vector4a], $ac2 \n\t"
          "mtlo %[vector4a], $ac3 \n\t"
          "mthi $zero, $ac0 \n\t"
          "mthi $zero, $ac1 \n\t"
          "mthi $zero, $ac2 \n\t"
          "mthi $zero, $ac3 \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector2b] \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"

          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector4b] \n\t"
          "extp %[Temp1], $ac0, 31 \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector4b] \n\t"
          "extp %[Temp2], $ac1, 31 \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch1], 0(%[dst_ptr]) \n\t"
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch2], 1(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 1 */
          "extp %[Temp1], $ac2, 31 \n\t"

          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector4b] \n\t"
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 2 */
          "extp %[Temp2], $ac3, 31 \n\t"
          "lbu %[scratch1], 2(%[dst_ptr]) \n\t"

          "sb %[store1], 0(%[dst_ptr]) \n\t"
          "sb %[store2], 1(%[dst_ptr]) \n\t"
          "lbu %[scratch2], 3(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 3 */
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 4 */

          "sb %[store1], 2(%[dst_ptr]) \n\t"
          "sb %[store2], 3(%[dst_ptr]) \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
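
/* For reference, a minimal scalar sketch of what the assembly above computes,
 * kept compiled out. The helper name convolve_avg_vert_ref is illustrative
 * and not part of libvpx; it assumes the same contract as
 * convolve_avg_vert_4_dspr2: an 8-tap vertical filter whose sum is rounded by
 * the 64 seeded into each accumulator, shifted right by 7 (FILTER_BITS),
 * clamped to 8 bits, then rounding-averaged into dst. */
#if 0
static void convolve_avg_vert_ref(const uint8_t *src, int32_t src_stride,
                                  uint8_t *dst, int32_t dst_stride,
                                  const int16_t *filter_y, int32_t w,
                                  int32_t h) {
  int32_t x, y, k;

  src -= 3 * src_stride;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int32_t sum = 64; /* rounding term; matches vector4a */
      for (k = 0; k < 8; ++k) sum += src[k * src_stride + x] * filter_y[k];
      sum = clip_pixel(sum >> 7);       /* extp ..., 31 plus the cm lookup */
      dst[x] = (dst[x] + sum + 1) >> 1; /* addqh_r.w rounding average */
    }
    src += src_stride;
    dst += dst_stride;
  }
}
#endif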
static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_y, int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);
    prefetch_store(dst + dst_stride + 32);

    for (x = 0; x < 64; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "mtlo %[vector4a], $ac0 \n\t"
          "mtlo %[vector4a], $ac1 \n\t"
          "mtlo %[vector4a], $ac2 \n\t"
          "mtlo %[vector4a], $ac3 \n\t"
          "mthi $zero, $ac0 \n\t"
          "mthi $zero, $ac1 \n\t"
          "mthi $zero, $ac2 \n\t"
          "mthi $zero, $ac3 \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector2b] \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"

          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector4b] \n\t"
          "extp %[Temp1], $ac0, 31 \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector4b] \n\t"
          "extp %[Temp2], $ac1, 31 \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch1], 0(%[dst_ptr]) \n\t"
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch2], 1(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 1 */
          "extp %[Temp1], $ac2, 31 \n\t"

          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector4b] \n\t"
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 2 */
          "extp %[Temp2], $ac3, 31 \n\t"
          "lbu %[scratch1], 2(%[dst_ptr]) \n\t"

          "sb %[store1], 0(%[dst_ptr]) \n\t"
          "sb %[store2], 1(%[dst_ptr]) \n\t"
          "lbu %[scratch2], 3(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 3 */
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 4 */

          "sb %[store1], 2(%[dst_ptr]) \n\t"
          "sb %[store2], 3(%[dst_ptr]) \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const InterpKernel *filter, int x0_q4,
                                  int32_t x_step_q4, int y0_q4, int y_step_q4,
                                  int w, int h) {
  const int16_t *const filter_y = filter[y0_q4];

  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  if (vpx_get_filter_taps(filter_y) == 2) {
    vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
  } else {
    uint32_t pos = 38;

    /* Bit position for extract from acc: with pos = 38, "extp ..., 31"
       returns accumulator bits 38..7, i.e. the filtered sum shifted right by
       FILTER_BITS (7). */
    __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
                         :
                         : [pos] "r"(pos));

    prefetch_store(dst);

    switch (w) {
      case 4:
      case 8:
      case 16:
      case 32:
        convolve_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y,
                                  w, h);
        break;
      case 64:
        prefetch_store(dst + 32);
        convolve_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
                                   h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}
void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel *filter, int x0_q4,
                             int32_t x_step_q4, int y0_q4, int y_step_q4, int w,
                             int h) {
  /* Fixed size intermediate buffer places limits on parameters. */
  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;

  assert(w <= 64);
  assert(h <= 64);
  assert(x_step_q4 == 16);
  assert(y_step_q4 == 16);

  if (intermediate_height < h) intermediate_height = h;

  vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter,
                      x0_q4, x_step_q4, y0_q4, y_step_q4, w,
                      intermediate_height);

  vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter, x0_q4,
                         x_step_q4, y0_q4, y_step_q4, w, h);
}
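
/* A worked check of the sizing above, under this file's asserts (explanatory
 * only, not used by the code): with y_step_q4 == 16 the vertical pass
 * consumes one new source row per output row, so the horizontal pass must
 * emit ((h * 16) >> 4) + 7 = h + 7 rows, the 7 extra covering the 8-tap
 * vertical filter's support. For h <= 64 that is at most 71 rows; the
 * 64 * 135 buffer matches the larger bound ((64 * 32) >> 4) + 7 == 135 used
 * by the scaling convolve variants elsewhere in libvpx. */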
void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const InterpKernel *filter, int x0_q4,
                            int32_t x_step_q4, int y0_q4, int y_step_q4, int w,
                            int h) {
  int x, y;
  uint32_t tp1, tp2, tn1, tp3, tp4, tn2;

  (void)filter;
  (void)x0_q4;
  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

  /* prefetch data to cache memory */
  prefetch_load(src);
  prefetch_load(src + 32);
  prefetch_store(dst);

  switch (w) {
    case 4:
      /* 1 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 8:
      /* 2 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 16:
      /* 4 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 8(%[src]) \n\t"
            "ulw %[tp2], 8(%[dst]) \n\t"
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            "ulw %[tp3], 12(%[src]) \n\t"
            "ulw %[tp4], 12(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 8(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 12(%[dst]) \n\t"             /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 32:
      /* 8 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 8(%[src]) \n\t"
            "ulw %[tp2], 8(%[dst]) \n\t"
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            "ulw %[tp3], 12(%[src]) \n\t"
            "ulw %[tp4], 12(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 16(%[src]) \n\t"
            "ulw %[tp2], 16(%[dst]) \n\t"
            "sw %[tn1], 8(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 12(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 20(%[src]) \n\t"
            "ulw %[tp4], 20(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 24(%[src]) \n\t"
            "ulw %[tp2], 24(%[dst]) \n\t"
            "sw %[tn1], 16(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 20(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 28(%[src]) \n\t"
            "ulw %[tp4], 28(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 24(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 28(%[dst]) \n\t"             /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 64:
      prefetch_load(src + 64);
      prefetch_store(dst + 32);

      /* 16 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_load(src + src_stride + 64);
        prefetch_store(dst + dst_stride);
        prefetch_store(dst + dst_stride + 32);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 8(%[src]) \n\t"
            "ulw %[tp2], 8(%[dst]) \n\t"
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            "ulw %[tp3], 12(%[src]) \n\t"
            "ulw %[tp4], 12(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 16(%[src]) \n\t"
            "ulw %[tp2], 16(%[dst]) \n\t"
            "sw %[tn1], 8(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 12(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 20(%[src]) \n\t"
            "ulw %[tp4], 20(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 24(%[src]) \n\t"
            "ulw %[tp2], 24(%[dst]) \n\t"
            "sw %[tn1], 16(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 20(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 28(%[src]) \n\t"
            "ulw %[tp4], 28(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 32(%[src]) \n\t"
            "ulw %[tp2], 32(%[dst]) \n\t"
            "sw %[tn1], 24(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 28(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 36(%[src]) \n\t"
            "ulw %[tp4], 36(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 40(%[src]) \n\t"
            "ulw %[tp2], 40(%[dst]) \n\t"
            "sw %[tn1], 32(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 36(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 44(%[src]) \n\t"
            "ulw %[tp4], 44(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 48(%[src]) \n\t"
            "ulw %[tp2], 48(%[dst]) \n\t"
            "sw %[tn1], 40(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 44(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 52(%[src]) \n\t"
            "ulw %[tp4], 52(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 56(%[src]) \n\t"
            "ulw %[tp2], 56(%[dst]) \n\t"
            "sw %[tn1], 48(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 52(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 60(%[src]) \n\t"
            "ulw %[tp4], 60(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 56(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 60(%[dst]) \n\t"             /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    default:
      for (y = h; y > 0; --y) {
        for (x = 0; x < w; ++x) {
          dst[x] = (dst[x] + src[x] + 1) >> 1;
        }
        src += src_stride;
        dst += dst_stride;
      }
      break;
  }
}
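
/* For reference, a compiled-out scalar model of the adduh_r.qb instruction
 * used throughout the function above: it computes a per-byte rounding average
 * of two 32-bit words, which is why the word-wide cases produce the same
 * result as the byte loop in the default case. The helper name is
 * illustrative, not part of libvpx. */
#if 0
static uint32_t adduh_r_qb_ref(uint32_t a, uint32_t b) {
  uint32_t result = 0;
  int i;

  for (i = 0; i < 4; ++i) {
    const uint32_t av = (a >> (8 * i)) & 0xff;
    const uint32_t bv = (b >> (8 * i)) & 0xff;
    result |= ((av + bv + 1) >> 1) << (8 * i); /* rounding halving add */
  }
  return result;
}
#endif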
#endif