idctllm_dspr2.c
  1. /*
  2. * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "vp8_rtcd.h"
  11. #if HAVE_DSPR2
  12. #define CROP_WIDTH 256
  13. /******************************************************************************
  14. * Notes:
  15. *
  16. * This implementation makes use of 16 bit fixed point version of two multiply
  17. * constants:
  18. * 1. sqrt(2) * cos (pi/8)
  19. * 2. sqrt(2) * sin (pi/8)
  20. * Since the first constant is bigger than 1, to maintain the same 16 bit
  21. * fixed point precision as the second one, we use a trick of
  22. * x * a = x + x*(a-1)
  23. * so
  24. * x * sqrt(2) * cos (pi/8) = x + x * (sqrt(2) *cos(pi/8)-1).
  25. ****************************************************************************/
/* Clamping table defined elsewhere; cm = ff_cropTbl + CROP_WIDTH is indexed
 * with possibly-negative sums, so the table is padded by CROP_WIDTH on each
 * side of the 256-entry core. */
extern unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH];
/* 16-bit fixed point constant: (sqrt(2) * cos(pi/8) - 1) * 2^16 (see the
 * header note above: the -1 keeps the value below 1.0 for 16-bit precision). */
static const int cospi8sqrt2minus1 = 20091;
/* 16-bit fixed point constant: sqrt(2) * sin(pi/8) * 2^16. */
static const int sinpi8sqrt2 = 35468;
  29. inline void prefetch_load_short(short *src) {
  30. __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src));
  31. }
/* 4x4 inverse DCT for one block of dequantized coefficients.
 *
 * Two fully-unrolled passes: the first operates on columns (elements
 * 0/4/8/12 etc. of the row-major 4x4 input), the second on rows of the
 * intermediate, applying the +4 rounding and >>3 normalization.  The
 * reconstructed residual is then added to the prediction and clamped to
 * 8 bits through the ff_cropTbl lookup.
 *
 * input       - 16 dequantized transform coefficients (row-major 4x4)
 * pred_ptr    - 4x4 prediction block, pred_stride bytes per row
 * dst_ptr     - 4x4 output block, dst_stride bytes per row
 *
 * The multiply constants are the 16-bit fixed-point values documented at
 * the top of this file; x*cos term is computed as x + x*(const-1) to keep
 * 16-bit precision (see header note).
 */
void vp8_short_idct4x4llm_dspr2(short *input, unsigned char *pred_ptr,
                                int pred_stride, unsigned char *dst_ptr,
                                int dst_stride) {
  int r, c;
  int a1, b1, c1, d1;
  short output[16]; /* intermediate / final residual, row-major 4x4 */
  short *ip = input;
  short *op = output;
  int temp1, temp2;
  int shortpitch = 4; /* pitch (in shorts) of one row of the 4x4 block */
  int c2, d2;
  int temp3, temp4;
  /* cm[v] clamps v into an unsigned char; table is offset so negative
   * indices down to -CROP_WIDTH are valid. */
  unsigned char *cm = ff_cropTbl + CROP_WIDTH;

  /* prepare data for load */
  prefetch_load_short(ip + 8);

  /* first loop is unrolled: vertical (column) butterflies.
   * Columns 0 and 1 are processed together (c1/d1 for column 0,
   * c2/d2 for column 1), then columns 2 and 3. */
  a1 = ip[0] + ip[8];
  b1 = ip[0] - ip[8];

  temp1 = (ip[4] * sinpi8sqrt2) >> 16;
  temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
  c1 = temp1 - temp2;

  temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
  temp2 = (ip[12] * sinpi8sqrt2) >> 16;
  d1 = temp1 + temp2;

  temp3 = (ip[5] * sinpi8sqrt2) >> 16;
  temp4 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
  c2 = temp3 - temp4;

  temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
  temp4 = (ip[13] * sinpi8sqrt2) >> 16;
  d2 = temp3 + temp4;

  op[0] = a1 + d1;
  op[12] = a1 - d1;
  op[4] = b1 + c1;
  op[8] = b1 - c1;

  a1 = ip[1] + ip[9];
  b1 = ip[1] - ip[9];

  op[1] = a1 + d2;
  op[13] = a1 - d2;
  op[5] = b1 + c2;
  op[9] = b1 - c2;

  /* columns 2 and 3 */
  a1 = ip[2] + ip[10];
  b1 = ip[2] - ip[10];

  temp1 = (ip[6] * sinpi8sqrt2) >> 16;
  temp2 = ip[14] + ((ip[14] * cospi8sqrt2minus1) >> 16);
  c1 = temp1 - temp2;

  temp1 = ip[6] + ((ip[6] * cospi8sqrt2minus1) >> 16);
  temp2 = (ip[14] * sinpi8sqrt2) >> 16;
  d1 = temp1 + temp2;

  temp3 = (ip[7] * sinpi8sqrt2) >> 16;
  temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
  c2 = temp3 - temp4;

  temp3 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
  temp4 = (ip[15] * sinpi8sqrt2) >> 16;
  d2 = temp3 + temp4;

  op[2] = a1 + d1;
  op[14] = a1 - d1;
  op[6] = b1 + c1;
  op[10] = b1 - c1;

  a1 = ip[3] + ip[11];
  b1 = ip[3] - ip[11];

  op[3] = a1 + d2;
  op[15] = a1 - d2;
  op[7] = b1 + c2;
  op[11] = b1 - c2;

  /* second pass reads and writes the intermediate in place */
  ip = output;

  /* prepare data for load */
  prefetch_load_short(ip + shortpitch);

  /* second loop is unrolled: horizontal (row) butterflies with the
   * final +4 rounding and >>3 scaling.  Rows 0 and 1 share one set of
   * c/d terms (c1/d1 for row 0, c2/d2 for row 1), then rows 2 and 3. */
  a1 = ip[0] + ip[2];
  b1 = ip[0] - ip[2];

  temp1 = (ip[1] * sinpi8sqrt2) >> 16;
  temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
  c1 = temp1 - temp2;

  temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
  temp2 = (ip[3] * sinpi8sqrt2) >> 16;
  d1 = temp1 + temp2;

  temp3 = (ip[5] * sinpi8sqrt2) >> 16;
  temp4 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16);
  c2 = temp3 - temp4;

  temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16);
  temp4 = (ip[7] * sinpi8sqrt2) >> 16;
  d2 = temp3 + temp4;

  op[0] = (a1 + d1 + 4) >> 3;
  op[3] = (a1 - d1 + 4) >> 3;
  op[1] = (b1 + c1 + 4) >> 3;
  op[2] = (b1 - c1 + 4) >> 3;

  a1 = ip[4] + ip[6];
  b1 = ip[4] - ip[6];

  op[4] = (a1 + d2 + 4) >> 3;
  op[7] = (a1 - d2 + 4) >> 3;
  op[5] = (b1 + c2 + 4) >> 3;
  op[6] = (b1 - c2 + 4) >> 3;

  /* rows 2 and 3 */
  a1 = ip[8] + ip[10];
  b1 = ip[8] - ip[10];

  temp1 = (ip[9] * sinpi8sqrt2) >> 16;
  temp2 = ip[11] + ((ip[11] * cospi8sqrt2minus1) >> 16);
  c1 = temp1 - temp2;

  temp1 = ip[9] + ((ip[9] * cospi8sqrt2minus1) >> 16);
  temp2 = (ip[11] * sinpi8sqrt2) >> 16;
  d1 = temp1 + temp2;

  temp3 = (ip[13] * sinpi8sqrt2) >> 16;
  temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16);
  c2 = temp3 - temp4;

  temp3 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16);
  temp4 = (ip[15] * sinpi8sqrt2) >> 16;
  d2 = temp3 + temp4;

  op[8] = (a1 + d1 + 4) >> 3;
  op[11] = (a1 - d1 + 4) >> 3;
  op[9] = (b1 + c1 + 4) >> 3;
  op[10] = (b1 - c1 + 4) >> 3;

  a1 = ip[12] + ip[14];
  b1 = ip[12] - ip[14];

  op[12] = (a1 + d2 + 4) >> 3;
  op[15] = (a1 - d2 + 4) >> 3;
  op[13] = (b1 + c2 + 4) >> 3;
  op[14] = (b1 - c2 + 4) >> 3;

  /* reconstruction: dst = clamp(residual + prediction) */
  ip = output;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      short a = ip[c] + pred_ptr[c];
      dst_ptr[c] = cm[a];
    }

    ip += 4;
    dst_ptr += dst_stride;
    pred_ptr += pred_stride;
  }
}
/* Add a DC-only inverse transform result to a 4x4 prediction block.
 *
 * When only the DC coefficient is nonzero, the inverse transform of every
 * pixel is the same constant a1 = (input_dc + 4) >> 3, so the block is
 * reconstructed as clamp(pred + a1) without running the full IDCT.  The
 * DSPr2 quad-byte (SIMD-within-register) instructions process four pixels
 * per iteration; replv.qb replicates |a1| into all four byte lanes and
 * addu_s.qb / subu_s.qb perform per-byte saturating add/subtract, which
 * also supplies the 0..255 clamping for free.
 *
 * NOTE(review): the quad-byte loads/stores assume pred_ptr and dst_ptr
 * rows are 4-byte aligned, as stated in the comments below — confirm at
 * call sites.
 */
void vp8_dc_only_idct_add_dspr2(short input_dc, unsigned char *pred_ptr,
                                int pred_stride, unsigned char *dst_ptr,
                                int dst_stride) {
  int a1;
  int i, absa1;
  int t2, vector_a1, vector_a;

  /* a1 = ((input_dc + 4) >> 3); */
  __asm__ __volatile__(
      "addi %[a1], %[input_dc], 4 \n\t"
      "sra %[a1], %[a1], 3 \n\t"
      : [a1] "=r"(a1)
      : [input_dc] "r"(input_dc));

  if (a1 < 0) {
    /* use quad-byte
     * input and output memory are four byte aligned
     */
    /* negative DC: replicate the absolute value so the saturating
     * byte-wise subtract below computes pred - |a1|. */
    __asm__ __volatile__(
        "abs %[absa1], %[a1] \n\t"
        "replv.qb %[vector_a1], %[absa1] \n\t"
        : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1)
        : [a1] "r"(a1));

    /* use (a1 - predptr[c]) instead a1 + predptr[c] */
    for (i = 4; i--;) {
      /* one iteration = one 4-pixel row: load, saturating subtract,
       * store, advance both pointers by their strides. */
      __asm__ __volatile__(
          "lw %[t2], 0(%[pred_ptr]) \n\t"
          "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t"
          "subu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t"
          "sw %[vector_a], 0(%[dst_ptr]) \n\t"
          "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
          : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a),
            [dst_ptr] "+&r"(dst_ptr), [pred_ptr] "+&r"(pred_ptr)
          : [dst_stride] "r"(dst_stride), [pred_stride] "r"(pred_stride),
            [vector_a1] "r"(vector_a1));
    }
  } else {
    /* use quad-byte
     * input and output memory are four byte aligned
     */
    /* non-negative DC: replicate a1 and do a saturating byte-wise add. */
    __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t"
                         : [vector_a1] "=r"(vector_a1)
                         : [a1] "r"(a1));

    for (i = 4; i--;) {
      /* one iteration = one 4-pixel row: load, saturating add, store. */
      __asm__ __volatile__(
          "lw %[t2], 0(%[pred_ptr]) \n\t"
          "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t"
          "addu_s.qb %[vector_a], %[vector_a1], %[t2] \n\t"
          "sw %[vector_a], 0(%[dst_ptr]) \n\t"
          "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
          : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a),
            [dst_ptr] "+&r"(dst_ptr), [pred_ptr] "+&r"(pred_ptr)
          : [dst_stride] "r"(dst_stride), [pred_stride] "r"(pred_stride),
            [vector_a1] "r"(vector_a1));
    }
  }
}
  214. void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff) {
  215. short output[16];
  216. int i;
  217. int a1, b1, c1, d1;
  218. int a2, b2, c2, d2;
  219. short *ip = input;
  220. short *op = output;
  221. prefetch_load_short(ip);
  222. for (i = 4; i--;) {
  223. a1 = ip[0] + ip[12];
  224. b1 = ip[4] + ip[8];
  225. c1 = ip[4] - ip[8];
  226. d1 = ip[0] - ip[12];
  227. op[0] = a1 + b1;
  228. op[4] = c1 + d1;
  229. op[8] = a1 - b1;
  230. op[12] = d1 - c1;
  231. ip++;
  232. op++;
  233. }
  234. ip = output;
  235. op = output;
  236. prefetch_load_short(ip);
  237. for (i = 4; i--;) {
  238. a1 = ip[0] + ip[3] + 3;
  239. b1 = ip[1] + ip[2];
  240. c1 = ip[1] - ip[2];
  241. d1 = ip[0] - ip[3] + 3;
  242. a2 = a1 + b1;
  243. b2 = d1 + c1;
  244. c2 = a1 - b1;
  245. d2 = d1 - c1;
  246. op[0] = a2 >> 3;
  247. op[1] = b2 >> 3;
  248. op[2] = c2 >> 3;
  249. op[3] = d2 >> 3;
  250. ip += 4;
  251. op += 4;
  252. }
  253. for (i = 0; i < 16; ++i) {
  254. mb_dqcoeff[i * 16] = output[i];
  255. }
  256. }
/* DC-only inverse Walsh-Hadamard transform.
 *
 * When only input[0] is nonzero, every output of the inverse WHT equals
 * a1 = (input[0] + 3) >> 3, so a1 is stored straight into the DC slot of
 * each of the 16 coefficient blocks.  The "sh" byte offsets step by 32
 * bytes = 16 shorts, i.e. mb_dqcoeff[i * 16] for i = 0..15, matching the
 * scatter loop at the end of vp8_short_inv_walsh4x4_dspr2. */
void vp8_short_inv_walsh4x4_1_dspr2(short *input, short *mb_dqcoeff) {
  int a1;

  a1 = ((input[0] + 3) >> 3);
  __asm__ __volatile__(
      "sh %[a1], 0(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 32(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 64(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 96(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 128(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 160(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 192(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 224(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 256(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 288(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 320(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 352(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 384(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 416(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 448(%[mb_dqcoeff]) \n\t"
      "sh %[a1], 480(%[mb_dqcoeff]) \n\t"
      :
      : [a1] "r"(a1), [mb_dqcoeff] "r"(mb_dqcoeff));
}
  280. #endif