rotate_msa.cc

/*
 *  Copyright 2016 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/rotate_row.h"

// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

#include "libyuv/macros_msa.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
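
// ILVRL_* interleave two pairs of input vectors at byte, halfword, word or
// doubleword granularity: out0/out1 take the right/left interleave of
// (in0, in1), and out2/out3 of (in2, in3). Chained together, the four
// variants implement a 16x16 byte transpose.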
#define ILVRL_B(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                         \
    out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0);     \
    out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0);     \
    out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2);     \
    out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2);     \
  }

#define ILVRL_H(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                         \
    out0 = (v16u8)__msa_ilvr_h((v8i16)in1, (v8i16)in0);     \
    out1 = (v16u8)__msa_ilvl_h((v8i16)in1, (v8i16)in0);     \
    out2 = (v16u8)__msa_ilvr_h((v8i16)in3, (v8i16)in2);     \
    out3 = (v16u8)__msa_ilvl_h((v8i16)in3, (v8i16)in2);     \
  }

#define ILVRL_W(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                         \
    out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0);     \
    out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0);     \
    out2 = (v16u8)__msa_ilvr_w((v4i32)in3, (v4i32)in2);     \
    out3 = (v16u8)__msa_ilvl_w((v4i32)in3, (v4i32)in2);     \
  }

#define ILVRL_D(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                         \
    out0 = (v16u8)__msa_ilvr_d((v2i64)in1, (v2i64)in0);     \
    out1 = (v16u8)__msa_ilvl_d((v2i64)in1, (v2i64)in0);     \
    out2 = (v16u8)__msa_ilvr_d((v2i64)in3, (v2i64)in2);     \
    out3 = (v16u8)__msa_ilvl_d((v2i64)in3, (v2i64)in2);     \
  }
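
// C fallbacks: a Wx16 transpose is two Wx8 transposes. The second call
// handles source rows 8-15 and writes its output 8 bytes further into each
// transposed row (dst + 8).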
void TransposeWx16_C(const uint8_t* src,
                     int src_stride,
                     uint8_t* dst,
                     int dst_stride,
                     int width) {
  TransposeWx8_C(src, src_stride, dst, dst_stride, width);
  TransposeWx8_C((src + 8 * src_stride), src_stride, (dst + 8), dst_stride,
                 width);
}

void TransposeUVWx16_C(const uint8_t* src,
                       int src_stride,
                       uint8_t* dst_a,
                       int dst_stride_a,
                       uint8_t* dst_b,
                       int dst_stride_b,
                       int width) {
  TransposeUVWx8_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
                   width);
  TransposeUVWx8_C((src + 8 * src_stride), src_stride, (dst_a + 8),
                   dst_stride_a, (dst_b + 8), dst_stride_b, width);
}
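
// Transposes a 16-row band with MSA. Each loop iteration handles one 16x16
// byte tile: byte, halfword, word and finally doubleword interleaves
// progressively gather each source column into a single vector, which is
// then stored as one row of the transposed output.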
void TransposeWx16_MSA(const uint8_t* src,
                       int src_stride,
                       uint8_t* dst,
                       int dst_stride,
                       int width) {
  int x;
  const uint8_t* s;
  v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;

  for (x = 0; x < width; x += 16) {
    s = src;
    // Rows 0-3: interleave bytes, then halfwords.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
    // Rows 4-7, then word-interleave with rows 0-3.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
    ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
    ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
    // Rows 8-11.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
    // Rows 12-15.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
    // Doubleword interleave joins rows 0-7 (res0..res7) with rows 8-15
    // (res8/res9); each dstN then holds one complete source column, i.e.
    // one row of the transposed output.
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
    ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
    dst += dst_stride * 4;
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
    ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
    dst += dst_stride * 4;
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
    ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
    dst += dst_stride * 4;
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
    ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
    ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
    src += 16;
    dst += dst_stride * 4;
  }
}
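
// UV variant: each source row holds 8 interleaved U/V byte pairs, so the
// loop advances 8 pairs (16 bytes) per iteration. The same interleave tree
// transposes the 16x16 byte tile; transposed U columns go to dst_a and V
// columns to dst_b, 8 output rows each per iteration.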
void TransposeUVWx16_MSA(const uint8_t* src,
                         int src_stride,
                         uint8_t* dst_a,
                         int dst_stride_a,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width) {
  int x;
  const uint8_t* s;
  v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;

  for (x = 0; x < width; x += 8) {
    s = src;
    // Rows 0-3.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
    // Rows 4-7.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
    ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
    ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
    // Rows 8-11.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
    // Rows 12-15.
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    s += src_stride;
    ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
    ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
    // Each dst vector is one transposed byte column: even columns are U
    // (stored to dst_a), odd columns are V (stored to dst_b).
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
    ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
    dst_a += dst_stride_a * 2;
    dst_b += dst_stride_b * 2;
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
    ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
    dst_a += dst_stride_a * 2;
    dst_b += dst_stride_b * 2;
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
    ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
    dst_a += dst_stride_a * 2;
    dst_b += dst_stride_b * 2;
    res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
    res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
    ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
    ST_UB2(dst0, dst2, dst_a, dst_stride_a);
    ST_UB2(dst1, dst3, dst_b, dst_stride_b);
    src += 16;
    dst_a += dst_stride_a * 2;
    dst_b += dst_stride_b * 2;
  }
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif

#endif  // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)