rotate_argb.cc

/*
 *  Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "libyuv/rotate.h"
#include "libyuv/convert.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"
#include "libyuv/scale_row.h" /* for ScaleARGBRowDownEven_ */

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
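
// ARGBTranspose writes each column of the width x height source as a row of
// the height x width destination. Columns are gathered with
// ScaleARGBRowDownEven, using a pixel step equal to the source stride in
// pixels (src_stride_argb / 4, since ARGB is 4 bytes per pixel).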
static void ARGBTranspose(const uint8_t* src_argb,
                          int src_stride_argb,
                          uint8_t* dst_argb,
                          int dst_stride_argb,
                          int width,
                          int height) {
  int i;
  int src_pixel_step = src_stride_argb >> 2;
  void (*ScaleARGBRowDownEven)(
      const uint8_t* src_argb, ptrdiff_t src_stride_argb, int src_step,
      uint8_t* dst_argb, int dst_width) = ScaleARGBRowDownEven_C;
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_SSE2;
    if (IS_ALIGNED(height, 4)) {  // Width of dest.
      ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_NEON;
    if (IS_ALIGNED(height, 4)) {  // Width of dest.
      ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
    }
  }
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MSA;
    if (IS_ALIGNED(height, 4)) {  // Width of dest.
      ScaleARGBRowDownEven = ScaleARGBRowDownEven_MSA;
    }
  }
#endif

  for (i = 0; i < width; ++i) {  // column of source to row of dest.
    ScaleARGBRowDownEven(src_argb, 0, src_pixel_step, dst_argb, height);
    dst_argb += dst_stride_argb;
    src_argb += 4;
  }
}

void ARGBRotate90(const uint8_t* src_argb,
                  int src_stride_argb,
                  uint8_t* dst_argb,
                  int dst_stride_argb,
                  int width,
                  int height) {
  // Rotate by 90 is an ARGBTranspose with the source read
  // from bottom to top. So set the source pointer to the end
  // of the buffer and flip the sign of the source stride.
  src_argb += src_stride_argb * (height - 1);
  src_stride_argb = -src_stride_argb;
  ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
                height);
}

void ARGBRotate270(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb,
                   int dst_stride_argb,
                   int width,
                   int height) {
  // Rotate by 270 is an ARGBTranspose with the destination written
  // from bottom to top. So set the destination pointer to the end
  // of the buffer and flip the sign of the destination stride.
  dst_argb += dst_stride_argb * (width - 1);
  dst_stride_argb = -dst_stride_argb;
  ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
                height);
}

void ARGBRotate180(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb,
                   int dst_stride_argb,
                   int width,
                   int height) {
  // Swap first and last row and mirror the content. Uses a temporary row.
  align_buffer_64(row, width * 4);
  const uint8_t* src_bot = src_argb + src_stride_argb * (height - 1);
  uint8_t* dst_bot = dst_argb + dst_stride_argb * (height - 1);
  int half_height = (height + 1) >> 1;
  int y;
  void (*ARGBMirrorRow)(const uint8_t* src_argb, uint8_t* dst_argb,
                        int width) = ARGBMirrorRow_C;
  void (*CopyRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
      CopyRow_C;
#if defined(HAS_ARGBMIRRORROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
    if (IS_ALIGNED(width, 4)) {
      ARGBMirrorRow = ARGBMirrorRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBMIRRORROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBMirrorRow = ARGBMirrorRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBMIRRORROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBMirrorRow = ARGBMirrorRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBMIRRORROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    ARGBMirrorRow = ARGBMirrorRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      ARGBMirrorRow = ARGBMirrorRow_MSA;
    }
  }
#endif
#if defined(HAS_COPYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
  }
#endif
#if defined(HAS_COPYROW_AVX)
  if (TestCpuFlag(kCpuHasAVX)) {
    CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
  }
#endif
#if defined(HAS_COPYROW_ERMS)
  if (TestCpuFlag(kCpuHasERMS)) {
    CopyRow = CopyRow_ERMS;
  }
#endif
#if defined(HAS_COPYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
  }
#endif

  // Odd height will harmlessly mirror the middle row twice.
  for (y = 0; y < half_height; ++y) {
    ARGBMirrorRow(src_argb, row, width);      // Mirror first row into a buffer
    ARGBMirrorRow(src_bot, dst_argb, width);  // Mirror last row into first row
    CopyRow(row, dst_bot, width * 4);  // Copy first mirrored row into last
    src_argb += src_stride_argb;
    dst_argb += dst_stride_argb;
    src_bot -= src_stride_argb;
    dst_bot -= dst_stride_argb;
  }
  free_aligned_buffer_64(row);
}
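
// Rotate an ARGB frame by the given RotationMode. Returns 0 on success and
// -1 on invalid parameters. For kRotate90 and kRotate270 the output is
// height x width, so the destination buffer and stride must be sized for the
// swapped dimensions. Illustrative call (buffers assumed preallocated):
//
//   ARGBRotate(src_argb, width * 4,   // source stride in bytes
//              dst_argb, height * 4,  // dest stride: rotated rows are height px
//              width, height, kRotate90);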
LIBYUV_API
int ARGBRotate(const uint8_t* src_argb,
               int src_stride_argb,
               uint8_t* dst_argb,
               int dst_stride_argb,
               int width,
               int height,
               enum RotationMode mode) {
  if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
    return -1;
  }

  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }

  switch (mode) {
    case kRotate0:
      // copy frame
      return ARGBCopy(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
                      width, height);
    case kRotate90:
      ARGBRotate90(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
                   height);
      return 0;
    case kRotate270:
      ARGBRotate270(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
                    width, height);
      return 0;
    case kRotate180:
      ARGBRotate180(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
                    width, height);
      return 0;
    default:
      break;
  }
  return -1;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif