deblock_vsx.c

/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"

extern const int16_t vpx_rv[];

static const uint8x16_t load_merge = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A,
                                       0x0C, 0x0E, 0x18, 0x19, 0x1A, 0x1B,
                                       0x1C, 0x1D, 0x1E, 0x1F };

static const uint8x16_t st8_perm = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
                                     0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B,
                                     0x1C, 0x1D, 0x1E, 0x1F };

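// Average the four context pixels with the center pixel v, and keep the
// filtered value only where every absolute difference between v and its
// context pixels is below the per-column filter limit.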
static INLINE uint8x16_t apply_filter(uint8x16_t ctx[4], uint8x16_t v,
                                      uint8x16_t filter) {
  const uint8x16_t k1 = vec_avg(ctx[0], ctx[1]);
  const uint8x16_t k2 = vec_avg(ctx[3], ctx[2]);
  const uint8x16_t k3 = vec_avg(k1, k2);
  const uint8x16_t f_a = vec_max(vec_absd(v, ctx[0]), vec_absd(v, ctx[1]));
  const uint8x16_t f_b = vec_max(vec_absd(v, ctx[2]), vec_absd(v, ctx[3]));
  const bool8x16_t mask = vec_cmplt(vec_max(f_a, f_b), filter);
  return vec_sel(v, vec_avg(k3, v), mask);
}

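// Load the vertical context: the rows two above, one above, one below and
// two below the current position.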
static INLINE void vert_ctx(uint8x16_t ctx[4], int col, uint8_t *src,
                            int stride) {
  ctx[0] = vec_vsx_ld(col - 2 * stride, src);
  ctx[1] = vec_vsx_ld(col - stride, src);
  ctx[2] = vec_vsx_ld(col + stride, src);
  ctx[3] = vec_vsx_ld(col + 2 * stride, src);
}

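// Build the horizontal context: v shifted two and one pixels to the left and
// one and two pixels to the right, pulling the missing bytes from the
// neighbouring vectors.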
static INLINE void horz_ctx(uint8x16_t ctx[4], uint8x16_t left_ctx,
                            uint8x16_t v, uint8x16_t right_ctx) {
  static const uint8x16_t l2_perm = { 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13,
                                      0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
                                      0x1A, 0x1B, 0x1C, 0x1D };

  static const uint8x16_t l1_perm = { 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
                                      0x15, 0x16, 0x17, 0x18, 0x19, 0x1A,
                                      0x1B, 0x1C, 0x1D, 0x1E };

  static const uint8x16_t r1_perm = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                                      0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
                                      0x0D, 0x0E, 0x0F, 0x10 };

  static const uint8x16_t r2_perm = { 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                      0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
                                      0x0E, 0x0F, 0x10, 0x11 };

  ctx[0] = vec_perm(left_ctx, v, l2_perm);
  ctx[1] = vec_perm(left_ctx, v, l1_perm);
  ctx[2] = vec_perm(v, right_ctx, r1_perm);
  ctx[3] = vec_perm(v, right_ctx, r2_perm);
}

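// Vertical (down) pass followed by a horizontal (across) pass over one
// macroblock row, 16 pixels at a time, with a separate 8-pixel tail when
// cols is not a multiple of 16.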
void vpx_post_proc_down_and_across_mb_row_vsx(unsigned char *src_ptr,
                                              unsigned char *dst_ptr,
                                              int src_pixels_per_line,
                                              int dst_pixels_per_line, int cols,
                                              unsigned char *f, int size) {
  int row, col;
  uint8x16_t ctx[4], out, v, left_ctx;

  for (row = 0; row < size; row++) {
    for (col = 0; col < cols - 8; col += 16) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      v = vec_vsx_ld(col, src_ptr);
      vert_ctx(ctx, col, src_ptr, src_pixels_per_line);
      vec_vsx_st(apply_filter(ctx, v, filter), col, dst_ptr);
    }

    if (col != cols) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      v = vec_vsx_ld(col, src_ptr);
      vert_ctx(ctx, col, src_ptr, src_pixels_per_line);
      out = apply_filter(ctx, v, filter);
      vec_vsx_st(vec_perm(out, v, st8_perm), col, dst_ptr);
    }

    /* now post_proc_across */
    left_ctx = vec_splats(dst_ptr[0]);
    v = vec_vsx_ld(0, dst_ptr);
    for (col = 0; col < cols - 8; col += 16) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      const uint8x16_t right_ctx = (col + 16 == cols)
                                       ? vec_splats(dst_ptr[cols - 1])
                                       : vec_vsx_ld(col, dst_ptr + 16);

      horz_ctx(ctx, left_ctx, v, right_ctx);
      vec_vsx_st(apply_filter(ctx, v, filter), col, dst_ptr);
      left_ctx = v;
      v = right_ctx;
    }

    if (col != cols) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      const uint8x16_t right_ctx = vec_splats(dst_ptr[cols - 1]);
      horz_ctx(ctx, left_ctx, v, right_ctx);
      out = apply_filter(ctx, v, filter);
      vec_vsx_st(vec_perm(out, v, st8_perm), col, dst_ptr);
    }

    src_ptr += src_pixels_per_line;
    dst_ptr += dst_pixels_per_line;
  }
}

// C: s[c + 7]
static INLINE int16x8_t next7l_s16(uint8x16_t c) {
  static const uint8x16_t next7_perm = {
    0x07, 0x10, 0x08, 0x11, 0x09, 0x12, 0x0A, 0x13,
    0x0B, 0x14, 0x0C, 0x15, 0x0D, 0x16, 0x0E, 0x17,
  };

  return (int16x8_t)vec_perm(c, vec_zeros_u8, next7_perm);
}

// Slide across window and add.
static INLINE int16x8_t slide_sum_s16(int16x8_t x) {
  // x = A B C D E F G H
  //
  // 0 A B C D E F G
  const int16x8_t sum1 = vec_add(x, vec_slo(x, vec_splats((int8_t)(2 << 3))));
  // 0 0 A B C D E F
  const int16x8_t sum2 = vec_add(vec_slo(x, vec_splats((int8_t)(4 << 3))),
                                 // 0 0 0 A B C D E
                                 vec_slo(x, vec_splats((int8_t)(6 << 3))));
  // 0 0 0 0 A B C D
  const int16x8_t sum3 = vec_add(vec_slo(x, vec_splats((int8_t)(8 << 3))),
                                 // 0 0 0 0 0 A B C
                                 vec_slo(x, vec_splats((int8_t)(10 << 3))));
  // 0 0 0 0 0 0 A B
  const int16x8_t sum4 = vec_add(vec_slo(x, vec_splats((int8_t)(12 << 3))),
                                 // 0 0 0 0 0 0 0 A
                                 vec_slo(x, vec_splats((int8_t)(14 << 3))));
  return vec_add(vec_add(sum1, sum2), vec_add(sum3, sum4));
}

// Slide across window and add.
static INLINE int32x4_t slide_sumsq_s32(int32x4_t xsq_even, int32x4_t xsq_odd) {
  // 0 A C E
  // + 0 B D F
  int32x4_t sumsq_1 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(4 << 3))),
                              vec_slo(xsq_odd, vec_splats((int8_t)(4 << 3))));
  // 0 0 A C
  // + 0 0 B D
  int32x4_t sumsq_2 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(8 << 3))),
                              vec_slo(xsq_odd, vec_splats((int8_t)(8 << 3))));
  // 0 0 0 A
  // + 0 0 0 B
  int32x4_t sumsq_3 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(12 << 3))),
                              vec_slo(xsq_odd, vec_splats((int8_t)(12 << 3))));

  sumsq_1 = vec_add(sumsq_1, xsq_even);
  sumsq_2 = vec_add(sumsq_2, sumsq_3);
  return vec_add(sumsq_1, sumsq_2);
}

// C: (b + sum + val) >> 4
static INLINE int16x8_t filter_s16(int16x8_t b, int16x8_t sum, int16x8_t val) {
  return vec_sra(vec_add(vec_add(b, sum), val), vec_splats((uint16_t)4));
}

// C: sumsq * 15 - sum * sum
static INLINE bool16x8_t mask_s16(int32x4_t sumsq_even, int32x4_t sumsq_odd,
                                  int16x8_t sum, int32x4_t lim) {
  static const uint8x16_t mask_merge = { 0x00, 0x01, 0x10, 0x11, 0x04, 0x05,
                                         0x14, 0x15, 0x08, 0x09, 0x18, 0x19,
                                         0x0C, 0x0D, 0x1C, 0x1D };
  const int32x4_t sumsq_odd_scaled =
      vec_mul(sumsq_odd, vec_splats((int32_t)15));
  const int32x4_t sumsq_even_scaled =
      vec_mul(sumsq_even, vec_splats((int32_t)15));
  const int32x4_t thres_odd = vec_sub(sumsq_odd_scaled, vec_mulo(sum, sum));
  const int32x4_t thres_even = vec_sub(sumsq_even_scaled, vec_mule(sum, sum));

  const bool32x4_t mask_odd = vec_cmplt(thres_odd, lim);
  const bool32x4_t mask_even = vec_cmplt(thres_even, lim);
  return vec_perm((bool16x8_t)mask_even, (bool16x8_t)mask_odd, mask_merge);
}

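// Horizontal (across) in-place post-proc filter. A 15-pixel running sum and
// sum of squares decide, per pixel, whether to apply the smoothing filter.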
void vpx_mbpost_proc_across_ip_vsx(unsigned char *src, int pitch, int rows,
                                   int cols, int flimit) {
  int row, col;
  const int32x4_t lim = vec_splats(flimit);

  // 8 columns are processed at a time.
  assert(cols % 8 == 0);

  for (row = 0; row < rows; row++) {
    // The sum is signed and requires at most 13 bits.
    // (8 bits + sign) * 15 (4 bits)
    int16x8_t sum;
    // The sum of squares requires at most 20 bits.
    // (16 bits + sign) * 15 (4 bits)
    int32x4_t sumsq_even, sumsq_odd;

    // Fill left context with first col.
    int16x8_t left_ctx = vec_splats((int16_t)src[0]);
    int16_t s = src[0] * 9;
    int32_t ssq = src[0] * src[0] * 9 + 16;

    // Fill the next 6 columns of the sliding window with cols 2 to 7.
    for (col = 1; col <= 6; ++col) {
      s += src[col];
      ssq += src[col] * src[col];
    }

    // Set this sum to every element in the window.
    sum = vec_splats(s);
    sumsq_even = vec_splats(ssq);
    sumsq_odd = vec_splats(ssq);

    for (col = 0; col < cols; col += 8) {
      bool16x8_t mask;
      int16x8_t filtered, masked;
      uint8x16_t out;

      const uint8x16_t val = vec_vsx_ld(0, src + col);
      const int16x8_t val_high = unpack_to_s16_h(val);

      // C: s[c + 7]
      const int16x8_t right_ctx = (col + 8 == cols)
                                      ? vec_splats((int16_t)src[col + 7])
                                      : next7l_s16(val);

      // C: x = s[c + 7] - s[c - 8];
      const int16x8_t x = vec_sub(right_ctx, left_ctx);
      const int32x4_t xsq_even =
          vec_sub(vec_mule(right_ctx, right_ctx), vec_mule(left_ctx, left_ctx));
      const int32x4_t xsq_odd =
          vec_sub(vec_mulo(right_ctx, right_ctx), vec_mulo(left_ctx, left_ctx));

      const int32x4_t sumsq_tmp = slide_sumsq_s32(xsq_even, xsq_odd);
      // A C E G
      // 0 B D F
      // 0 A C E
      // 0 0 B D
      // 0 0 A C
      // 0 0 0 B
      // 0 0 0 A
      sumsq_even = vec_add(sumsq_even, sumsq_tmp);
      // B D F G
      // A C E G
      // 0 B D F
      // 0 A C E
      // 0 0 B D
      // 0 0 A C
      // 0 0 0 B
      // 0 0 0 A
      sumsq_odd = vec_add(sumsq_odd, vec_add(sumsq_tmp, xsq_odd));

      sum = vec_add(sum, slide_sum_s16(x));

      // C: (8 + sum + s[c]) >> 4
      filtered = filter_s16(vec_splats((int16_t)8), sum, val_high);
      // C: sumsq * 15 - sum * sum
      mask = mask_s16(sumsq_even, sumsq_odd, sum, lim);
      masked = vec_sel(val_high, filtered, mask);

      out = vec_perm((uint8x16_t)masked, vec_vsx_ld(0, src + col), load_merge);
      vec_vsx_st(out, 0, src + col);

      // Update window sum and square sum
      sum = vec_splat(sum, 7);
      sumsq_even = vec_splat(sumsq_odd, 3);
      sumsq_odd = vec_splat(sumsq_odd, 3);

      // C: s[c - 8] (for next iteration)
      left_ctx = val_high;
    }
    src += pitch;
  }
}

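// Vertical (down) in-place post-proc filter. A 16-entry sliding window of
// unpacked rows maintains the per-column running sum and sum of squares;
// the vpx_rv table supplies the rounding offsets.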
void vpx_mbpost_proc_down_vsx(uint8_t *dst, int pitch, int rows, int cols,
                              int flimit) {
  int col, row, i;
  int16x8_t window[16];
  const int32x4_t lim = vec_splats(flimit);

  // 8 columns are processed at a time.
  assert(cols % 8 == 0);
  // If rows is less than 8 the bottom border extension fails.
  assert(rows >= 8);

  for (col = 0; col < cols; col += 8) {
    // The sum is signed and requires at most 13 bits.
    // (8 bits + sign) * 15 (4 bits)
    int16x8_t r1, sum;
    // The sum of squares requires at most 20 bits.
    // (16 bits + sign) * 15 (4 bits)
    int32x4_t sumsq_even, sumsq_odd;

    r1 = unpack_to_s16_h(vec_vsx_ld(0, dst));
    // Fill sliding window with first row.
    for (i = 0; i <= 8; i++) {
      window[i] = r1;
    }
    // First 9 rows of the sliding window are the same.
    // sum = r1 * 9
    sum = vec_mladd(r1, vec_splats((int16_t)9), vec_zeros_s16);
    // sumsq = r1 * r1 * 9
    sumsq_even = vec_mule(sum, r1);
    sumsq_odd = vec_mulo(sum, r1);

    // Fill the next 6 rows of the sliding window with rows 2 to 7.
    for (i = 1; i <= 6; ++i) {
      const int16x8_t next_row = unpack_to_s16_h(vec_vsx_ld(i * pitch, dst));
      window[i + 8] = next_row;
      sum = vec_add(sum, next_row);
      sumsq_odd = vec_add(sumsq_odd, vec_mulo(next_row, next_row));
      sumsq_even = vec_add(sumsq_even, vec_mule(next_row, next_row));
    }

    for (row = 0; row < rows; row++) {
      int32x4_t d15_even, d15_odd, d0_even, d0_odd;
      bool16x8_t mask;
      int16x8_t filtered, masked;
      uint8x16_t out;

      const int16x8_t rv = vec_vsx_ld(0, vpx_rv + (row & 127));

      // Move the sliding window
      if (row + 7 < rows) {
        window[15] = unpack_to_s16_h(vec_vsx_ld((row + 7) * pitch, dst));
      } else {
        window[15] = window[14];
      }

      // C: sum += s[7 * pitch] - s[-8 * pitch];
      sum = vec_add(sum, vec_sub(window[15], window[0]));

      // C: sumsq += s[7 * pitch] * s[7 * pitch] - s[-8 * pitch] * s[-8 *
      // pitch];
      // Optimization Note: Caching a squared-window for odd and even is
      // slower than just repeating the multiplies.
      d15_odd = vec_mulo(window[15], window[15]);
      d15_even = vec_mule(window[15], window[15]);
      d0_odd = vec_mulo(window[0], window[0]);
      d0_even = vec_mule(window[0], window[0]);

      sumsq_odd = vec_add(sumsq_odd, vec_sub(d15_odd, d0_odd));
      sumsq_even = vec_add(sumsq_even, vec_sub(d15_even, d0_even));

      // C: (vpx_rv[(r & 127) + (c & 7)] + sum + s[0]) >> 4
      filtered = filter_s16(rv, sum, window[8]);

      // C: sumsq * 15 - sum * sum
      mask = mask_s16(sumsq_even, sumsq_odd, sum, lim);
      masked = vec_sel(window[8], filtered, mask);

      // TODO(ltrudeau) If cols % 16 == 0, we could just process 16 per
      // iteration
      out = vec_perm((uint8x16_t)masked, vec_vsx_ld(0, dst + row * pitch),
                     load_merge);
      vec_vsx_st(out, 0, dst + row * pitch);

      // Optimization Note: Turns out that the following loop is faster than
      // using pointers to manage the sliding window.
      for (i = 1; i < 16; i++) {
        window[i - 1] = window[i];
      }
    }
    dst += 8;
  }
}