avg_pred_sse2.c

/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <emmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/mem_sse2.h"

void vpx_comp_avg_pred_sse2(uint8_t *comp_pred, const uint8_t *pred, int width,
                            int height, const uint8_t *ref, int ref_stride) {
  /* comp_pred and pred must be 16 byte aligned. */
  assert(((intptr_t)comp_pred & 0xf) == 0);
  assert(((intptr_t)pred & 0xf) == 0);
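  // Widths above 8 are assumed to be multiples of 16 (the x += 16 loop
  // below relies on this), so each row is averaged 16 bytes at a time.
  // _mm_avg_epu8 (pavgb) computes the rounded average (p + r + 1) >> 1
  // for each unsigned byte.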
  if (width > 8) {
    int x, y;
    for (y = 0; y < height; ++y) {
      for (x = 0; x < width; x += 16) {
        const __m128i p = _mm_load_si128((const __m128i *)(pred + x));
        const __m128i r = _mm_loadu_si128((const __m128i *)(ref + x));
        const __m128i avg = _mm_avg_epu8(p, r);
        _mm_store_si128((__m128i *)(comp_pred + x), avg);
      }
      comp_pred += width;
      pred += width;
      ref += ref_stride;
    }
  } else {  // width must be 4 or 8.
    int i;
    // Process 16 elements at a time. comp_pred and pred have width == stride
    // and therefore live in contiguous memory. 4*4, 4*8, 8*4, 8*8, and 8*16
    // are all divisible by 16, so only ref needs to be massaged when loading.
    for (i = 0; i < width * height; i += 16) {
      const __m128i p = _mm_load_si128((const __m128i *)pred);
      __m128i r;
      __m128i avg;
      if (width == ref_stride) {
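        // ref rows are contiguous too, so a single unaligned 16-byte load
        // grabs the next four 4-wide or two 8-wide rows at once.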
        r = _mm_loadu_si128((const __m128i *)ref);
        ref += 16;
      } else if (width == 4) {
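        // Gather four 4-byte rows into one register. _mm_set_epi32 takes its
        // arguments high lane first, so the rows appear in reverse order.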
        r = _mm_set_epi32(loadu_uint32(ref + 3 * ref_stride),
                          loadu_uint32(ref + 2 * ref_stride),
                          loadu_uint32(ref + ref_stride), loadu_uint32(ref));
        ref += 4 * ref_stride;
      } else {
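        // Two 8-byte rows: _mm_loadl_epi64 fills the low 8 bytes, then
        // _mm_loadh_pi fills the high 8 bytes. The casts to and from float
        // domain only reinterpret the register; they generate no code.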
        const __m128i r_0 = _mm_loadl_epi64((const __m128i *)ref);
        assert(width == 8);
        r = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(r_0),
                                          (const __m64 *)(ref + ref_stride)));
        ref += 2 * ref_stride;
      }
      avg = _mm_avg_epu8(p, r);
      _mm_store_si128((__m128i *)comp_pred, avg);
      pred += 16;
      comp_pred += 16;
    }
  }
}
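
For reference, a minimal scalar sketch of what the SSE2 path computes: the
rounded byte average (p + r + 1) >> 1 that _mm_avg_epu8 vectorizes. The helper
name comp_avg_pred_ref is hypothetical, not part of the library.

#include <stdint.h>

// Hypothetical scalar reference for vpx_comp_avg_pred_sse2. comp_pred and
// pred use width as their stride, matching the SIMD version's pointer math.
static void comp_avg_pred_ref(uint8_t *comp_pred, const uint8_t *pred,
                              int width, int height, const uint8_t *ref,
                              int ref_stride) {
  int x, y;
  for (y = 0; y < height; ++y) {
    for (x = 0; x < width; ++x) {
      comp_pred[x] = (uint8_t)((pred[x] + ref[x] + 1) >> 1);
    }
    comp_pred += width;
    pred += width;
    ref += ref_stride;
  }
}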