/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
  20. #ifndef AVUTIL_ARM_INTMATH_H
  21. #define AVUTIL_ARM_INTMATH_H
  22. #include <stdint.h>
  23. #include "config.h"
  24. #include "libavutil/attributes.h"
  25. #if HAVE_INLINE_ASM
  26. #if HAVE_ARMV6_INLINE
  27. #define av_clip_uint8 av_clip_uint8_arm
  28. static av_always_inline av_const int av_clip_uint8_arm(int a)
  29. {
  30. int x;
  31. __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
  32. return x;
  33. }
  34. #define av_clip_int8 av_clip_int8_arm
  35. static av_always_inline av_const int av_clip_int8_arm(int a)
  36. {
  37. int x;
  38. __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
  39. return x;
  40. }
  41. #define av_clip_uint16 av_clip_uint16_arm
  42. static av_always_inline av_const int av_clip_uint16_arm(int a)
  43. {
  44. int x;
  45. __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
  46. return x;
  47. }
  48. #define av_clip_int16 av_clip_int16_arm
  49. static av_always_inline av_const int av_clip_int16_arm(int a)
  50. {
  51. int x;
  52. __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
  53. return x;
  54. }
  55. #define av_clip_intp2 av_clip_intp2_arm
  56. static av_always_inline av_const int av_clip_intp2_arm(int a, int p)
  57. {
  58. unsigned x;
  59. __asm__ ("ssat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p+1));
  60. return x;
  61. }
  62. #define av_clip_uintp2 av_clip_uintp2_arm
  63. static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
  64. {
  65. unsigned x;
  66. __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
  67. return x;
  68. }
  69. #define av_sat_add32 av_sat_add32_arm
  70. static av_always_inline int av_sat_add32_arm(int a, int b)
  71. {
  72. int r;
  73. __asm__ ("qadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
  74. return r;
  75. }
  76. #define av_sat_dadd32 av_sat_dadd32_arm
  77. static av_always_inline int av_sat_dadd32_arm(int a, int b)
  78. {
  79. int r;
  80. __asm__ ("qdadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
  81. return r;
  82. }
  83. #define av_sat_sub32 av_sat_sub32_arm
  84. static av_always_inline int av_sat_sub32_arm(int a, int b)
  85. {
  86. int r;
  87. __asm__ ("qsub %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
  88. return r;
  89. }
  90. #define av_sat_dsub32 av_sat_dsub32_arm
  91. static av_always_inline int av_sat_dsub32_arm(int a, int b)
  92. {
  93. int r;
  94. __asm__ ("qdsub %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
  95. return r;
  96. }
  97. #endif /* HAVE_ARMV6_INLINE */
  98. #if HAVE_ASM_MOD_Q
  99. #define av_clipl_int32 av_clipl_int32_arm
  100. static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
  101. {
  102. int x, y;
  103. __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"
  104. "itet ne \n\t"
  105. "mvnne %1, #1<<31 \n\t"
  106. "moveq %0, %Q2 \n\t"
  107. "eorne %0, %1, %R2, asr #31 \n\t"
  108. : "=r"(x), "=&r"(y) : "r"(a) : "cc");
  109. return x;
  110. }
  111. #endif /* HAVE_ASM_MOD_Q */
  112. #endif /* HAVE_INLINE_ASM */
  113. #endif /* AVUTIL_ARM_INTMATH_H */