refcount.h

/*
 * Copyright 2016-2019 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H

/* Used to check reference counts, mostly while doing perl5 stuff :-) */
# if defined(OPENSSL_NO_STDIO)
#  if defined(REF_PRINT)
#   error "REF_PRINT requires stdio"
#  endif
# endif

# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
     && !defined(__STDC_NO_ATOMICS__)
#  include <stdatomic.h>
#  define HAVE_C11_ATOMICS
# endif

# if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
     && ATOMIC_INT_LOCK_FREE > 0

#  define HAVE_ATOMICS 1

typedef _Atomic int CRYPTO_REF_COUNT;

static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret, void *lock)
{
    *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to the shared structure other than the reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented, all
 * other changes are visible on all processors, so the decrement itself can
 * be relaxed.  If it hits zero, the object will be destroyed.  Since that
 * is the last use of the object, the destructor's author might reason that
 * access to mutable members no longer has to be serialized, which would
 * otherwise imply an acquire fence.  Hence the conditional acquire fence...
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
{
    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}
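
/*
 * Illustrative sketch, not part of the upstream header: the conditional
 * acquire fence above supports the usual "free on last reference" pattern.
 * EXAMPLE_OBJ and example_obj_release() are hypothetical names used only to
 * show the calling convention of CRYPTO_DOWN_REF().
 *
 *     typedef struct {
 *         CRYPTO_REF_COUNT references;
 *         CRYPTO_RWLOCK *lock;     // only consulted by the lock-based fallback
 *         char *data;
 *     } EXAMPLE_OBJ;
 *
 *     static void example_obj_release(EXAMPLE_OBJ *obj)
 *     {
 *         int i;
 *
 *         CRYPTO_DOWN_REF(&obj->references, &i, obj->lock);
 *         if (i > 0)
 *             return;              // other references still exist
 *
 *         // i == 0: the acquire fence issued inside CRYPTO_DOWN_REF makes
 *         // all writes performed before the other threads' decrements
 *         // visible here, so tearing the object down is safe.
 *         OPENSSL_free(obj->data);
 *         OPENSSL_free(obj);
 *     }
 */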

# elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#  define HAVE_ATOMICS 1

typedef int CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(int *val, int *ret, void *lock)
{
    *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret, void *lock)
{
    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}

# elif defined(_MSC_VER) && _MSC_VER>=1200

#  define HAVE_ATOMICS 1

typedef volatile int CRYPTO_REF_COUNT;

#  if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#   include <intrin.h>
#   if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#    define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#   endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#  else
#   if !defined(_WIN32_WCE)
#    pragma intrinsic(_InterlockedExchangeAdd)
#   else
#    if _WIN32_WCE >= 0x600
       extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#    else
       /* under Windows CE we still have old-style Interlocked* functions */
       extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#     define _InterlockedExchangeAdd InterlockedExchangeAdd
#    endif
#   endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd(val, -1) - 1;
    return 1;
}
#  endif

# else

typedef int CRYPTO_REF_COUNT;

# define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
# define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif
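
/*
 * When no lock-free atomics are available, CRYPTO_UP_REF/CRYPTO_DOWN_REF
 * fall back to CRYPTO_atomic_add() from <openssl/crypto.h>, which serializes
 * the update with the caller-supplied CRYPTO_RWLOCK.  A rough sketch of that
 * behaviour (an assumption about the fallback path; the real implementation
 * lives in crypto/threads_*.c and may differ in detail):
 *
 *     int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
 *     {
 *         if (!CRYPTO_THREAD_write_lock(lock))
 *             return 0;
 *         *val += amount;
 *         *ret = *val;
 *         CRYPTO_THREAD_unlock(lock);
 *         return 1;
 *     }
 */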

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# ifdef REF_PRINT
#  define REF_PRINT_COUNT(a, b) \
    fprintf(stderr, "%p:%4d:%s\n", b, b->references, a)
# else
#  define REF_PRINT_COUNT(a, b)
# endif
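
/*
 * Typical caller-side use of these macros, modelled on OpenSSL's *_up_ref()
 * functions; EXAMPLE_OBJ is the same hypothetical type sketched above, with
 * a CRYPTO_REF_COUNT "references" field and a CRYPTO_RWLOCK "lock" field:
 *
 *     int EXAMPLE_OBJ_up_ref(EXAMPLE_OBJ *obj)
 *     {
 *         int i;
 *
 *         if (CRYPTO_UP_REF(&obj->references, &i, obj->lock) <= 0)
 *             return 0;
 *
 *         REF_PRINT_COUNT("EXAMPLE_OBJ", obj);
 *         REF_ASSERT_ISNT(i < 2);
 *         return i > 1 ? 1 : 0;
 *     }
 */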

#endif