//-----------------------------------------------------------------------------
// This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or distribute
// this software, either in source code form or as a compiled binary, for any
// purpose, commercial or non-commercial, and by any means.
//
// In jurisdictions that recognize copyright laws, the author or authors of
// this software dedicate any and all copyright interest in the software to the
// public domain. We make this dedication for the benefit of the public at
// large and to the detriment of our heirs and successors. We intend this
// dedication to be an overt act of relinquishment in perpetuity of all present
// and future rights to this software under copyright law.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// For more information, please refer to <http://unlicense.org/>
//-----------------------------------------------------------------------------
// SRT Project information:
// This file was adopted from a Public Domain project from
// https://github.com/mbitsnbites/atomic
// Only namespaces were changed to adopt it for SRT project.
  28. #ifndef SRT_SYNC_ATOMIC_MSVC_H_
  29. #define SRT_SYNC_ATOMIC_MSVC_H_
  30. // Define which functions we need (don't include <intrin.h>).
  31. extern "C" {
  32. short _InterlockedIncrement16(short volatile*);
  33. long _InterlockedIncrement(long volatile*);
  34. __int64 _InterlockedIncrement64(__int64 volatile*);
  35. short _InterlockedDecrement16(short volatile*);
  36. long _InterlockedDecrement(long volatile*);
  37. __int64 _InterlockedDecrement64(__int64 volatile*);
  38. char _InterlockedExchange8(char volatile*, char);
  39. short _InterlockedExchange16(short volatile*, short);
  40. long __cdecl _InterlockedExchange(long volatile*, long);
  41. __int64 _InterlockedExchange64(__int64 volatile*, __int64);
  42. char _InterlockedCompareExchange8(char volatile*, char, char);
  43. short _InterlockedCompareExchange16(short volatile*, short, short);
  44. long __cdecl _InterlockedCompareExchange(long volatile*, long, long);
  45. __int64 _InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
  46. };
  47. // Define which functions we want to use as inline intriniscs.
  48. #pragma intrinsic(_InterlockedIncrement)
  49. #pragma intrinsic(_InterlockedIncrement16)
  50. #pragma intrinsic(_InterlockedDecrement)
  51. #pragma intrinsic(_InterlockedDecrement16)
  52. #pragma intrinsic(_InterlockedCompareExchange)
  53. #pragma intrinsic(_InterlockedCompareExchange8)
  54. #pragma intrinsic(_InterlockedCompareExchange16)
  55. #pragma intrinsic(_InterlockedExchange)
  56. #pragma intrinsic(_InterlockedExchange8)
  57. #pragma intrinsic(_InterlockedExchange16)
  58. #if defined(_M_X64)
  59. #pragma intrinsic(_InterlockedIncrement64)
  60. #pragma intrinsic(_InterlockedDecrement64)
  61. #pragma intrinsic(_InterlockedCompareExchange64)
  62. #pragma intrinsic(_InterlockedExchange64)
  63. #endif // _M_X64
  64. namespace srt {
  65. namespace sync {
  66. namespace msvc {
  67. template <typename T, size_t N = sizeof(T)>
  68. struct interlocked {
  69. };
  70. template <typename T>
  71. struct interlocked<T, 1> {
  72. static inline T increment(T volatile* x) {
  73. // There's no _InterlockedIncrement8().
  74. char old_val, new_val;
  75. do {
  76. old_val = static_cast<char>(*x);
  77. new_val = old_val + static_cast<char>(1);
  78. } while (_InterlockedCompareExchange8(reinterpret_cast<volatile char*>(x),
  79. new_val,
  80. old_val) != old_val);
  81. return static_cast<T>(new_val);
  82. }
  83. static inline T decrement(T volatile* x) {
  84. // There's no _InterlockedDecrement8().
  85. char old_val, new_val;
  86. do {
  87. old_val = static_cast<char>(*x);
  88. new_val = old_val - static_cast<char>(1);
  89. } while (_InterlockedCompareExchange8(reinterpret_cast<volatile char*>(x),
  90. new_val,
  91. old_val) != old_val);
  92. return static_cast<T>(new_val);
  93. }
  94. static inline T compare_exchange(T volatile* x,
  95. const T new_val,
  96. const T expected_val) {
  97. return static_cast<T>(
  98. _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(x),
  99. static_cast<const char>(new_val),
  100. static_cast<const char>(expected_val)));
  101. }
  102. static inline T exchange(T volatile* x, const T new_val) {
  103. return static_cast<T>(_InterlockedExchange8(
  104. reinterpret_cast<volatile char*>(x), static_cast<const char>(new_val)));
  105. }
  106. };
  107. template <typename T>
  108. struct interlocked<T, 2> {
  109. static inline T increment(T volatile* x) {
  110. return static_cast<T>(
  111. _InterlockedIncrement16(reinterpret_cast<volatile short*>(x)));
  112. }
  113. static inline T decrement(T volatile* x) {
  114. return static_cast<T>(
  115. _InterlockedDecrement16(reinterpret_cast<volatile short*>(x)));
  116. }
  117. static inline T compare_exchange(T volatile* x,
  118. const T new_val,
  119. const T expected_val) {
  120. return static_cast<T>(
  121. _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(x),
  122. static_cast<const short>(new_val),
  123. static_cast<const short>(expected_val)));
  124. }
  125. static inline T exchange(T volatile* x, const T new_val) {
  126. return static_cast<T>(
  127. _InterlockedExchange16(reinterpret_cast<volatile short*>(x),
  128. static_cast<const short>(new_val)));
  129. }
  130. };
  131. template <typename T>
  132. struct interlocked<T, 4> {
  133. static inline T increment(T volatile* x) {
  134. return static_cast<T>(
  135. _InterlockedIncrement(reinterpret_cast<volatile long*>(x)));
  136. }
  137. static inline T decrement(T volatile* x) {
  138. return static_cast<T>(
  139. _InterlockedDecrement(reinterpret_cast<volatile long*>(x)));
  140. }
  141. static inline T compare_exchange(T volatile* x,
  142. const T new_val,
  143. const T expected_val) {
  144. return static_cast<T>(
  145. _InterlockedCompareExchange(reinterpret_cast<volatile long*>(x),
  146. static_cast<const long>(new_val),
  147. static_cast<const long>(expected_val)));
  148. }
  149. static inline T exchange(T volatile* x, const T new_val) {
  150. return static_cast<T>(_InterlockedExchange(
  151. reinterpret_cast<volatile long*>(x), static_cast<const long>(new_val)));
  152. }
  153. };
  154. template <typename T>
  155. struct interlocked<T, 8> {
  156. static inline T increment(T volatile* x) {
  157. #if defined(_M_X64)
  158. return static_cast<T>(
  159. _InterlockedIncrement64(reinterpret_cast<volatile __int64*>(x)));
  160. #else
  161. // There's no _InterlockedIncrement64() for 32-bit x86.
  162. __int64 old_val, new_val;
  163. do {
  164. old_val = static_cast<__int64>(*x);
  165. new_val = old_val + static_cast<__int64>(1);
  166. } while (_InterlockedCompareExchange64(
  167. reinterpret_cast<volatile __int64*>(x), new_val, old_val) !=
  168. old_val);
  169. return static_cast<T>(new_val);
  170. #endif // _M_X64
  171. }
  172. static inline T decrement(T volatile* x) {
  173. #if defined(_M_X64)
  174. return static_cast<T>(
  175. _InterlockedDecrement64(reinterpret_cast<volatile __int64*>(x)));
  176. #else
  177. // There's no _InterlockedDecrement64() for 32-bit x86.
  178. __int64 old_val, new_val;
  179. do {
  180. old_val = static_cast<__int64>(*x);
  181. new_val = old_val - static_cast<__int64>(1);
  182. } while (_InterlockedCompareExchange64(
  183. reinterpret_cast<volatile __int64*>(x), new_val, old_val) !=
  184. old_val);
  185. return static_cast<T>(new_val);
  186. #endif // _M_X64
  187. }
  188. static inline T compare_exchange(T volatile* x,
  189. const T new_val,
  190. const T expected_val) {
  191. return static_cast<T>(_InterlockedCompareExchange64(
  192. reinterpret_cast<volatile __int64*>(x),
  193. static_cast<const __int64>(new_val),
  194. static_cast<const __int64>(expected_val)));
  195. }
  196. static inline T exchange(T volatile* x, const T new_val) {
  197. #if defined(_M_X64)
  198. return static_cast<T>(
  199. _InterlockedExchange64(reinterpret_cast<volatile __int64*>(x),
  200. static_cast<const __int64>(new_val)));
  201. #else
  202. // There's no _InterlockedExchange64 for 32-bit x86.
  203. __int64 old_val;
  204. do {
  205. old_val = static_cast<__int64>(*x);
  206. } while (_InterlockedCompareExchange64(
  207. reinterpret_cast<volatile __int64*>(x), new_val, old_val) !=
  208. old_val);
  209. return static_cast<T>(old_val);
  210. #endif // _M_X64
  211. }
  212. };
  213. } // namespace msvc
  214. } // namespace sync
  215. } // namespace srt
  216. #endif // ATOMIC_ATOMIC_MSVC_H_