/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_PORTS_X86_H_
#define VPX_VPX_PORTS_X86_H_

#include <stdlib.h>

#if defined(_MSC_VER)
#include <intrin.h> /* For __cpuidex, __rdtsc */
#endif

#include "vpx_config.h"
#include "vpx/vpx_integer.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
  VPX_CPU_UNKNOWN = -1,
  VPX_CPU_AMD,
  VPX_CPU_AMD_OLD,
  VPX_CPU_CENTAUR,
  VPX_CPU_CYRIX,
  VPX_CPU_INTEL,
  VPX_CPU_NEXGEN,
  VPX_CPU_NSC,
  VPX_CPU_RISE,
  VPX_CPU_SIS,
  VPX_CPU_TRANSMETA,
  VPX_CPU_TRANSMETA_OLD,
  VPX_CPU_UMC,
  VPX_CPU_VIA,
  VPX_CPU_LAST
} vpx_cpu_t;

#if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)                      \
  __asm__ __volatile__("cpuid \n\t"                             \
                       : "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \
                       : "a"(func), "c"(func2));
#else
#define cpuid(func, func2, ax, bx, cx, dx)     \
  __asm__ __volatile__(                        \
      "mov %%ebx, %%edi  \n\t"                 \
      "cpuid             \n\t"                 \
      "xchg %%edi, %%ebx \n\t"                 \
      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
      : "a"(func), "c"(func2));
#endif
#elif defined(__SUNPRO_C) || \
    defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)     \
  asm volatile(                                \
      "xchg %rsi, %rbx \n\t"                   \
      "cpuid \n\t"                             \
      "movl %ebx, %edi \n\t"                   \
      "xchg %rsi, %rbx \n\t"                   \
      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
      : "a"(func), "c"(func2));
#else
#define cpuid(func, func2, ax, bx, cx, dx)     \
  asm volatile(                                \
      "pushl %ebx       \n\t"                  \
      "cpuid            \n\t"                  \
      "movl %ebx, %edi  \n\t"                  \
      "popl %ebx        \n\t"                  \
      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
      : "a"(func), "c"(func2));
#endif
#else /* end __SUNPRO__ */
#if ARCH_X86_64
#if defined(_MSC_VER) && _MSC_VER > 1500
#define cpuid(func, func2, a, b, c, d) \
  do {                                 \
    int regs[4];                       \
    __cpuidex(regs, func, func2);      \
    a = regs[0];                       \
    b = regs[1];                       \
    c = regs[2];                       \
    d = regs[3];                       \
  } while (0)
#else
#define cpuid(func, func2, a, b, c, d) \
  do {                                 \
    int regs[4];                       \
    __cpuid(regs, func);               \
    a = regs[0];                       \
    b = regs[1];                       \
    c = regs[2];                       \
    d = regs[3];                       \
  } while (0)
#endif
#else
#define cpuid(func, func2, a, b, c, d)                              \
  __asm mov eax, func __asm mov ecx, func2 __asm cpuid __asm mov a, \
      eax __asm mov b, ebx __asm mov c, ecx __asm mov d, edx
#endif
#endif /* end others */
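
// Example use of cpuid() (a minimal sketch, not part of the original
// header): leaf 0 returns the highest supported standard leaf in eax and
// the CPU vendor string in ebx:edx:ecx.
//
//   unsigned int max_leaf, b, c, d;
//   cpuid(0, 0, max_leaf, b, c, d);
//   // The 12-byte vendor string is b, d, c in that order, e.g.
//   // "Genu" "ineI" "ntel" on Intel parts.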

// NaCl has no support for xgetbv or the raw opcode.
#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
static INLINE uint64_t xgetbv(void) {
  const uint32_t ecx = 0;
  uint32_t eax, edx;
  // Use the raw opcode for xgetbv for compatibility with older toolchains.
  __asm__ volatile(".byte 0x0f, 0x01, 0xd0\n"
                   : "=a"(eax), "=d"(edx)
                   : "c"(ecx));
  return ((uint64_t)edx << 32) | eax;
}
#elif (defined(_M_X64) || defined(_M_IX86)) && defined(_MSC_FULL_VER) && \
    _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
#include <immintrin.h>
#define xgetbv() _xgetbv(0)
#elif defined(_MSC_VER) && defined(_M_IX86)
static INLINE uint64_t xgetbv(void) {
  uint32_t eax_, edx_;
  __asm {
    xor ecx, ecx  // ecx = 0
    // Use the raw opcode for xgetbv for compatibility with older toolchains.
    __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
    mov eax_, eax
    mov edx_, edx
  }
  return ((uint64_t)edx_ << 32) | eax_;
}
#else
#define xgetbv() 0U  // no AVX for older x64 or unrecognized toolchains.
#endif
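
// Interpreting XCR0 (a sketch for illustration): bit 1 covers SSE (XMM)
// state and bit 2 covers AVX (YMM) state, so a mask of 0x6 confirms the OS
// saves both register sets across context switches. This is the check
// x86_simd_caps() performs below before advertising AVX:
//
//   if ((xgetbv() & 0x6) == 0x6) {
//     // OS-enabled AVX; safe to run VEX-encoded code.
//   }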

#if defined(_MSC_VER) && _MSC_VER >= 1700
#undef NOMINMAX
#define NOMINMAX
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_FAMILY_APP)
#define getenv(x) NULL
#endif
#endif

#define HAS_MMX 0x001
#define HAS_SSE 0x002
#define HAS_SSE2 0x004
#define HAS_SSE3 0x008
#define HAS_SSSE3 0x010
#define HAS_SSE4_1 0x020
#define HAS_AVX 0x040
#define HAS_AVX2 0x080
#define HAS_AVX512 0x100

#ifndef BIT
#define BIT(n) (1u << (n))
#endif

static INLINE int x86_simd_caps(void) {
  unsigned int flags = 0;
  unsigned int mask = ~0;
  unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
  char *env;
  (void)reg_ebx;

  /* See if the CPU capabilities are being overridden by the environment */
  env = getenv("VPX_SIMD_CAPS");
  if (env && *env) return (int)strtol(env, NULL, 0);

  env = getenv("VPX_SIMD_CAPS_MASK");
  if (env && *env) mask = (unsigned int)strtoul(env, NULL, 0);

  /* Ensure that the CPUID instruction supports extended features */
  cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);
  if (max_cpuid_val < 1) return 0;

  /* Get the standard feature flags */
  cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);

  if (reg_edx & BIT(23)) flags |= HAS_MMX;
  if (reg_edx & BIT(25)) flags |= HAS_SSE;  /* aka xmm */
  if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
  if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
  if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
  if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;

  // bits 27 (OSXSAVE) & 28 (256-bit AVX)
  if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
    // Confirm via xgetbv that the OS saves XMM and YMM state before
    // advertising AVX.
    if ((xgetbv() & 0x6) == 0x6) {
      flags |= HAS_AVX;
      if (max_cpuid_val >= 7) {
        /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
        cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
        if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
        // bits 16 (AVX-512F) & 17 (AVX-512DQ) & 28 (AVX-512CD) &
        // 30 (AVX-512BW) & 31 (AVX-512VL)
        if ((reg_ebx & (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31))) ==
            (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31)))
          flags |= HAS_AVX512;
      }
    }
  }
  return flags & mask;
}
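
// Typical use (illustrative only): gate a SIMD code path on the detected
// capabilities.
//
//   const int caps = x86_simd_caps();
//   if (caps & HAS_AVX2) {
//     // dispatch to an AVX2 implementation
//   }
//
// For testing, VPX_SIMD_CAPS forces a specific flag set and
// VPX_SIMD_CAPS_MASK clears bits; e.g. VPX_SIMD_CAPS_MASK=0 disables all
// SIMD paths.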

// Fine-Grained Measurement Functions
//
// If you are timing a small region of code, access the timestamp counter
// (TSC) via:
//
// unsigned int start = x86_tsc_start();
// ...
// unsigned int end = x86_tsc_end();
// unsigned int diff = end - start;
//
// The start/end functions introduce a few more instructions than using
// x86_readtsc directly, but prevent the CPU's out-of-order execution from
// affecting the measurement (by having earlier/later instructions be
// evaluated in the time interval). See the white paper, "How to Benchmark
// Code Execution Times on Intel® IA-32 and IA-64 Instruction Set
// Architectures" by Gabriele Paoloni for more information.
//
// If you are timing a large function (CPU time > a couple of seconds), use
// x86_readtsc64 to read the timestamp counter in a 64-bit integer. The
// out-of-order leakage that can occur is minimal compared to total runtime.
static INLINE unsigned int x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
  unsigned int tsc;
  // rdtsc also writes edx; declare it clobbered so the compiler does not
  // assume it is preserved.
  __asm__ __volatile__("rdtsc\n\t" : "=a"(tsc) : : "edx");
  return tsc;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  unsigned int tsc;
  asm volatile("rdtsc\n\t" : "=a"(tsc) : : "edx");
  return tsc;
#else
#if ARCH_X86_64
  return (unsigned int)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}

// 64-bit CPU cycle counter
static INLINE uint64_t x86_readtsc64(void) {
#if defined(__GNUC__) && __GNUC__
  uint32_t hi, lo;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  uint_t hi, lo;
  asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#else
#if ARCH_X86_64
  return (uint64_t)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}

// 32-bit CPU cycle counter with a partial fence against out-of-order
// execution.
static INLINE unsigned int x86_readtscp(void) {
#if defined(__GNUC__) && __GNUC__
  unsigned int tscp;
  // rdtscp also writes ecx (the processor ID) and edx; mark both clobbered.
  __asm__ __volatile__("rdtscp\n\t" : "=a"(tscp) : : "ecx", "edx");
  return tscp;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  unsigned int tscp;
  asm volatile("rdtscp\n\t" : "=a"(tscp) : : "ecx", "edx");
  return tscp;
#elif defined(_MSC_VER)
  unsigned int ui;
  return (unsigned int)__rdtscp(&ui);
#else
#if ARCH_X86_64
  unsigned int ui;
  return (unsigned int)__rdtscp(&ui);  // __rdtscp takes an out parameter.
#else
  __asm rdtscp;
#endif
#endif
}

static INLINE unsigned int x86_tsc_start(void) {
  unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
  // cpuid serializes execution, keeping earlier instructions from leaking
  // into the timed region.
  cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
  return x86_readtsc();
}

static INLINE unsigned int x86_tsc_end(void) {
  uint32_t v = x86_readtscp();
  unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
  // Serialize again so later instructions cannot move before the read.
  cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
  return v;
}
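
// For long-running code (per the note above), a 64-bit read avoids counter
// wraparound. A sketch, where long_running_function is a hypothetical
// workload:
//
//   const uint64_t t0 = x86_readtsc64();
//   long_running_function();
//   const uint64_t cycles = x86_readtsc64() - t0;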

#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint() __asm__ __volatile__("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint() asm volatile("pause \n\t")
#else
#if ARCH_X86_64
#define x86_pause_hint() _mm_pause();
#else
#define x86_pause_hint() __asm pause
#endif
#endif
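
// Intended use (a sketch): relax the pipeline inside a spin-wait loop to
// reduce power and cross-thread contention. `done` here is a hypothetical
// flag set by another thread:
//
//   while (!done) x86_pause_hint();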

#if defined(__GNUC__) && __GNUC__
static void x87_set_control_word(unsigned short mode) {
  __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
}
static unsigned short x87_get_control_word(void) {
  unsigned short mode;
  __asm__ __volatile__("fstcw %0\n\t" : "=m"(*&mode) :);
  return mode;
}
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
static void x87_set_control_word(unsigned short mode) {
  asm volatile("fldcw %0" : : "m"(*&mode));
}
static unsigned short x87_get_control_word(void) {
  unsigned short mode;
  asm volatile("fstcw %0\n\t" : "=m"(*&mode) :);
  return mode;
}
#elif ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm. */
extern void vpx_winx64_fldcw(unsigned short mode);
extern unsigned short vpx_winx64_fstcw(void);
#define x87_set_control_word vpx_winx64_fldcw
#define x87_get_control_word vpx_winx64_fstcw
#else
static void x87_set_control_word(unsigned short mode) {
  __asm { fldcw mode }
}
static unsigned short x87_get_control_word(void) {
  unsigned short mode;
  __asm { fstcw mode }
  return mode;
}
#endif

static INLINE unsigned int x87_set_double_precision(void) {
  unsigned int mode = x87_get_control_word();
  // Intel 64 and IA-32 Architectures Developer's Manual: Vol. 1
  // https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-vol-1-manual.pdf
  //
  // 8.1.5.2 Precision Control Field
  // Bits 8 and 9 (0x300) of the x87 FPU Control Word ("Precision Control")
  // determine the number of bits used in floating point calculations. To
  // match later SSE instructions, restrict x87 operations to Double
  // Precision (0x200).
  //
  //   Precision                     PC Field
  //   Single Precision (24 bits)    00B
  //   Reserved                      01B
  //   Double Precision (53 bits)    10B
  //   Extended Precision (64 bits)  11B
  x87_set_control_word((mode & ~0x300) | 0x200);
  return mode;
}
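
// Suggested save/restore pattern (illustrative): the return value is the
// previous control word, so callers can undo the change afterwards.
//
//   const unsigned int saved = x87_set_double_precision();
//   // ... x87-sensitive floating point code ...
//   x87_set_control_word((unsigned short)saved);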

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // VPX_VPX_PORTS_X86_H_