  1. ;*****************************************************************************
  2. ;* x86inc.asm: x264asm abstraction layer
  3. ;*****************************************************************************
  4. ;* Copyright (C) 2005-2018 x264 project
  5. ;*
  6. ;* Authors: Loren Merritt <lorenm@u.washington.edu>
  7. ;* Henrik Gramner <henrik@gramner.com>
  8. ;* Anton Mitrofanov <BugMaster@narod.ru>
  9. ;* Fiona Glaser <fiona@x264.com>
  10. ;*
  11. ;* Permission to use, copy, modify, and/or distribute this software for any
  12. ;* purpose with or without fee is hereby granted, provided that the above
  13. ;* copyright notice and this permission notice appear in all copies.
  14. ;*
  15. ;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  16. ;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  17. ;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  18. ;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  19. ;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  20. ;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  21. ;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  22. ;*****************************************************************************
  23. ; This is a header file for the x264ASM assembly language, which uses
  24. ; NASM/YASM syntax combined with a large number of macros to provide easy
  25. ; abstraction between different calling conventions (x86_32, win64, linux64).
  26. ; It also has various other useful features to simplify writing the kind of
  27. ; DSP functions that are most often used in x264.
  28. ; Unlike the rest of x264, this file is available under an ISC license, as it
  29. ; has significant usefulness outside of x264 and we want it to be available
  30. ; to the largest audience possible. Of course, if you modify it for your own
  31. ; purposes to add a new feature, we strongly encourage contributing a patch
  32. ; as this feature might be useful for others as well. Send patches or ideas
  33. ; to x264-devel@videolan.org .
  34. %ifndef private_prefix
  35. %define private_prefix x264
  36. %endif
  37. %ifndef public_prefix
  38. %define public_prefix private_prefix
  39. %endif
  40. %if HAVE_ALIGNED_STACK
  41. %define STACK_ALIGNMENT 16
  42. %endif
  43. %ifndef STACK_ALIGNMENT
  44. %if ARCH_X86_64
  45. %define STACK_ALIGNMENT 16
  46. %else
  47. %define STACK_ALIGNMENT 4
  48. %endif
  49. %endif
  50. %define WIN64 0
  51. %define UNIX64 0
  52. %if ARCH_X86_64
  53. %ifidn __OUTPUT_FORMAT__,win32
  54. %define WIN64 1
  55. %elifidn __OUTPUT_FORMAT__,win64
  56. %define WIN64 1
  57. %elifidn __OUTPUT_FORMAT__,x64
  58. %define WIN64 1
  59. %else
  60. %define UNIX64 1
  61. %endif
  62. %endif
  63. %define FORMAT_ELF 0
  64. %ifidn __OUTPUT_FORMAT__,elf
  65. %define FORMAT_ELF 1
  66. %elifidn __OUTPUT_FORMAT__,elf32
  67. %define FORMAT_ELF 1
  68. %elifidn __OUTPUT_FORMAT__,elf64
  69. %define FORMAT_ELF 1
  70. %endif
  71. %ifdef PREFIX
  72. %define mangle(x) _ %+ x
  73. %else
  74. %define mangle(x) x
  75. %endif
  76. ; aout does not support align=
  77. ; NOTE: This section is out of sync with x264, in order to
  78. ; keep supporting OS/2.
  79. %macro SECTION_RODATA 0-1 16
  80. %ifidn __OUTPUT_FORMAT__,aout
  81. SECTION .text
  82. %elifidn __OUTPUT_FORMAT__,coff
  83. SECTION .text
  84. %elifidn __OUTPUT_FORMAT__,win32
  85. SECTION .rdata align=%1
  86. %elif WIN64
  87. SECTION .rdata align=%1
  88. %else
  89. SECTION .rodata align=%1
  90. %endif
  91. %endmacro
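; For example (illustrative sketch only, not part of this file): a constant
; block with 32-byte alignment suitable for AVX2 loads could be declared as
;     SECTION_RODATA 32
;     pw_1: times 16 dw 1   ; hypothetical constant name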
  92. %if WIN64
  93. %define PIC
  94. %elif ARCH_X86_64 == 0
  95. ; x86_32 doesn't require PIC.
  96. ; Some distros prefer shared objects to be PIC, but nothing breaks if
  97. ; the code contains a few textrels, so we'll skip that complexity.
  98. %undef PIC
  99. %endif
  100. %ifdef PIC
  101. default rel
  102. %endif
  103. %macro CPUNOP 1
  104. %if HAVE_CPUNOP
  105. CPU %1
  106. %endif
  107. %endmacro
  108. ; Macros to eliminate most code duplication between x86_32 and x86_64:
  109. ; Currently this works only for leaf functions which load all their arguments
  110. ; into registers at the start, and make no other use of the stack. Luckily that
  111. ; covers most of x264's asm.
  112. ; PROLOGUE:
  113. ; %1 = number of arguments. loads them from stack if needed.
  114. ; %2 = number of registers used. pushes callee-saved regs if needed.
  115. ; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
  116. ; %4 = (optional) stack size to be allocated. The stack will be aligned before
  117. ; allocating the specified stack size. If the required stack alignment is
  118. ; larger than the known stack alignment the stack will be manually aligned
  119. ; and an extra register will be allocated to hold the original stack
  120. ; pointer (to not invalidate r0m etc.). To prevent the use of an extra
  121. ; register as stack pointer, request a negative stack size.
  122. ; %4+/%5+ = list of names to define to registers
  123. ; PROLOGUE can also be invoked by adding the same options to cglobal
  124. ; e.g.
  125. ; cglobal foo, 2,3,7,0x40, dst, src, tmp
  126. ; declares a function (foo) that automatically loads two arguments (dst and
  127. ; src) into registers, uses one additional register (tmp) plus 7 vector
  128. ; registers (m0-m6) and allocates 0x40 bytes of stack space.
  129. ; TODO Some functions can use some args directly from the stack. If they're the
  130. ; last args then you can just not declare them, but if they're in the middle
  131. ; we need a more flexible macro.
  132. ; RET:
  133. ; Pops anything that was pushed by PROLOGUE, and returns.
  134. ; REP_RET:
  135. ; Use this instead of RET if it's a branch target.
  136. ; registers:
  137. ; rN and rNq are the native-size register holding function argument N
  138. ; rNd, rNw, rNb are dword, word, and byte size
  139. ; rNh is the high 8 bits of the word size
  140. ; rNm is the original location of arg N (a register or on the stack), dword
  141. ; rNmp is native size
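; As an illustrative sketch (hypothetical function name, not part of this file),
; a function declared as in the cglobal example above might look like:
;     cglobal foo, 2,3,7,0x40, dst, src, tmp
;         mov   tmpd, [srcq]    ; named args: dstq/srcq/tmpq plus d/w/b sizes
;         mov   [dstq], tmpd
;         RET                   ; pops pushed regs, frees stack, returns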
  142. %macro DECLARE_REG 2-3
  143. %define r%1q %2
  144. %define r%1d %2d
  145. %define r%1w %2w
  146. %define r%1b %2b
  147. %define r%1h %2h
  148. %define %2q %2
  149. %if %0 == 2
  150. %define r%1m %2d
  151. %define r%1mp %2
  152. %elif ARCH_X86_64 ; memory
  153. %define r%1m [rstk + stack_offset + %3]
  154. %define r%1mp qword r %+ %1 %+ m
  155. %else
  156. %define r%1m [rstk + stack_offset + %3]
  157. %define r%1mp dword r %+ %1 %+ m
  158. %endif
  159. %define r%1 %2
  160. %endmacro
  161. %macro DECLARE_REG_SIZE 3
  162. %define r%1q r%1
  163. %define e%1q r%1
  164. %define r%1d e%1
  165. %define e%1d e%1
  166. %define r%1w %1
  167. %define e%1w %1
  168. %define r%1h %3
  169. %define e%1h %3
  170. %define r%1b %2
  171. %define e%1b %2
  172. %if ARCH_X86_64 == 0
  173. %define r%1 e%1
  174. %endif
  175. %endmacro
  176. DECLARE_REG_SIZE ax, al, ah
  177. DECLARE_REG_SIZE bx, bl, bh
  178. DECLARE_REG_SIZE cx, cl, ch
  179. DECLARE_REG_SIZE dx, dl, dh
  180. DECLARE_REG_SIZE si, sil, null
  181. DECLARE_REG_SIZE di, dil, null
  182. DECLARE_REG_SIZE bp, bpl, null
  183. ; t# defines for when per-arch register allocation is more complex than just function arguments
  184. %macro DECLARE_REG_TMP 1-*
  185. %assign %%i 0
  186. %rep %0
  187. CAT_XDEFINE t, %%i, r%1
  188. %assign %%i %%i+1
  189. %rotate 1
  190. %endrep
  191. %endmacro
  192. %macro DECLARE_REG_TMP_SIZE 0-*
  193. %rep %0
  194. %define t%1q t%1 %+ q
  195. %define t%1d t%1 %+ d
  196. %define t%1w t%1 %+ w
  197. %define t%1h t%1 %+ h
  198. %define t%1b t%1 %+ b
  199. %rotate 1
  200. %endrep
  201. %endmacro
  202. DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
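; For example (illustrative only): "DECLARE_REG_TMP 2,3,5" makes t0 an alias
; for r2, t1 for r3 and t2 for r5, with t0q/t0d/t0w/t0h/t0b etc. provided by
; DECLARE_REG_TMP_SIZE above.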
  203. %if ARCH_X86_64
  204. %define gprsize 8
  205. %else
  206. %define gprsize 4
  207. %endif
  208. %macro PUSH 1
  209. push %1
  210. %ifidn rstk, rsp
  211. %assign stack_offset stack_offset+gprsize
  212. %endif
  213. %endmacro
  214. %macro POP 1
  215. pop %1
  216. %ifidn rstk, rsp
  217. %assign stack_offset stack_offset-gprsize
  218. %endif
  219. %endmacro
  220. %macro PUSH_IF_USED 1-*
  221. %rep %0
  222. %if %1 < regs_used
  223. PUSH r%1
  224. %endif
  225. %rotate 1
  226. %endrep
  227. %endmacro
  228. %macro POP_IF_USED 1-*
  229. %rep %0
  230. %if %1 < regs_used
  231. pop r%1
  232. %endif
  233. %rotate 1
  234. %endrep
  235. %endmacro
  236. %macro LOAD_IF_USED 1-*
  237. %rep %0
  238. %if %1 < num_args
  239. mov r%1, r %+ %1 %+ mp
  240. %endif
  241. %rotate 1
  242. %endrep
  243. %endmacro
  244. %macro SUB 2
  245. sub %1, %2
  246. %ifidn %1, rstk
  247. %assign stack_offset stack_offset+(%2)
  248. %endif
  249. %endmacro
  250. %macro ADD 2
  251. add %1, %2
  252. %ifidn %1, rstk
  253. %assign stack_offset stack_offset-(%2)
  254. %endif
  255. %endmacro
  256. %macro movifnidn 2
  257. %ifnidn %1, %2
  258. mov %1, %2
  259. %endif
  260. %endmacro
  261. %macro movsxdifnidn 2
  262. %ifnidn %1, %2
  263. movsxd %1, %2
  264. %endif
  265. %endmacro
  266. %macro ASSERT 1
  267. %if (%1) == 0
  268. %error assertion ``%1'' failed
  269. %endif
  270. %endmacro
  271. %macro DEFINE_ARGS 0-*
  272. %ifdef n_arg_names
  273. %assign %%i 0
  274. %rep n_arg_names
  275. CAT_UNDEF arg_name %+ %%i, q
  276. CAT_UNDEF arg_name %+ %%i, d
  277. CAT_UNDEF arg_name %+ %%i, w
  278. CAT_UNDEF arg_name %+ %%i, h
  279. CAT_UNDEF arg_name %+ %%i, b
  280. CAT_UNDEF arg_name %+ %%i, m
  281. CAT_UNDEF arg_name %+ %%i, mp
  282. CAT_UNDEF arg_name, %%i
  283. %assign %%i %%i+1
  284. %endrep
  285. %endif
  286. %xdefine %%stack_offset stack_offset
  287. %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
  288. %assign %%i 0
  289. %rep %0
  290. %xdefine %1q r %+ %%i %+ q
  291. %xdefine %1d r %+ %%i %+ d
  292. %xdefine %1w r %+ %%i %+ w
  293. %xdefine %1h r %+ %%i %+ h
  294. %xdefine %1b r %+ %%i %+ b
  295. %xdefine %1m r %+ %%i %+ m
  296. %xdefine %1mp r %+ %%i %+ mp
  297. CAT_XDEFINE arg_name, %%i, %1
  298. %assign %%i %%i+1
  299. %rotate 1
  300. %endrep
  301. %xdefine stack_offset %%stack_offset
  302. %assign n_arg_names %0
  303. %endmacro
  304. %define required_stack_alignment ((mmsize + 15) & ~15)
  305. %define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
  306. %define high_mm_regs (16*cpuflag(avx512))
  307. %macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
  308. %ifnum %1
  309. %if %1 != 0
  310. %assign %%pad 0
  311. %assign stack_size %1
  312. %if stack_size < 0
  313. %assign stack_size -stack_size
  314. %endif
  315. %if WIN64
  316. %assign %%pad %%pad + 32 ; shadow space
  317. %if mmsize != 8
  318. %assign xmm_regs_used %2
  319. %if xmm_regs_used > 8
  320. %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
  321. %endif
  322. %endif
  323. %endif
  324. %if required_stack_alignment <= STACK_ALIGNMENT
  325. ; maintain the current stack alignment
  326. %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
  327. SUB rsp, stack_size_padded
  328. %else
  329. %assign %%reg_num (regs_used - 1)
  330. %xdefine rstk r %+ %%reg_num
  331. ; align stack, and save original stack location directly above
  332. ; it, i.e. in [rsp+stack_size_padded], so we can restore the
  333. ; stack in a single instruction (i.e. mov rsp, rstk or mov
  334. ; rsp, [rsp+stack_size_padded])
  335. %if %1 < 0 ; need to store rsp on stack
  336. %xdefine rstkm [rsp + stack_size + %%pad]
  337. %assign %%pad %%pad + gprsize
  338. %else ; can keep rsp in rstk during whole function
  339. %xdefine rstkm rstk
  340. %endif
  341. %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
  342. mov rstk, rsp
  343. and rsp, ~(required_stack_alignment-1)
  344. sub rsp, stack_size_padded
  345. movifnidn rstkm, rstk
  346. %endif
  347. WIN64_PUSH_XMM
  348. %endif
  349. %endif
  350. %endmacro
  351. %macro SETUP_STACK_POINTER 1
  352. %ifnum %1
  353. %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
  354. %if %1 > 0
  355. ; Reserve an additional register for storing the original stack pointer, but avoid using
  356. ; eax/rax for this purpose since it can potentially get overwritten as a return value.
  357. %assign regs_used (regs_used + 1)
  358. %if ARCH_X86_64 && regs_used == 7
  359. %assign regs_used 8
  360. %elif ARCH_X86_64 == 0 && regs_used == 1
  361. %assign regs_used 2
  362. %endif
  363. %endif
  364. %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
  365. ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
  366. ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
  367. %assign regs_used 5 + UNIX64 * 3
  368. %endif
  369. %endif
  370. %endif
  371. %endmacro
  372. %macro DEFINE_ARGS_INTERNAL 3+
  373. %ifnum %2
  374. DEFINE_ARGS %3
  375. %elif %1 == 4
  376. DEFINE_ARGS %2
  377. %elif %1 > 4
  378. DEFINE_ARGS %2, %3
  379. %endif
  380. %endmacro
  381. %if WIN64 ; Windows x64 ;=================================================
  382. DECLARE_REG 0, rcx
  383. DECLARE_REG 1, rdx
  384. DECLARE_REG 2, R8
  385. DECLARE_REG 3, R9
  386. DECLARE_REG 4, R10, 40
  387. DECLARE_REG 5, R11, 48
  388. DECLARE_REG 6, rax, 56
  389. DECLARE_REG 7, rdi, 64
  390. DECLARE_REG 8, rsi, 72
  391. DECLARE_REG 9, rbx, 80
  392. DECLARE_REG 10, rbp, 88
  393. DECLARE_REG 11, R14, 96
  394. DECLARE_REG 12, R15, 104
  395. DECLARE_REG 13, R12, 112
  396. DECLARE_REG 14, R13, 120
  397. %macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
  398. %assign num_args %1
  399. %assign regs_used %2
  400. ASSERT regs_used >= num_args
  401. SETUP_STACK_POINTER %4
  402. ASSERT regs_used <= 15
  403. PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
  404. ALLOC_STACK %4, %3
  405. %if mmsize != 8 && stack_size == 0
  406. WIN64_SPILL_XMM %3
  407. %endif
  408. LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
  409. DEFINE_ARGS_INTERNAL %0, %4, %5
  410. %endmacro
  411. %macro WIN64_PUSH_XMM 0
  412. ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
  413. %if xmm_regs_used > 6 + high_mm_regs
  414. movaps [rstk + stack_offset + 8], xmm6
  415. %endif
  416. %if xmm_regs_used > 7 + high_mm_regs
  417. movaps [rstk + stack_offset + 24], xmm7
  418. %endif
  419. %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
  420. %if %%xmm_regs_on_stack > 0
  421. %assign %%i 8
  422. %rep %%xmm_regs_on_stack
  423. movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
  424. %assign %%i %%i+1
  425. %endrep
  426. %endif
  427. %endmacro
  428. %macro WIN64_SPILL_XMM 1
  429. %assign xmm_regs_used %1
  430. ASSERT xmm_regs_used <= 16 + high_mm_regs
  431. %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
  432. %if %%xmm_regs_on_stack > 0
  433. ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
  434. %assign %%pad %%xmm_regs_on_stack*16 + 32
  435. %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
  436. SUB rsp, stack_size_padded
  437. %endif
  438. WIN64_PUSH_XMM
  439. %endmacro
  440. %macro WIN64_RESTORE_XMM_INTERNAL 0
  441. %assign %%pad_size 0
  442. %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
  443. %if %%xmm_regs_on_stack > 0
  444. %assign %%i xmm_regs_used - high_mm_regs
  445. %rep %%xmm_regs_on_stack
  446. %assign %%i %%i-1
  447. movaps xmm %+ %%i, [rsp + (%%i-8)*16 + stack_size + 32]
  448. %endrep
  449. %endif
  450. %if stack_size_padded > 0
  451. %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
  452. mov rsp, rstkm
  453. %else
  454. add rsp, stack_size_padded
  455. %assign %%pad_size stack_size_padded
  456. %endif
  457. %endif
  458. %if xmm_regs_used > 7 + high_mm_regs
  459. movaps xmm7, [rsp + stack_offset - %%pad_size + 24]
  460. %endif
  461. %if xmm_regs_used > 6 + high_mm_regs
  462. movaps xmm6, [rsp + stack_offset - %%pad_size + 8]
  463. %endif
  464. %endmacro
  465. %macro WIN64_RESTORE_XMM 0
  466. WIN64_RESTORE_XMM_INTERNAL
  467. %assign stack_offset (stack_offset-stack_size_padded)
  468. %assign stack_size_padded 0
  469. %assign xmm_regs_used 0
  470. %endmacro
  471. %define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs
  472. %macro RET 0
  473. WIN64_RESTORE_XMM_INTERNAL
  474. POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
  475. %if vzeroupper_required
  476. vzeroupper
  477. %endif
  478. AUTO_REP_RET
  479. %endmacro
  480. %elif ARCH_X86_64 ; *nix x64 ;=============================================
  481. DECLARE_REG 0, rdi
  482. DECLARE_REG 1, rsi
  483. DECLARE_REG 2, rdx
  484. DECLARE_REG 3, rcx
  485. DECLARE_REG 4, R8
  486. DECLARE_REG 5, R9
  487. DECLARE_REG 6, rax, 8
  488. DECLARE_REG 7, R10, 16
  489. DECLARE_REG 8, R11, 24
  490. DECLARE_REG 9, rbx, 32
  491. DECLARE_REG 10, rbp, 40
  492. DECLARE_REG 11, R14, 48
  493. DECLARE_REG 12, R15, 56
  494. DECLARE_REG 13, R12, 64
  495. DECLARE_REG 14, R13, 72
  496. %macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
  497. %assign num_args %1
  498. %assign regs_used %2
  499. %assign xmm_regs_used %3
  500. ASSERT regs_used >= num_args
  501. SETUP_STACK_POINTER %4
  502. ASSERT regs_used <= 15
  503. PUSH_IF_USED 9, 10, 11, 12, 13, 14
  504. ALLOC_STACK %4
  505. LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
  506. DEFINE_ARGS_INTERNAL %0, %4, %5
  507. %endmacro
  508. %define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required
  509. %macro RET 0
  510. %if stack_size_padded > 0
  511. %if required_stack_alignment > STACK_ALIGNMENT
  512. mov rsp, rstkm
  513. %else
  514. add rsp, stack_size_padded
  515. %endif
  516. %endif
  517. POP_IF_USED 14, 13, 12, 11, 10, 9
  518. %if vzeroupper_required
  519. vzeroupper
  520. %endif
  521. AUTO_REP_RET
  522. %endmacro
  523. %else ; X86_32 ;==============================================================
  524. DECLARE_REG 0, eax, 4
  525. DECLARE_REG 1, ecx, 8
  526. DECLARE_REG 2, edx, 12
  527. DECLARE_REG 3, ebx, 16
  528. DECLARE_REG 4, esi, 20
  529. DECLARE_REG 5, edi, 24
  530. DECLARE_REG 6, ebp, 28
  531. %define rsp esp
  532. %macro DECLARE_ARG 1-*
  533. %rep %0
  534. %define r%1m [rstk + stack_offset + 4*%1 + 4]
  535. %define r%1mp dword r%1m
  536. %rotate 1
  537. %endrep
  538. %endmacro
  539. DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
  540. %macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
  541. %assign num_args %1
  542. %assign regs_used %2
  543. ASSERT regs_used >= num_args
  544. %if num_args > 7
  545. %assign num_args 7
  546. %endif
  547. %if regs_used > 7
  548. %assign regs_used 7
  549. %endif
  550. SETUP_STACK_POINTER %4
  551. ASSERT regs_used <= 7
  552. PUSH_IF_USED 3, 4, 5, 6
  553. ALLOC_STACK %4
  554. LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
  555. DEFINE_ARGS_INTERNAL %0, %4, %5
  556. %endmacro
  557. %define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required
  558. %macro RET 0
  559. %if stack_size_padded > 0
  560. %if required_stack_alignment > STACK_ALIGNMENT
  561. mov rsp, rstkm
  562. %else
  563. add rsp, stack_size_padded
  564. %endif
  565. %endif
  566. POP_IF_USED 6, 5, 4, 3
  567. %if vzeroupper_required
  568. vzeroupper
  569. %endif
  570. AUTO_REP_RET
  571. %endmacro
  572. %endif ;======================================================================
  573. %if WIN64 == 0
  574. %macro WIN64_SPILL_XMM 1
  575. %endmacro
  576. %macro WIN64_RESTORE_XMM 0
  577. %endmacro
  578. %macro WIN64_PUSH_XMM 0
  579. %endmacro
  580. %endif
  581. ; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
  582. ; a branch or a branch target. So switch to a 2-byte form of ret in that case.
  583. ; We can automatically detect "follows a branch", but not a branch target.
  584. ; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
  585. %macro REP_RET 0
  586. %if has_epilogue || cpuflag(ssse3)
  587. RET
  588. %else
  589. rep ret
  590. %endif
  591. annotate_function_size
  592. %endmacro
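; For example (illustrative sketch): a return that is itself a branch target
; cannot be detected automatically, so use REP_RET explicitly:
;         test  r2d, r2d
;         jz    .end
;         ; ...
;     .end:
;         REP_RET   ; .end is a branch target, which AUTO_REP_RET can't detect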
  593. %define last_branch_adr $$
  594. %macro AUTO_REP_RET 0
  595. %if notcpuflag(ssse3)
  596. times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
  597. %endif
  598. ret
  599. annotate_function_size
  600. %endmacro
  601. %macro BRANCH_INSTR 0-*
  602. %rep %0
  603. %macro %1 1-2 %1
  604. %2 %1
  605. %if notcpuflag(ssse3)
  606. %%branch_instr equ $
  607. %xdefine last_branch_adr %%branch_instr
  608. %endif
  609. %endmacro
  610. %rotate 1
  611. %endrep
  612. %endmacro
  613. BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
  614. %macro TAIL_CALL 2 ; callee, is_nonadjacent
  615. %if has_epilogue
  616. call %1
  617. RET
  618. %elif %2
  619. jmp %1
  620. %endif
  621. annotate_function_size
  622. %endmacro
  623. ;=============================================================================
  624. ; arch-independent part
  625. ;=============================================================================
  626. %assign function_align 16
  627. ; Begin a function.
  628. ; Applies any symbol mangling needed for C linkage, and sets up a define such that
  629. ; subsequent uses of the function name automatically refer to the mangled version.
  630. ; Appends cpuflags to the function name if cpuflags has been specified.
  631. ; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
  632. ; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
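; For example (hypothetical name, illustrative only): with "INIT_XMM sse2" in
; effect, "cglobal foo, 2,2" emits a function whose symbol is the mangled
; private_prefix name with the cpu suffix appended, e.g. x264_foo_sse2, and
; subsequent uses of "foo" in sse2 code refer to that mangled name.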
  633. %macro cglobal 1-2+ "" ; name, [PROLOGUE args]
  634. cglobal_internal 1, %1 %+ SUFFIX, %2
  635. %endmacro
  636. %macro cvisible 1-2+ "" ; name, [PROLOGUE args]
  637. cglobal_internal 0, %1 %+ SUFFIX, %2
  638. %endmacro
  639. %macro cglobal_internal 2-3+
  640. annotate_function_size
  641. %if %1
  642. %xdefine %%FUNCTION_PREFIX private_prefix
  643. %xdefine %%VISIBILITY hidden
  644. %else
  645. %xdefine %%FUNCTION_PREFIX public_prefix
  646. %xdefine %%VISIBILITY
  647. %endif
  648. %ifndef cglobaled_%2
  649. %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
  650. %xdefine %2.skip_prologue %2 %+ .skip_prologue
  651. CAT_XDEFINE cglobaled_, %2, 1
  652. %endif
  653. %xdefine current_function %2
  654. %xdefine current_function_section __SECT__
  655. %if FORMAT_ELF
  656. global %2:function %%VISIBILITY
  657. %else
  658. global %2
  659. %endif
  660. align function_align
  661. %2:
  662. RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
  663. %xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
  664. %assign stack_offset 0 ; stack pointer offset relative to the return address
  665. %assign stack_size 0 ; amount of stack space that can be freely used inside a function
  666. %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
  667. %assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 and vzeroupper
  668. %ifnidn %3, ""
  669. PROLOGUE %3
  670. %endif
  671. %endmacro
  672. ; Create a global symbol from a local label with the correct name mangling and type
  673. %macro cglobal_label 1
  674. %if FORMAT_ELF
  675. global current_function %+ %1:function hidden
  676. %else
  677. global current_function %+ %1
  678. %endif
  679. %1:
  680. %endmacro
  681. %macro cextern 1
  682. %xdefine %1 mangle(private_prefix %+ _ %+ %1)
  683. CAT_XDEFINE cglobaled_, %1, 1
  684. extern %1
  685. %endmacro
  686. ; like cextern, but without the prefix
  687. %macro cextern_naked 1
  688. %ifdef PREFIX
  689. %xdefine %1 mangle(%1)
  690. %endif
  691. CAT_XDEFINE cglobaled_, %1, 1
  692. extern %1
  693. %endmacro
  694. %macro const 1-2+
  695. %xdefine %1 mangle(private_prefix %+ _ %+ %1)
  696. %if FORMAT_ELF
  697. global %1:data hidden
  698. %else
  699. global %1
  700. %endif
  701. %1: %2
  702. %endmacro
  703. ; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
  704. %if FORMAT_ELF
  705. [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
  706. %endif
  707. ; Tell debuggers how large the function was.
  708. ; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
  709. ; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
  710. ; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
  711. ; then its size might be unspecified.
  712. %macro annotate_function_size 0
  713. %ifdef __YASM_VER__
  714. %ifdef current_function
  715. %if FORMAT_ELF
  716. current_function_section
  717. %%ecf equ $
  718. size current_function %%ecf - current_function
  719. __SECT__
  720. %endif
  721. %endif
  722. %endif
  723. %endmacro
  724. ; cpuflags
  725. %assign cpuflags_mmx (1<<0)
  726. %assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
  727. %assign cpuflags_3dnow (1<<2) | cpuflags_mmx
  728. %assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
  729. %assign cpuflags_sse (1<<4) | cpuflags_mmx2
  730. %assign cpuflags_sse2 (1<<5) | cpuflags_sse
  731. %assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
  732. %assign cpuflags_lzcnt (1<<7) | cpuflags_sse2
  733. %assign cpuflags_sse3 (1<<8) | cpuflags_sse2
  734. %assign cpuflags_ssse3 (1<<9) | cpuflags_sse3
  735. %assign cpuflags_sse4 (1<<10)| cpuflags_ssse3
  736. %assign cpuflags_sse42 (1<<11)| cpuflags_sse4
  737. %assign cpuflags_aesni (1<<12)| cpuflags_sse42
  738. %assign cpuflags_avx (1<<13)| cpuflags_sse42
  739. %assign cpuflags_xop (1<<14)| cpuflags_avx
  740. %assign cpuflags_fma4 (1<<15)| cpuflags_avx
  741. %assign cpuflags_fma3 (1<<16)| cpuflags_avx
  742. %assign cpuflags_bmi1 (1<<17)| cpuflags_avx|cpuflags_lzcnt
  743. %assign cpuflags_bmi2 (1<<18)| cpuflags_bmi1
  744. %assign cpuflags_avx2 (1<<19)| cpuflags_fma3|cpuflags_bmi2
  745. %assign cpuflags_avx512 (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
  746. %assign cpuflags_cache32 (1<<21)
  747. %assign cpuflags_cache64 (1<<22)
  748. %assign cpuflags_aligned (1<<23) ; not a cpu feature, but a function variant
  749. %assign cpuflags_atom (1<<24)
  750. ; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
  751. %define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
  752. %define notcpuflag(x) (cpuflag(x) ^ 1)
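; For example (illustrative only): code shared between several instruction-set
; variants can branch on these flags at assembly time:
;     %if cpuflag(ssse3)
;         pshufb m0, m1
;     %else
;         ; pre-SSSE3 fallback goes here
;     %endif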
  753. ; Takes an arbitrary number of cpuflags from the above list.
  754. ; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
  755. ; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX &co.
  756. %macro INIT_CPUFLAGS 0-*
  757. %xdefine SUFFIX
  758. %undef cpuname
  759. %assign cpuflags 0
  760. %if %0 >= 1
  761. %rep %0
  762. %ifdef cpuname
  763. %xdefine cpuname cpuname %+ _%1
  764. %else
  765. %xdefine cpuname %1
  766. %endif
  767. %assign cpuflags cpuflags | cpuflags_%1
  768. %rotate 1
  769. %endrep
  770. %xdefine SUFFIX _ %+ cpuname
  771. %if cpuflag(avx)
  772. %assign avx_enabled 1
  773. %endif
  774. %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
  775. %define mova movaps
  776. %define movu movups
  777. %define movnta movntps
  778. %endif
  779. %if cpuflag(aligned)
  780. %define movu mova
  781. %elif cpuflag(sse3) && notcpuflag(ssse3)
  782. %define movu lddqu
  783. %endif
  784. %endif
  785. %if ARCH_X86_64 || cpuflag(sse2)
  786. CPUNOP amdnop
  787. %else
  788. CPUNOP basicnop
  789. %endif
  790. %endmacro
  791. ; Merge mmx, sse*, and avx*
  792. ; m# is a simd register of the currently selected size
  793. ; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
  794. ; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
  795. ; zm# is the corresponding zmm register if mmsize >= 64, otherwise the same as m#
  796. ; (All 4 remain in sync through SWAP.)
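; For example (illustrative only): after INIT_YMM avx2, m0 refers to ymm0,
; xm0 to xmm0 and zm0 falls back to ymm0 (since mmsize < 64); after
; INIT_XMM sse2, both m0 and xm0 refer to xmm0 and ym0 also maps to xmm0.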
  797. %macro CAT_XDEFINE 3
  798. %xdefine %1%2 %3
  799. %endmacro
  800. %macro CAT_UNDEF 2
  801. %undef %1%2
  802. %endmacro
  803. %macro DEFINE_MMREGS 1 ; mmtype
  804. %assign %%prev_mmregs 0
  805. %ifdef num_mmregs
  806. %assign %%prev_mmregs num_mmregs
  807. %endif
  808. %assign num_mmregs 8
  809. %if ARCH_X86_64 && mmsize >= 16
  810. %assign num_mmregs 16
  811. %if cpuflag(avx512) || mmsize == 64
  812. %assign num_mmregs 32
  813. %endif
  814. %endif
  815. %assign %%i 0
  816. %rep num_mmregs
  817. CAT_XDEFINE m, %%i, %1 %+ %%i
  818. CAT_XDEFINE nn%1, %%i, %%i
  819. %assign %%i %%i+1
  820. %endrep
  821. %if %%prev_mmregs > num_mmregs
  822. %rep %%prev_mmregs - num_mmregs
  823. CAT_UNDEF m, %%i
  824. CAT_UNDEF nn %+ mmtype, %%i
  825. %assign %%i %%i+1
  826. %endrep
  827. %endif
  828. %xdefine mmtype %1
  829. %endmacro
  830. ; Prefer registers 16-31 over 0-15 to avoid having to use vzeroupper
  831. %macro AVX512_MM_PERMUTATION 0-1 0 ; start_reg
  832. %if ARCH_X86_64 && cpuflag(avx512)
  833. %assign %%i %1
  834. %rep 16-%1
  835. %assign %%i_high %%i+16
  836. SWAP %%i, %%i_high
  837. %assign %%i %%i+1
  838. %endrep
  839. %endif
  840. %endmacro
  841. %macro INIT_MMX 0-1+
  842. %assign avx_enabled 0
  843. %define RESET_MM_PERMUTATION INIT_MMX %1
  844. %define mmsize 8
  845. %define mova movq
  846. %define movu movq
  847. %define movh movd
  848. %define movnta movntq
  849. INIT_CPUFLAGS %1
  850. DEFINE_MMREGS mm
  851. %endmacro
  852. %macro INIT_XMM 0-1+
  853. %assign avx_enabled 0
  854. %define RESET_MM_PERMUTATION INIT_XMM %1
  855. %define mmsize 16
  856. %define mova movdqa
  857. %define movu movdqu
  858. %define movh movq
  859. %define movnta movntdq
  860. INIT_CPUFLAGS %1
  861. DEFINE_MMREGS xmm
  862. %if WIN64
  863. AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
  864. %endif
  865. %endmacro
  866. %macro INIT_YMM 0-1+
  867. %assign avx_enabled 1
  868. %define RESET_MM_PERMUTATION INIT_YMM %1
  869. %define mmsize 32
  870. %define mova movdqa
  871. %define movu movdqu
  872. %undef movh
  873. %define movnta movntdq
  874. INIT_CPUFLAGS %1
  875. DEFINE_MMREGS ymm
  876. AVX512_MM_PERMUTATION
  877. %endmacro
  878. %macro INIT_ZMM 0-1+
  879. %assign avx_enabled 1
  880. %define RESET_MM_PERMUTATION INIT_ZMM %1
  881. %define mmsize 64
  882. %define mova movdqa
  883. %define movu movdqu
  884. %undef movh
  885. %define movnta movntdq
  886. INIT_CPUFLAGS %1
  887. DEFINE_MMREGS zmm
  888. AVX512_MM_PERMUTATION
  889. %endmacro
  890. INIT_XMM
  891. %macro DECLARE_MMCAST 1
  892. %define mmmm%1 mm%1
  893. %define mmxmm%1 mm%1
  894. %define mmymm%1 mm%1
  895. %define mmzmm%1 mm%1
  896. %define xmmmm%1 mm%1
  897. %define xmmxmm%1 xmm%1
  898. %define xmmymm%1 xmm%1
  899. %define xmmzmm%1 xmm%1
  900. %define ymmmm%1 mm%1
  901. %define ymmxmm%1 xmm%1
  902. %define ymmymm%1 ymm%1
  903. %define ymmzmm%1 ymm%1
  904. %define zmmmm%1 mm%1
  905. %define zmmxmm%1 xmm%1
  906. %define zmmymm%1 ymm%1
  907. %define zmmzmm%1 zmm%1
  908. %define xm%1 xmm %+ m%1
  909. %define ym%1 ymm %+ m%1
  910. %define zm%1 zmm %+ m%1
  911. %endmacro
  912. %assign i 0
  913. %rep 32
  914. DECLARE_MMCAST i
  915. %assign i i+1
  916. %endrep
  917. ; I often want to use macros that permute their arguments. e.g. there's no
  918. ; efficient way to implement butterfly or transpose or dct without swapping some
  919. ; arguments.
  920. ;
  921. ; I would like to not have to manually keep track of the permutations:
  922. ; If I insert a permutation in the middle of a function, it should automatically
  923. ; change everything that follows. For more complex macros I may also have multiple
  924. ; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
  925. ;
  926. ; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
  927. ; permutes its arguments. It's equivalent to exchanging the contents of the
  928. ; registers, except that this way you exchange the register names instead, so it
  929. ; doesn't cost any cycles.
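; For example (illustrative only): a helper macro that leaves its result in m1
; instead of m0 can end with "SWAP 0, 1"; callers keep referring to m0, and the
; register names are exchanged at assembly time with no move instruction emitted.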
  930. %macro PERMUTE 2-* ; takes a list of pairs to swap
  931. %rep %0/2
  932. %xdefine %%tmp%2 m%2
  933. %rotate 2
  934. %endrep
  935. %rep %0/2
  936. %xdefine m%1 %%tmp%2
  937. CAT_XDEFINE nn, m%1, %1
  938. %rotate 2
  939. %endrep
  940. %endmacro
  941. %macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
  942. %ifnum %1 ; SWAP 0, 1, ...
  943. SWAP_INTERNAL_NUM %1, %2
  944. %else ; SWAP m0, m1, ...
  945. SWAP_INTERNAL_NAME %1, %2
  946. %endif
  947. %endmacro
  948. %macro SWAP_INTERNAL_NUM 2-*
  949. %rep %0-1
  950. %xdefine %%tmp m%1
  951. %xdefine m%1 m%2
  952. %xdefine m%2 %%tmp
  953. CAT_XDEFINE nn, m%1, %1
  954. CAT_XDEFINE nn, m%2, %2
  955. %rotate 1
  956. %endrep
  957. %endmacro
  958. %macro SWAP_INTERNAL_NAME 2-*
  959. %xdefine %%args nn %+ %1
  960. %rep %0-1
  961. %xdefine %%args %%args, nn %+ %2
  962. %rotate 1
  963. %endrep
  964. SWAP_INTERNAL_NUM %%args
  965. %endmacro
  966. ; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
  967. ; calls to that function will automatically load the permutation, so values can
  968. ; be returned in mmregs.
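; For example (hypothetical name, illustrative only): if foo_internal ends with
; SAVE_MM_PERMUTATION, a later "call foo_internal" (via the call macro below)
; reloads that permutation, so a value left in what foo_internal called m0 is
; still addressed as m0 by the caller.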
  969. %macro SAVE_MM_PERMUTATION 0-1
  970. %if %0
  971. %xdefine %%f %1_m
  972. %else
  973. %xdefine %%f current_function %+ _m
  974. %endif
  975. %assign %%i 0
  976. %rep num_mmregs
  977. CAT_XDEFINE %%f, %%i, m %+ %%i
  978. %assign %%i %%i+1
  979. %endrep
  980. %endmacro
  981. %macro LOAD_MM_PERMUTATION 1 ; name to load from
  982. %ifdef %1_m0
  983. %assign %%i 0
  984. %rep num_mmregs
  985. CAT_XDEFINE m, %%i, %1_m %+ %%i
  986. CAT_XDEFINE nn, m %+ %%i, %%i
  987. %assign %%i %%i+1
  988. %endrep
  989. %endif
  990. %endmacro
  991. ; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
  992. %macro call 1
  993. %ifid %1
  994. call_internal %1 %+ SUFFIX, %1
  995. %else
  996. call %1
  997. %endif
  998. %endmacro
  999. %macro call_internal 2
  1000. %xdefine %%i %2
  1001. %ifndef cglobaled_%2
  1002. %ifdef cglobaled_%1
  1003. %xdefine %%i %1
  1004. %endif
  1005. %endif
  1006. call %%i
  1007. LOAD_MM_PERMUTATION %%i
  1008. %endmacro
  1009. ; Substitutions that reduce instruction size but are functionally equivalent
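; For example: "add r0, 128" needs a 4-byte immediate, whereas the equivalent
; "sub r0, -128" fits in a sign-extended 1-byte immediate, so the macros below
; perform that substitution automatically (and vice versa for sub).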
  1010. %macro add 2
  1011. %ifnum %2
  1012. %if %2==128
  1013. sub %1, -128
  1014. %else
  1015. add %1, %2
  1016. %endif
  1017. %else
  1018. add %1, %2
  1019. %endif
  1020. %endmacro
  1021. %macro sub 2
  1022. %ifnum %2
  1023. %if %2==128
  1024. add %1, -128
  1025. %else
  1026. sub %1, %2
  1027. %endif
  1028. %else
  1029. sub %1, %2
  1030. %endif
  1031. %endmacro
  1032. ;=============================================================================
  1033. ; AVX abstraction layer
  1034. ;=============================================================================
  1035. %assign i 0
  1036. %rep 32
  1037. %if i < 8
  1038. CAT_XDEFINE sizeofmm, i, 8
  1039. CAT_XDEFINE regnumofmm, i, i
  1040. %endif
  1041. CAT_XDEFINE sizeofxmm, i, 16
  1042. CAT_XDEFINE sizeofymm, i, 32
  1043. CAT_XDEFINE sizeofzmm, i, 64
  1044. CAT_XDEFINE regnumofxmm, i, i
  1045. CAT_XDEFINE regnumofymm, i, i
  1046. CAT_XDEFINE regnumofzmm, i, i
  1047. %assign i i+1
  1048. %endrep
  1049. %undef i
  1050. %macro CHECK_AVX_INSTR_EMU 3-*
  1051. %xdefine %%opcode %1
  1052. %xdefine %%dst %2
  1053. %rep %0-2
  1054. %ifidn %%dst, %3
  1055. %error non-avx emulation of ``%%opcode'' is not supported
  1056. %endif
  1057. %rotate 1
  1058. %endrep
  1059. %endmacro
  1060. ;%1 == instruction
  1061. ;%2 == minimal instruction set
  1062. ;%3 == 1 if float, 0 if int
  1063. ;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
  1064. ;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
  1065. ;%6+: operands
  1066. %macro RUN_AVX_INSTR 6-9+
  1067. %ifnum sizeof%7
  1068. %assign __sizeofreg sizeof%7
  1069. %elifnum sizeof%6
  1070. %assign __sizeofreg sizeof%6
  1071. %else
  1072. %assign __sizeofreg mmsize
  1073. %endif
  1074. %assign __emulate_avx 0
  1075. %if avx_enabled && __sizeofreg >= 16
  1076. %xdefine __instr v%1
  1077. %else
  1078. %xdefine __instr %1
  1079. %if %0 >= 8+%4
  1080. %assign __emulate_avx 1
  1081. %endif
  1082. %endif
  1083. %ifnidn %2, fnord
  1084. %ifdef cpuname
  1085. %if notcpuflag(%2)
  1086. %error use of ``%1'' %2 instruction in cpuname function: current_function
  1087. %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
  1088. %error use of ``%1'' sse2 instruction in cpuname function: current_function
  1089. %endif
  1090. %endif
  1091. %endif
  1092. %if __emulate_avx
  1093. %xdefine __src1 %7
  1094. %xdefine __src2 %8
  1095. %if %5 && %4 == 0
  1096. %ifnidn %6, %7
  1097. %ifidn %6, %8
  1098. %xdefine __src1 %8
  1099. %xdefine __src2 %7
  1100. %elifnnum sizeof%8
  1101. ; 3-operand AVX instructions with a memory arg can only have it in src2,
  1102. ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
  1103. ; So, if the instruction is commutative with a memory arg, swap them.
  1104. %xdefine __src1 %8
  1105. %xdefine __src2 %7
  1106. %endif
  1107. %endif
  1108. %endif
  1109. %ifnidn %6, __src1
  1110. %if %0 >= 9
  1111. CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
  1112. %else
  1113. CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
  1114. %endif
  1115. %if __sizeofreg == 8
  1116. MOVQ %6, __src1
  1117. %elif %3
  1118. MOVAPS %6, __src1
  1119. %else
  1120. MOVDQA %6, __src1
  1121. %endif
  1122. %endif
  1123. %if %0 >= 9
  1124. %1 %6, __src2, %9
  1125. %else
  1126. %1 %6, __src2
  1127. %endif
  1128. %elif %0 >= 9
  1129. __instr %6, %7, %8, %9
  1130. %elif %0 == 8
  1131. __instr %6, %7, %8
  1132. %elif %0 == 7
  1133. __instr %6, %7
  1134. %else
  1135. __instr %6
  1136. %endif
  1137. %endmacro
  1138. ;%1 == instruction
  1139. ;%2 == minimal instruction set
  1140. ;%3 == 1 if float, 0 if int
  1141. ;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
  1142. ;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
  1143. %macro AVX_INSTR 1-5 fnord, 0, 255, 0
  1144. %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
  1145. %ifidn %2, fnord
  1146. RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
  1147. %elifidn %3, fnord
  1148. RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
  1149. %elifidn %4, fnord
  1150. RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
  1151. %elifidn %5, fnord
  1152. RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
  1153. %else
  1154. RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
  1155. %endif
  1156. %endmacro
  1157. %endmacro
  1158. ; Instructions with both VEX/EVEX and legacy encodings
  1159. ; Non-destructive instructions are written without parameters
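; For example (illustrative only): "paddw m0, m1, m2" assembles to
; "vpaddw m0, m1, m2" when AVX is enabled, and is emulated as
; "movdqa m0, m1" followed by "paddw m0, m2" on pre-AVX targets.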
  1160. AVX_INSTR addpd, sse2, 1, 0, 1
  1161. AVX_INSTR addps, sse, 1, 0, 1
  1162. AVX_INSTR addsd, sse2, 1, 0, 0
  1163. AVX_INSTR addss, sse, 1, 0, 0
  1164. AVX_INSTR addsubpd, sse3, 1, 0, 0
  1165. AVX_INSTR addsubps, sse3, 1, 0, 0
  1166. AVX_INSTR aesdec, aesni, 0, 0, 0
  1167. AVX_INSTR aesdeclast, aesni, 0, 0, 0
  1168. AVX_INSTR aesenc, aesni, 0, 0, 0
  1169. AVX_INSTR aesenclast, aesni, 0, 0, 0
  1170. AVX_INSTR aesimc, aesni
  1171. AVX_INSTR aeskeygenassist, aesni
  1172. AVX_INSTR andnpd, sse2, 1, 0, 0
  1173. AVX_INSTR andnps, sse, 1, 0, 0
  1174. AVX_INSTR andpd, sse2, 1, 0, 1
  1175. AVX_INSTR andps, sse, 1, 0, 1
  1176. AVX_INSTR blendpd, sse4, 1, 1, 0
  1177. AVX_INSTR blendps, sse4, 1, 1, 0
  1178. AVX_INSTR blendvpd, sse4 ; can't be emulated
  1179. AVX_INSTR blendvps, sse4 ; can't be emulated
  1180. AVX_INSTR cmpeqpd, sse2, 1, 0, 1
  1181. AVX_INSTR cmpeqps, sse, 1, 0, 1
  1182. AVX_INSTR cmpeqsd, sse2, 1, 0, 0
  1183. AVX_INSTR cmpeqss, sse, 1, 0, 0
  1184. AVX_INSTR cmplepd, sse2, 1, 0, 0
  1185. AVX_INSTR cmpleps, sse, 1, 0, 0
  1186. AVX_INSTR cmplesd, sse2, 1, 0, 0
  1187. AVX_INSTR cmpless, sse, 1, 0, 0
  1188. AVX_INSTR cmpltpd, sse2, 1, 0, 0
  1189. AVX_INSTR cmpltps, sse, 1, 0, 0
  1190. AVX_INSTR cmpltsd, sse2, 1, 0, 0
  1191. AVX_INSTR cmpltss, sse, 1, 0, 0
  1192. AVX_INSTR cmpneqpd, sse2, 1, 0, 1
  1193. AVX_INSTR cmpneqps, sse, 1, 0, 1
  1194. AVX_INSTR cmpneqsd, sse2, 1, 0, 0
  1195. AVX_INSTR cmpneqss, sse, 1, 0, 0
  1196. AVX_INSTR cmpnlepd, sse2, 1, 0, 0
  1197. AVX_INSTR cmpnleps, sse, 1, 0, 0
  1198. AVX_INSTR cmpnlesd, sse2, 1, 0, 0
  1199. AVX_INSTR cmpnless, sse, 1, 0, 0
  1200. AVX_INSTR cmpnltpd, sse2, 1, 0, 0
  1201. AVX_INSTR cmpnltps, sse, 1, 0, 0
  1202. AVX_INSTR cmpnltsd, sse2, 1, 0, 0
  1203. AVX_INSTR cmpnltss, sse, 1, 0, 0
  1204. AVX_INSTR cmpordpd, sse2, 1, 0, 1
  1205. AVX_INSTR cmpordps, sse, 1, 0, 1
  1206. AVX_INSTR cmpordsd, sse2, 1, 0, 0
  1207. AVX_INSTR cmpordss, sse, 1, 0, 0
  1208. AVX_INSTR cmppd, sse2, 1, 1, 0
  1209. AVX_INSTR cmpps, sse, 1, 1, 0
  1210. AVX_INSTR cmpsd, sse2, 1, 1, 0
  1211. AVX_INSTR cmpss, sse, 1, 1, 0
  1212. AVX_INSTR cmpunordpd, sse2, 1, 0, 1
  1213. AVX_INSTR cmpunordps, sse, 1, 0, 1
  1214. AVX_INSTR cmpunordsd, sse2, 1, 0, 0
  1215. AVX_INSTR cmpunordss, sse, 1, 0, 0
  1216. AVX_INSTR comisd, sse2
  1217. AVX_INSTR comiss, sse
  1218. AVX_INSTR cvtdq2pd, sse2
  1219. AVX_INSTR cvtdq2ps, sse2
  1220. AVX_INSTR cvtpd2dq, sse2
  1221. AVX_INSTR cvtpd2ps, sse2
  1222. AVX_INSTR cvtps2dq, sse2
  1223. AVX_INSTR cvtps2pd, sse2
  1224. AVX_INSTR cvtsd2si, sse2
  1225. AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
  1226. AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
  1227. AVX_INSTR cvtsi2ss, sse, 1, 0, 0
  1228. AVX_INSTR cvtss2sd, sse2, 1, 0, 0
  1229. AVX_INSTR cvtss2si, sse
  1230. AVX_INSTR cvttpd2dq, sse2
  1231. AVX_INSTR cvttps2dq, sse2
  1232. AVX_INSTR cvttsd2si, sse2
  1233. AVX_INSTR cvttss2si, sse
  1234. AVX_INSTR divpd, sse2, 1, 0, 0
  1235. AVX_INSTR divps, sse, 1, 0, 0
  1236. AVX_INSTR divsd, sse2, 1, 0, 0
  1237. AVX_INSTR divss, sse, 1, 0, 0
  1238. AVX_INSTR dppd, sse4, 1, 1, 0
  1239. AVX_INSTR dpps, sse4, 1, 1, 0
  1240. AVX_INSTR extractps, sse4
  1241. AVX_INSTR haddpd, sse3, 1, 0, 0
  1242. AVX_INSTR haddps, sse3, 1, 0, 0
  1243. AVX_INSTR hsubpd, sse3, 1, 0, 0
  1244. AVX_INSTR hsubps, sse3, 1, 0, 0
  1245. AVX_INSTR insertps, sse4, 1, 1, 0
  1246. AVX_INSTR lddqu, sse3
  1247. AVX_INSTR ldmxcsr, sse
  1248. AVX_INSTR maskmovdqu, sse2
  1249. AVX_INSTR maxpd, sse2, 1, 0, 1
  1250. AVX_INSTR maxps, sse, 1, 0, 1
  1251. AVX_INSTR maxsd, sse2, 1, 0, 0
  1252. AVX_INSTR maxss, sse, 1, 0, 0
  1253. AVX_INSTR minpd, sse2, 1, 0, 1
  1254. AVX_INSTR minps, sse, 1, 0, 1
  1255. AVX_INSTR minsd, sse2, 1, 0, 0
  1256. AVX_INSTR minss, sse, 1, 0, 0
  1257. AVX_INSTR movapd, sse2
  1258. AVX_INSTR movaps, sse
  1259. AVX_INSTR movd, mmx
  1260. AVX_INSTR movddup, sse3
  1261. AVX_INSTR movdqa, sse2
  1262. AVX_INSTR movdqu, sse2
  1263. AVX_INSTR movhlps, sse, 1, 0, 0
  1264. AVX_INSTR movhpd, sse2, 1, 0, 0
  1265. AVX_INSTR movhps, sse, 1, 0, 0
  1266. AVX_INSTR movlhps, sse, 1, 0, 0
  1267. AVX_INSTR movlpd, sse2, 1, 0, 0
  1268. AVX_INSTR movlps, sse, 1, 0, 0
  1269. AVX_INSTR movmskpd, sse2
  1270. AVX_INSTR movmskps, sse
  1271. AVX_INSTR movntdq, sse2
  1272. AVX_INSTR movntdqa, sse4
  1273. AVX_INSTR movntpd, sse2
  1274. AVX_INSTR movntps, sse
  1275. AVX_INSTR movq, mmx
  1276. AVX_INSTR movsd, sse2, 1, 0, 0
  1277. AVX_INSTR movshdup, sse3
  1278. AVX_INSTR movsldup, sse3
  1279. AVX_INSTR movss, sse, 1, 0, 0
  1280. AVX_INSTR movupd, sse2
  1281. AVX_INSTR movups, sse
  1282. AVX_INSTR mpsadbw, sse4, 0, 1, 0
  1283. AVX_INSTR mulpd, sse2, 1, 0, 1
  1284. AVX_INSTR mulps, sse, 1, 0, 1
  1285. AVX_INSTR mulsd, sse2, 1, 0, 0
  1286. AVX_INSTR mulss, sse, 1, 0, 0
  1287. AVX_INSTR orpd, sse2, 1, 0, 1
  1288. AVX_INSTR orps, sse, 1, 0, 1
  1289. AVX_INSTR pabsb, ssse3
  1290. AVX_INSTR pabsd, ssse3
  1291. AVX_INSTR pabsw, ssse3
  1292. AVX_INSTR packsswb, mmx, 0, 0, 0
  1293. AVX_INSTR packssdw, mmx, 0, 0, 0
  1294. AVX_INSTR packuswb, mmx, 0, 0, 0
  1295. AVX_INSTR packusdw, sse4, 0, 0, 0
  1296. AVX_INSTR paddb, mmx, 0, 0, 1
  1297. AVX_INSTR paddw, mmx, 0, 0, 1
  1298. AVX_INSTR paddd, mmx, 0, 0, 1
  1299. AVX_INSTR paddq, sse2, 0, 0, 1
  1300. AVX_INSTR paddsb, mmx, 0, 0, 1
  1301. AVX_INSTR paddsw, mmx, 0, 0, 1
  1302. AVX_INSTR paddusb, mmx, 0, 0, 1
  1303. AVX_INSTR paddusw, mmx, 0, 0, 1
  1304. AVX_INSTR palignr, ssse3, 0, 1, 0
  1305. AVX_INSTR pand, mmx, 0, 0, 1
  1306. AVX_INSTR pandn, mmx, 0, 0, 0
  1307. AVX_INSTR pavgb, mmx2, 0, 0, 1
  1308. AVX_INSTR pavgw, mmx2, 0, 0, 1
  1309. AVX_INSTR pblendvb, sse4 ; can't be emulated
  1310. AVX_INSTR pblendw, sse4, 0, 1, 0
  1311. AVX_INSTR pclmulqdq, fnord, 0, 1, 0
  1312. AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
  1313. AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
  1314. AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
  1315. AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
  1316. AVX_INSTR pcmpestri, sse42
  1317. AVX_INSTR pcmpestrm, sse42
  1318. AVX_INSTR pcmpistri, sse42
  1319. AVX_INSTR pcmpistrm, sse42
  1320. AVX_INSTR pcmpeqb, mmx, 0, 0, 1
  1321. AVX_INSTR pcmpeqw, mmx, 0, 0, 1
  1322. AVX_INSTR pcmpeqd, mmx, 0, 0, 1
  1323. AVX_INSTR pcmpeqq, sse4, 0, 0, 1
  1324. AVX_INSTR pcmpgtb, mmx, 0, 0, 0
  1325. AVX_INSTR pcmpgtw, mmx, 0, 0, 0
  1326. AVX_INSTR pcmpgtd, mmx, 0, 0, 0
  1327. AVX_INSTR pcmpgtq, sse42, 0, 0, 0
  1328. AVX_INSTR pextrb, sse4
  1329. AVX_INSTR pextrd, sse4
  1330. AVX_INSTR pextrq, sse4
  1331. AVX_INSTR pextrw, mmx2
  1332. AVX_INSTR phaddw, ssse3, 0, 0, 0
  1333. AVX_INSTR phaddd, ssse3, 0, 0, 0
  1334. AVX_INSTR phaddsw, ssse3, 0, 0, 0
  1335. AVX_INSTR phminposuw, sse4
  1336. AVX_INSTR phsubw, ssse3, 0, 0, 0
  1337. AVX_INSTR phsubd, ssse3, 0, 0, 0
  1338. AVX_INSTR phsubsw, ssse3, 0, 0, 0
  1339. AVX_INSTR pinsrb, sse4, 0, 1, 0
  1340. AVX_INSTR pinsrd, sse4, 0, 1, 0
  1341. AVX_INSTR pinsrq, sse4, 0, 1, 0
  1342. AVX_INSTR pinsrw, mmx2, 0, 1, 0
  1343. AVX_INSTR pmaddwd, mmx, 0, 0, 1
  1344. AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
  1345. AVX_INSTR pmaxsb, sse4, 0, 0, 1
  1346. AVX_INSTR pmaxsw, mmx2, 0, 0, 1
  1347. AVX_INSTR pmaxsd, sse4, 0, 0, 1
  1348. AVX_INSTR pmaxub, mmx2, 0, 0, 1
  1349. AVX_INSTR pmaxuw, sse4, 0, 0, 1
  1350. AVX_INSTR pmaxud, sse4, 0, 0, 1
  1351. AVX_INSTR pminsb, sse4, 0, 0, 1
  1352. AVX_INSTR pminsw, mmx2, 0, 0, 1
  1353. AVX_INSTR pminsd, sse4, 0, 0, 1
  1354. AVX_INSTR pminub, mmx2, 0, 0, 1
  1355. AVX_INSTR pminuw, sse4, 0, 0, 1
  1356. AVX_INSTR pminud, sse4, 0, 0, 1
  1357. AVX_INSTR pmovmskb, mmx2
  1358. AVX_INSTR pmovsxbw, sse4
  1359. AVX_INSTR pmovsxbd, sse4
  1360. AVX_INSTR pmovsxbq, sse4
  1361. AVX_INSTR pmovsxwd, sse4
  1362. AVX_INSTR pmovsxwq, sse4
  1363. AVX_INSTR pmovsxdq, sse4
  1364. AVX_INSTR pmovzxbw, sse4
  1365. AVX_INSTR pmovzxbd, sse4
  1366. AVX_INSTR pmovzxbq, sse4
  1367. AVX_INSTR pmovzxwd, sse4
  1368. AVX_INSTR pmovzxwq, sse4
  1369. AVX_INSTR pmovzxdq, sse4
  1370. AVX_INSTR pmuldq, sse4, 0, 0, 1
  1371. AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
  1372. AVX_INSTR pmulhuw, mmx2, 0, 0, 1
  1373. AVX_INSTR pmulhw, mmx, 0, 0, 1
  1374. AVX_INSTR pmullw, mmx, 0, 0, 1
  1375. AVX_INSTR pmulld, sse4, 0, 0, 1
  1376. AVX_INSTR pmuludq, sse2, 0, 0, 1
  1377. AVX_INSTR por, mmx, 0, 0, 1
  1378. AVX_INSTR psadbw, mmx2, 0, 0, 1
  1379. AVX_INSTR pshufb, ssse3, 0, 0, 0
  1380. AVX_INSTR pshufd, sse2
  1381. AVX_INSTR pshufhw, sse2
  1382. AVX_INSTR pshuflw, sse2
  1383. AVX_INSTR psignb, ssse3, 0, 0, 0
  1384. AVX_INSTR psignw, ssse3, 0, 0, 0
  1385. AVX_INSTR psignd, ssse3, 0, 0, 0
  1386. AVX_INSTR psllw, mmx, 0, 0, 0
  1387. AVX_INSTR pslld, mmx, 0, 0, 0
  1388. AVX_INSTR psllq, mmx, 0, 0, 0
  1389. AVX_INSTR pslldq, sse2, 0, 0, 0
  1390. AVX_INSTR psraw, mmx, 0, 0, 0
  1391. AVX_INSTR psrad, mmx, 0, 0, 0
  1392. AVX_INSTR psrlw, mmx, 0, 0, 0
  1393. AVX_INSTR psrld, mmx, 0, 0, 0
  1394. AVX_INSTR psrlq, mmx, 0, 0, 0
  1395. AVX_INSTR psrldq, sse2, 0, 0, 0
  1396. AVX_INSTR psubb, mmx, 0, 0, 0
  1397. AVX_INSTR psubw, mmx, 0, 0, 0
  1398. AVX_INSTR psubd, mmx, 0, 0, 0
  1399. AVX_INSTR psubq, sse2, 0, 0, 0
  1400. AVX_INSTR psubsb, mmx, 0, 0, 0
  1401. AVX_INSTR psubsw, mmx, 0, 0, 0
  1402. AVX_INSTR psubusb, mmx, 0, 0, 0
  1403. AVX_INSTR psubusw, mmx, 0, 0, 0
  1404. AVX_INSTR ptest, sse4
  1405. AVX_INSTR punpckhbw, mmx, 0, 0, 0
  1406. AVX_INSTR punpckhwd, mmx, 0, 0, 0
  1407. AVX_INSTR punpckhdq, mmx, 0, 0, 0
  1408. AVX_INSTR punpckhqdq, sse2, 0, 0, 0
  1409. AVX_INSTR punpcklbw, mmx, 0, 0, 0
  1410. AVX_INSTR punpcklwd, mmx, 0, 0, 0
  1411. AVX_INSTR punpckldq, mmx, 0, 0, 0
  1412. AVX_INSTR punpcklqdq, sse2, 0, 0, 0
  1413. AVX_INSTR pxor, mmx, 0, 0, 1
  1414. AVX_INSTR rcpps, sse
  1415. AVX_INSTR rcpss, sse, 1, 0, 0
  1416. AVX_INSTR roundpd, sse4
  1417. AVX_INSTR roundps, sse4
  1418. AVX_INSTR roundsd, sse4, 1, 1, 0
  1419. AVX_INSTR roundss, sse4, 1, 1, 0
  1420. AVX_INSTR rsqrtps, sse
  1421. AVX_INSTR rsqrtss, sse, 1, 0, 0
  1422. AVX_INSTR shufpd, sse2, 1, 1, 0
  1423. AVX_INSTR shufps, sse, 1, 1, 0
  1424. AVX_INSTR sqrtpd, sse2
  1425. AVX_INSTR sqrtps, sse
  1426. AVX_INSTR sqrtsd, sse2, 1, 0, 0
  1427. AVX_INSTR sqrtss, sse, 1, 0, 0
  1428. AVX_INSTR stmxcsr, sse
  1429. AVX_INSTR subpd, sse2, 1, 0, 0
  1430. AVX_INSTR subps, sse, 1, 0, 0
  1431. AVX_INSTR subsd, sse2, 1, 0, 0
  1432. AVX_INSTR subss, sse, 1, 0, 0
  1433. AVX_INSTR ucomisd, sse2
  1434. AVX_INSTR ucomiss, sse
  1435. AVX_INSTR unpckhpd, sse2, 1, 0, 0
  1436. AVX_INSTR unpckhps, sse, 1, 0, 0
  1437. AVX_INSTR unpcklpd, sse2, 1, 0, 0
  1438. AVX_INSTR unpcklps, sse, 1, 0, 0
  1439. AVX_INSTR xorpd, sse2, 1, 0, 1
  1440. AVX_INSTR xorps, sse, 1, 0, 1
  1441. ; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow!
  1442. AVX_INSTR pfadd, 3dnow, 1, 0, 1
  1443. AVX_INSTR pfsub, 3dnow, 1, 0, 0
  1444. AVX_INSTR pfmul, 3dnow, 1, 0, 1
  1445. ; base-4 constants for shuffles
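; For example: q2031 evaluates to the immediate 10_00_11_01b; read left to
; right, the digits give the source element placed in each destination element
; from the most significant down, so "pshufd m0, m1, q2031" puts source dwords
; 2, 0, 3, 1 into destination dwords 3, 2, 1, 0.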
  1446. %assign i 0
  1447. %rep 256
  1448. %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
  1449. %if j < 10
  1450. CAT_XDEFINE q000, j, i
  1451. %elif j < 100
  1452. CAT_XDEFINE q00, j, i
  1453. %elif j < 1000
  1454. CAT_XDEFINE q0, j, i
  1455. %else
  1456. CAT_XDEFINE q, j, i
  1457. %endif
  1458. %assign i i+1
  1459. %endrep
  1460. %undef i
  1461. %undef j
  1462. %macro FMA_INSTR 3
  1463. %macro %1 4-7 %1, %2, %3
  1464. %if cpuflag(xop)
  1465. v%5 %1, %2, %3, %4
  1466. %elifnidn %1, %4
  1467. %6 %1, %2, %3
  1468. %7 %1, %4
  1469. %else
  1470. %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
  1471. %endif
  1472. %endmacro
  1473. %endmacro
  1474. FMA_INSTR pmacsww, pmullw, paddw
  1475. FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
  1476. FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
  1477. FMA_INSTR pmadcswd, pmaddwd, paddd
  1478. ; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
  1479. ; This lets us use tzcnt without bumping the yasm version requirement yet.
  1480. %define tzcnt rep bsf
  1481. ; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
  1482. ; FMA3 is only possible if dst is the same as one of the src registers.
  1483. ; Either src2 or src3 can be a memory operand.
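; For example (illustrative only): "fmaddps m0, m1, m2, m0" emits
; "vfmaddps m0, m1, m2, m0" on FMA4, and "vfmadd231ps m0, m1, m2"
; (m0 = m1*m2 + m0) when only FMA3 is available.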
  1484. %macro FMA4_INSTR 2-*
  1485. %push fma4_instr
  1486. %xdefine %$prefix %1
  1487. %rep %0 - 1
  1488. %macro %$prefix%2 4-6 %$prefix, %2
  1489. %if notcpuflag(fma3) && notcpuflag(fma4)
  1490. %error use of ``%5%6'' fma instruction in cpuname function: current_function
  1491. %elif cpuflag(fma4)
  1492. v%5%6 %1, %2, %3, %4
  1493. %elifidn %1, %2
  1494. ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
  1495. %ifnum sizeof%3
  1496. v%{5}213%6 %2, %3, %4
  1497. %else
  1498. v%{5}132%6 %2, %4, %3
  1499. %endif
  1500. %elifidn %1, %3
  1501. v%{5}213%6 %3, %2, %4
  1502. %elifidn %1, %4
  1503. v%{5}231%6 %4, %2, %3
  1504. %else
  1505. %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
  1506. %endif
  1507. %endmacro
  1508. %rotate 1
  1509. %endrep
  1510. %pop
  1511. %endmacro
  1512. FMA4_INSTR fmadd, pd, ps, sd, ss
  1513. FMA4_INSTR fmaddsub, pd, ps
  1514. FMA4_INSTR fmsub, pd, ps, sd, ss
  1515. FMA4_INSTR fmsubadd, pd, ps
  1516. FMA4_INSTR fnmadd, pd, ps, sd, ss
  1517. FMA4_INSTR fnmsub, pd, ps, sd, ss
  1518. ; Macros for converting VEX instructions to equivalent EVEX ones.
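; For example (illustrative only): with the definitions below, an AVX-512
; function whose operand maps to xmm16-31 (e.g. after AVX512_MM_PERMUTATION)
; gets the EVEX mnemonic vmovdqa32, while operands in xmm0-15 keep the shorter
; VEX-encoded vmovdqa.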
  1519. %macro EVEX_INSTR 2-3 0 ; vex, evex, prefer_evex
  1520. %macro %1 2-7 fnord, fnord, %1, %2, %3
  1521. %ifidn %3, fnord
  1522. %define %%args %1, %2
  1523. %elifidn %4, fnord
  1524. %define %%args %1, %2, %3
  1525. %else
  1526. %define %%args %1, %2, %3, %4
  1527. %endif
  1528. %assign %%evex_required cpuflag(avx512) & %7
  1529. %ifnum regnumof%1
  1530. %if regnumof%1 >= 16 || sizeof%1 > 32
  1531. %assign %%evex_required 1
  1532. %endif
  1533. %endif
  1534. %ifnum regnumof%2
  1535. %if regnumof%2 >= 16 || sizeof%2 > 32
  1536. %assign %%evex_required 1
  1537. %endif
  1538. %endif
  1539. %if %%evex_required
  1540. %6 %%args
  1541. %else
  1542. %5 %%args ; Prefer VEX over EVEX due to shorter instruction length
  1543. %endif
  1544. %endmacro
  1545. %endmacro
  1546. EVEX_INSTR vbroadcastf128, vbroadcastf32x4
  1547. EVEX_INSTR vbroadcasti128, vbroadcasti32x4
  1548. EVEX_INSTR vextractf128, vextractf32x4
  1549. EVEX_INSTR vextracti128, vextracti32x4
  1550. EVEX_INSTR vinsertf128, vinsertf32x4
  1551. EVEX_INSTR vinserti128, vinserti32x4
  1552. EVEX_INSTR vmovdqa, vmovdqa32
  1553. EVEX_INSTR vmovdqu, vmovdqu32
  1554. EVEX_INSTR vpand, vpandd
  1555. EVEX_INSTR vpandn, vpandnd
  1556. EVEX_INSTR vpor, vpord
  1557. EVEX_INSTR vpxor, vpxord
  1558. EVEX_INSTR vrcpps, vrcp14ps, 1 ; EVEX versions have higher precision
  1559. EVEX_INSTR vrcpss, vrcp14ss, 1
  1560. EVEX_INSTR vrsqrtps, vrsqrt14ps, 1
  1561. EVEX_INSTR vrsqrtss, vrsqrt14ss, 1
  1562. ; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
  1563. %ifdef __YASM_VER__
  1564. %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
  1565. %macro vpbroadcastq 2
  1566. %if sizeof%1 == 16
  1567. movddup %1, %2
  1568. %else
  1569. vbroadcastsd %1, %2
  1570. %endif
  1571. %endmacro
  1572. %endif
  1573. %endif