/*
 * MMX and SSE2 optimized snow DSP utils
 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/snow.h"
#include "libavcodec/snow_dwt.h"
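
/* Inverse 9/7-style integer wavelet lifting for the Snow codec: horizontal and
 * vertical compose passes plus OBMC block accumulation, as MMX/SSE2 inline asm.
 * Each routine handles the bulk of a line or column in SIMD and defers the
 * unaligned or leftover elements to scalar loops and the *_lead_out helpers. */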

#if HAVE_INLINE_ASM

static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, IDWTELEM *temp, int width){
    const int w2= (width+1)>>1;
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;
        IDWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice
        // (the first time erroneously), we allow the SSE2 code to run an extra pass.
        // The savings in code and time are well worth having to store this value and
        // calculate b[0] correctly afterwards.

        i = 0;
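        // Constant setup: xmm7 = -1 in every word, xmm3 = ((-1<<1)-1)<<13 = -3<<13.
        // pmulhw against -3<<13 computes (-3*x)>>3, and the -1 bias added beforehand
        // turns the truncation into the scalar lift's rounding: with W_DM=3, W_DO=4,
        // W_DS=3 each iteration is exactly b[i] -= (3*(ref[i]+ref[i+1])+4)>>3.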
        __asm__ volatile(
            "pcmpeqd %%xmm7, %%xmm7 \n\t"
            "pcmpeqd %%xmm3, %%xmm3 \n\t"
            "psllw $1, %%xmm3 \n\t"
            "paddw %%xmm7, %%xmm3 \n\t"
            "psllw $13, %%xmm3 \n\t"
        ::);
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw %%xmm1, %%xmm2 \n\t"
                "paddw %%xmm5, %%xmm6 \n\t"
                "paddw %%xmm7, %%xmm2 \n\t"
                "paddw %%xmm7, %%xmm6 \n\t"
                "pmulhw %%xmm3, %%xmm2 \n\t"
                "pmulhw %%xmm3, %%xmm6 \n\t"
                "paddw (%0), %%xmm2 \n\t"
                "paddw 16(%0), %%xmm6 \n\t"
                "movdqa %%xmm2, (%0) \n\t"
                "movdqa %%xmm6, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
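        // This lift reduces to dst[i] -= b[i] + b[i+1] (the alignment loop below
        // shows the scalar form); that loop only runs until dst reaches a 32-byte
        // boundary so the movdqa stores in the SIMD loop are safe.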
        for(; (((x86_reg)&dst[i]) & 0x1F) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
        for(; i<w_r-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw %%xmm1, %%xmm2 \n\t"
                "paddw %%xmm5, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubw %%xmm2, %%xmm0 \n\t"
                "psubw %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;
        IDWTELEM b_0 = b[0];

        i = 0;
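        // xmm7 is assumed to still hold the all-ones pattern left in it by the
        // Lift 0 setup; psllw $15 turns it into the 0x8000 per-word bias that lets
        // the unsigned pavgw average signed values. xmm6 = 0x8000+7 folds in the
        // liftS rounding offset as well.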
        __asm__ volatile(
            "psllw $15, %%xmm7 \n\t"
            "pcmpeqw %%xmm6, %%xmm6 \n\t"
            "psrlw $13, %%xmm6 \n\t"
            "paddw %%xmm7, %%xmm6 \n\t"
        ::);
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm0 \n\t"
                "movdqu 16(%1), %%xmm4 \n\t"
                "movdqu 2(%1), %%xmm1 \n\t"
                "movdqu 18(%1), %%xmm5 \n\t" //FIXME try aligned reads and shifts
                "paddw %%xmm6, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm4 \n\t"
                "paddw %%xmm7, %%xmm1 \n\t"
                "paddw %%xmm7, %%xmm5 \n\t"
                "pavgw %%xmm1, %%xmm0 \n\t"
                "pavgw %%xmm5, %%xmm4 \n\t"
                "psubw %%xmm7, %%xmm0 \n\t"
                "psubw %%xmm7, %%xmm4 \n\t"
                "psraw $1, %%xmm0 \n\t"
                "psraw $1, %%xmm4 \n\t"
                "movdqa (%0), %%xmm1 \n\t"
                "movdqa 16(%0), %%xmm5 \n\t"
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "psraw $2, %%xmm0 \n\t"
                "psraw $2, %%xmm4 \n\t"
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;

        i = 0;
        for(; (((x86_reg)&temp[i]) & 0x1F) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
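        // With s = b[i]+b[i+1], each SIMD iteration stores src[i] + s + (s>>1),
        // matching the scalar temp[i] = src[i] - ((-W_AM*s)>>W_AS) lift (W_AM=3,
        // W_AS=1) up to rounding; the lead_out call below finishes the tail.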
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw (%1), %%xmm2 \n\t"
                "paddw 16(%1), %%xmm6 \n\t"
                "movdqu (%0), %%xmm0 \n\t"
                "movdqu 16(%0), %%xmm4 \n\t"
                "paddw %%xmm2, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm4 \n\t"
                "psraw $1, %%xmm2 \n\t"
                "psraw $1, %%xmm6 \n\t"
                "paddw %%xmm0, %%xmm2 \n\t"
                "paddw %%xmm4, %%xmm6 \n\t"
                "movdqa %%xmm2, (%2) \n\t"
                "movdqa %%xmm6, 16(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
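        // Re-interleave: the low-pass half still sits in b[0..w2-1] and the
        // high-pass half in temp; merge them back into b with punpcklwd/punpckhwd,
        // 64 output coefficients per asm iteration.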
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x3E) != 0x3E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=62; i>=0; i-=64){
            __asm__ volatile(
                "movdqa (%1), %%xmm0 \n\t"
                "movdqa 16(%1), %%xmm2 \n\t"
                "movdqa 32(%1), %%xmm4 \n\t"
                "movdqa 48(%1), %%xmm6 \n\t"
                "movdqa (%1), %%xmm1 \n\t"
                "movdqa 16(%1), %%xmm3 \n\t"
                "movdqa 32(%1), %%xmm5 \n\t"
                "movdqa 48(%1), %%xmm7 \n\t"
                "punpcklwd (%2), %%xmm0 \n\t"
                "punpcklwd 16(%2), %%xmm2 \n\t"
                "punpcklwd 32(%2), %%xmm4 \n\t"
                "punpcklwd 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm2, 32(%0) \n\t"
                "movdqa %%xmm4, 64(%0) \n\t"
                "movdqa %%xmm6, 96(%0) \n\t"
                "punpckhwd (%2), %%xmm1 \n\t"
                "punpckhwd 16(%2), %%xmm3 \n\t"
                "punpckhwd 32(%2), %%xmm5 \n\t"
                "punpckhwd 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0) \n\t"
                "movdqa %%xmm3, 48(%0) \n\t"
                "movdqa %%xmm5, 80(%0) \n\t"
                "movdqa %%xmm7, 112(%0) \n\t"
                :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
                : "memory"
            );
        }
    }
}
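
/* MMX variant of the horizontal compose: the same four lifting steps and final
 * interleave as the SSE2 version above, processing 8 coefficients per chunk. */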
static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, IDWTELEM *temp, int width){
    const int w2= (width+1)>>1;
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;

        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "pcmpeqw %%mm3, %%mm3 \n\t"
            "psllw $1, %%mm3 \n\t"
            "paddw %%mm7, %%mm3 \n\t"
            "psllw $13, %%mm3 \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddw 2(%1), %%mm2 \n\t"
                "paddw 10(%1), %%mm6 \n\t"
                "paddw %%mm7, %%mm2 \n\t"
                "paddw %%mm7, %%mm6 \n\t"
                "pmulhw %%mm3, %%mm2 \n\t"
                "pmulhw %%mm3, %%mm6 \n\t"
                "paddw (%0), %%mm2 \n\t"
                "paddw 8(%0), %%mm6 \n\t"
                "movq %%mm2, (%0) \n\t"
                "movq %%mm6, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddw 2(%1), %%mm2 \n\t"
                "paddw 10(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubw %%mm2, %%mm0 \n\t"
                "psubw %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
        __asm__ volatile(
            "psllw $15, %%mm7 \n\t"
            "pcmpeqw %%mm6, %%mm6 \n\t"
            "psrlw $13, %%mm6 \n\t"
            "paddw %%mm7, %%mm6 \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm4 \n\t"
                "movq 2(%1), %%mm1 \n\t"
                "movq 10(%1), %%mm5 \n\t"
                "paddw %%mm6, %%mm0 \n\t"
                "paddw %%mm6, %%mm4 \n\t"
                "paddw %%mm7, %%mm1 \n\t"
                "paddw %%mm7, %%mm5 \n\t"
                "pavgw %%mm1, %%mm0 \n\t"
                "pavgw %%mm5, %%mm4 \n\t"
                "psubw %%mm7, %%mm0 \n\t"
                "psubw %%mm7, %%mm4 \n\t"
                "psraw $1, %%mm0 \n\t"
                "psraw $1, %%mm4 \n\t"
                "movq (%0), %%mm1 \n\t"
                "movq 8(%0), %%mm5 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm5, %%mm4 \n\t"
                "psraw $2, %%mm0 \n\t"
                "psraw $2, %%mm4 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm5, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;

        i = 0;
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq 2(%1), %%mm2 \n\t"
                "movq 10(%1), %%mm6 \n\t"
                "paddw (%1), %%mm2 \n\t"
                "paddw 8(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "paddw %%mm2, %%mm0 \n\t"
                "paddw %%mm6, %%mm4 \n\t"
                "psraw $1, %%mm2 \n\t"
                "psraw $1, %%mm6 \n\t"
                "paddw %%mm0, %%mm2 \n\t"
                "paddw %%mm4, %%mm6 \n\t"
                "movq %%mm2, (%2) \n\t"
                "movq %%mm6, 8(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=30; i>=0; i-=32){
            __asm__ volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm2 \n\t"
                "movq 16(%1), %%mm4 \n\t"
                "movq 24(%1), %%mm6 \n\t"
                "movq (%1), %%mm1 \n\t"
                "movq 8(%1), %%mm3 \n\t"
                "movq 16(%1), %%mm5 \n\t"
                "movq 24(%1), %%mm7 \n\t"
                "punpcklwd (%2), %%mm0 \n\t"
                "punpcklwd 8(%2), %%mm2 \n\t"
                "punpcklwd 16(%2), %%mm4 \n\t"
                "punpcklwd 24(%2), %%mm6 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm2, 16(%0) \n\t"
                "movq %%mm4, 32(%0) \n\t"
                "movq %%mm6, 48(%0) \n\t"
                "punpckhwd (%2), %%mm1 \n\t"
                "punpckhwd 8(%2), %%mm3 \n\t"
                "punpckhwd 16(%2), %%mm5 \n\t"
                "punpckhwd 24(%2), %%mm7 \n\t"
                "movq %%mm1, 8(%0) \n\t"
                "movq %%mm3, 24(%0) \n\t"
                "movq %%mm5, 40(%0) \n\t"
                "movq %%mm7, 56(%0) \n\t"
                :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
                : "memory"
            );
        }
    }
}

#if HAVE_7REGS
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
    ""op" ("r",%%"FF_REG_d"), %%"t0" \n\t"\
    ""op" 16("r",%%"FF_REG_d"), %%"t1" \n\t"\
    ""op" 32("r",%%"FF_REG_d"), %%"t2" \n\t"\
    ""op" 48("r",%%"FF_REG_d"), %%"t3" \n\t"

#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_r2r_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
    "psubw %%"s0", %%"t0" \n\t"\
    "psubw %%"s1", %%"t1" \n\t"\
    "psubw %%"s2", %%"t2" \n\t"\
    "psubw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
    "movdqa %%"s0", ("w",%%"FF_REG_d") \n\t"\
    "movdqa %%"s1", 16("w",%%"FF_REG_d") \n\t"\
    "movdqa %%"s2", 32("w",%%"FF_REG_d") \n\t"\
    "movdqa %%"s3", 48("w",%%"FF_REG_d") \n\t"

#define snow_vertical_compose_sra(n,t0,t1,t2,t3)\
    "psraw $"n", %%"t0" \n\t"\
    "psraw $"n", %%"t1" \n\t"\
    "psraw $"n", %%"t2" \n\t"\
    "psraw $"n", %%"t3" \n\t"

#define snow_vertical_compose_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
    "paddw %%"s0", %%"t0" \n\t"\
    "paddw %%"s1", %%"t1" \n\t"\
    "paddw %%"s2", %%"t2" \n\t"\
    "paddw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_r2r_pmulhw(s0,s1,s2,s3,t0,t1,t2,t3)\
    "pmulhw %%"s0", %%"t0" \n\t"\
    "pmulhw %%"s1", %%"t1" \n\t"\
    "pmulhw %%"s2", %%"t2" \n\t"\
    "pmulhw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
    "movdqa %%"s0", %%"t0" \n\t"\
    "movdqa %%"s1", %%"t1" \n\t"\
    "movdqa %%"s2", %%"t2" \n\t"\
    "movdqa %%"s3", %%"t3" \n\t"
static void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    x86_reg i = width;

    while(i & 0x1F)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
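
    // i was an element count; the asm below indexes bytes (IDWTELEM is 16 bits
    // wide), so double it and count FF_REG_d down by 64 bytes per iteration.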
    i+=i;
    __asm__ volatile (
        "jmp 2f \n\t"
        "1: \n\t"
        snow_vertical_compose_sse2_load("%4","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_add("%6","xmm1","xmm3","xmm5","xmm7")
  422. "pcmpeqw %%xmm0, %%xmm0 \n\t"
  423. "pcmpeqw %%xmm2, %%xmm2 \n\t"
  424. "paddw %%xmm2, %%xmm2 \n\t"
  425. "paddw %%xmm0, %%xmm2 \n\t"
  426. "psllw $13, %%xmm2 \n\t"
  427. snow_vertical_compose_r2r_add("xmm0","xmm0","xmm0","xmm0","xmm1","xmm3","xmm5","xmm7")
  428. snow_vertical_compose_r2r_pmulhw("xmm2","xmm2","xmm2","xmm2","xmm1","xmm3","xmm5","xmm7")
  429. snow_vertical_compose_sse2_add("%5","xmm1","xmm3","xmm5","xmm7")
  430. snow_vertical_compose_sse2_store("%5","xmm1","xmm3","xmm5","xmm7")
  431. snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6")
  432. snow_vertical_compose_sse2_add("%3","xmm1","xmm3","xmm5","xmm7")
  433. snow_vertical_compose_r2r_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
  434. snow_vertical_compose_sse2_store("%4","xmm0","xmm2","xmm4","xmm6")
  435. "pcmpeqw %%xmm7, %%xmm7 \n\t"
  436. "pcmpeqw %%xmm5, %%xmm5 \n\t"
  437. "psllw $15, %%xmm7 \n\t"
  438. "psrlw $13, %%xmm5 \n\t"
  439. "paddw %%xmm7, %%xmm5 \n\t"
  440. snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6")
  441. "movq (%2,%%"FF_REG_d"), %%xmm1 \n\t"
  442. "movq 8(%2,%%"FF_REG_d"), %%xmm3 \n\t"
  443. "paddw %%xmm7, %%xmm1 \n\t"
  444. "paddw %%xmm7, %%xmm3 \n\t"
  445. "pavgw %%xmm1, %%xmm0 \n\t"
  446. "pavgw %%xmm3, %%xmm2 \n\t"
  447. "movq 16(%2,%%"FF_REG_d"), %%xmm1 \n\t"
  448. "movq 24(%2,%%"FF_REG_d"), %%xmm3 \n\t"
  449. "paddw %%xmm7, %%xmm1 \n\t"
  450. "paddw %%xmm7, %%xmm3 \n\t"
  451. "pavgw %%xmm1, %%xmm4 \n\t"
  452. "pavgw %%xmm3, %%xmm6 \n\t"
  453. snow_vertical_compose_r2r_sub("xmm7","xmm7","xmm7","xmm7","xmm0","xmm2","xmm4","xmm6")
  454. snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
  455. snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
  456. snow_vertical_compose_sra("2","xmm0","xmm2","xmm4","xmm6")
  457. snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
  458. snow_vertical_compose_sse2_store("%3","xmm0","xmm2","xmm4","xmm6")
  459. snow_vertical_compose_sse2_add("%1","xmm0","xmm2","xmm4","xmm6")
  460. snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
  461. snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
  462. snow_vertical_compose_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
  463. snow_vertical_compose_sse2_add("%2","xmm0","xmm2","xmm4","xmm6")
  464. snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6")
  465. "2: \n\t"
  466. "sub $64, %%"FF_REG_d" \n\t"
  467. "jge 1b \n\t"
  468. :"+d"(i)
  469. :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
  470. }

#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
    ""op" ("r",%%"FF_REG_d"), %%"t0" \n\t"\
    ""op" 8("r",%%"FF_REG_d"), %%"t1" \n\t"\
    ""op" 16("r",%%"FF_REG_d"), %%"t2" \n\t"\
    ""op" 24("r",%%"FF_REG_d"), %%"t3" \n\t"

#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
    snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
    snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
    "movq %%"s0", ("w",%%"FF_REG_d") \n\t"\
    "movq %%"s1", 8("w",%%"FF_REG_d") \n\t"\
    "movq %%"s2", 16("w",%%"FF_REG_d") \n\t"\
    "movq %%"s3", 24("w",%%"FF_REG_d") \n\t"

#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
    "movq %%"s0", %%"t0" \n\t"\
    "movq %%"s1", %%"t1" \n\t"\
    "movq %%"s2", %%"t2" \n\t"\
    "movq %%"s3", %%"t3" \n\t"
static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    x86_reg i = width;

    while(i & 15)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
    i+=i;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        snow_vertical_compose_mmx_load("%4","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add("%6","mm1","mm3","mm5","mm7")
        "pcmpeqw %%mm0, %%mm0 \n\t"
        "pcmpeqw %%mm2, %%mm2 \n\t"
        "paddw %%mm2, %%mm2 \n\t"
        "paddw %%mm0, %%mm2 \n\t"
        "psllw $13, %%mm2 \n\t"
        snow_vertical_compose_r2r_add("mm0","mm0","mm0","mm0","mm1","mm3","mm5","mm7")
        snow_vertical_compose_r2r_pmulhw("mm2","mm2","mm2","mm2","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add("%5","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_store("%5","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_load("%4","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm1","mm3","mm5","mm7")
        snow_vertical_compose_r2r_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%4","mm0","mm2","mm4","mm6")
        "pcmpeqw %%mm7, %%mm7 \n\t"
        "pcmpeqw %%mm5, %%mm5 \n\t"
        "psllw $15, %%mm7 \n\t"
        "psrlw $13, %%mm5 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6")
        "movq (%2,%%"FF_REG_d"), %%mm1 \n\t"
        "movq 8(%2,%%"FF_REG_d"), %%mm3 \n\t"
        "paddw %%mm7, %%mm1 \n\t"
        "paddw %%mm7, %%mm3 \n\t"
        "pavgw %%mm1, %%mm0 \n\t"
        "pavgw %%mm3, %%mm2 \n\t"
        "movq 16(%2,%%"FF_REG_d"), %%mm1 \n\t"
        "movq 24(%2,%%"FF_REG_d"), %%mm3 \n\t"
        "paddw %%mm7, %%mm1 \n\t"
        "paddw %%mm7, %%mm3 \n\t"
        "pavgw %%mm1, %%mm4 \n\t"
        "pavgw %%mm3, %%mm6 \n\t"
        snow_vertical_compose_r2r_sub("mm7","mm7","mm7","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_sra("2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6")
        "2: \n\t"
        "sub $32, %%"FF_REG_d" \n\t"
        "jge 1b \n\t"
        :"+d"(i)
        :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}
#endif //HAVE_7REGS

#if HAVE_6REGS
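
/* OBMC accumulation: for each output line, the four candidate predictions in
 * block[] are weighted with the obmc window, summed, combined with the IDWTELEM
 * line from the slice buffer, rounded, shifted down by FRAC_BITS and packed to
 * dst8. The _8/_16 variants are specialized for 8- and 16-pixel block widths. */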
#define snow_inner_add_yblock_sse2_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    x86_reg tmp;\
    __asm__ volatile(\
        "mov %7, %%"FF_REG_c" \n\t"\
        "mov %6, %2 \n\t"\
        "mov %4, %%"FF_REG_S" \n\t"\
        "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
        "pcmpeqd %%xmm3, %%xmm3 \n\t"\
        "psllw $15, %%xmm3 \n\t"\
        "psrlw $12, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\
        "1: \n\t"\
        "mov %1, %%"FF_REG_D" \n\t"\
        "mov (%%"FF_REG_D"), %%"FF_REG_D" \n\t"\
        "add %3, %%"FF_REG_D" \n\t"

#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
    "mov "FF_PTR_SIZE"*"ptr_offset"(%%"FF_REG_a"), %%"FF_REG_d"; \n\t"\
    "movq (%%"FF_REG_d"), %%"out_reg1" \n\t"\
    "movq (%%"FF_REG_d", %%"FF_REG_c"), %%"out_reg2" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
    "movq "s_offset"(%%"FF_REG_S"), %%xmm0 \n\t"\
    "movq "s_offset"+16(%%"FF_REG_S"), %%xmm4 \n\t"\
    "punpcklbw %%xmm7, %%xmm0 \n\t"\
    "punpcklbw %%xmm7, %%xmm4 \n\t"\
    "pmullw %%xmm0, %%"out_reg1" \n\t"\
    "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
    "mov "FF_PTR_SIZE"*"ptr_offset"(%%"FF_REG_a"), %%"FF_REG_d"; \n\t"\
    "movq (%%"FF_REG_d"), %%"out_reg1" \n\t"\
    "movq 8(%%"FF_REG_d"), %%"out_reg2" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
    "movq "s_offset"(%%"FF_REG_S"), %%xmm0 \n\t"\
    "movq "s_offset"+8(%%"FF_REG_S"), %%xmm4 \n\t"\
    "punpcklbw %%xmm7, %%xmm0 \n\t"\
    "punpcklbw %%xmm7, %%xmm4 \n\t"\
    "pmullw %%xmm0, %%"out_reg1" \n\t"\
    "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
    snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
    "paddusw %%xmm2, %%xmm1 \n\t"\
    "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
    snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
    "paddusw %%xmm2, %%xmm1 \n\t"\
    "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_end_common1\
    "add $32, %%"FF_REG_S" \n\t"\
    "add %%"FF_REG_c", %0 \n\t"\
    "add %%"FF_REG_c", "FF_PTR_SIZE"*3(%%"FF_REG_a"); \n\t"\
    "add %%"FF_REG_c", "FF_PTR_SIZE"*2(%%"FF_REG_a"); \n\t"\
    "add %%"FF_REG_c", "FF_PTR_SIZE"*1(%%"FF_REG_a"); \n\t"\
    "add %%"FF_REG_c", (%%"FF_REG_a") \n\t"

#define snow_inner_add_yblock_sse2_end_common2\
    "jnz 1b \n\t"\
    :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
    :\
    "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\
    XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", )\
    "%"FF_REG_c"","%"FF_REG_S"","%"FF_REG_D"","%"FF_REG_d"");

#define snow_inner_add_yblock_sse2_end_8\
    "sal $1, %%"FF_REG_c" \n\t"\
    "add"FF_OPSIZE" $"FF_PTR_SIZE"*2, %1 \n\t"\
    snow_inner_add_yblock_sse2_end_common1\
    "sar $1, %%"FF_REG_c" \n\t"\
    "sub $2, %2 \n\t"\
    snow_inner_add_yblock_sse2_end_common2

#define snow_inner_add_yblock_sse2_end_16\
    "add"FF_OPSIZE" $"FF_PTR_SIZE"*1, %1 \n\t"\
    snow_inner_add_yblock_sse2_end_common1\
    "dec %2 \n\t"\
    snow_inner_add_yblock_sse2_end_common2

static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                                       int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_8("2", "8")
    snow_inner_add_yblock_sse2_accum_8("1", "128")
    snow_inner_add_yblock_sse2_accum_8("0", "136")

    "mov %0, %%"FF_REG_d" \n\t"
    "movdqa (%%"FF_REG_D"), %%xmm0 \n\t"
    "movdqa %%xmm1, %%xmm2 \n\t"
    "punpckhwd %%xmm7, %%xmm1 \n\t"
    "punpcklwd %%xmm7, %%xmm2 \n\t"
    "paddd %%xmm2, %%xmm0 \n\t"
    "movdqa 16(%%"FF_REG_D"), %%xmm2 \n\t"
    "paddd %%xmm1, %%xmm2 \n\t"
    "paddd %%xmm3, %%xmm0 \n\t"
    "paddd %%xmm3, %%xmm2 \n\t"

    "mov %1, %%"FF_REG_D" \n\t"
    "mov "FF_PTR_SIZE"(%%"FF_REG_D"), %%"FF_REG_D"; \n\t"
    "add %3, %%"FF_REG_D" \n\t"

    "movdqa (%%"FF_REG_D"), %%xmm4 \n\t"
    "movdqa %%xmm5, %%xmm6 \n\t"
    "punpckhwd %%xmm7, %%xmm5 \n\t"
    "punpcklwd %%xmm7, %%xmm6 \n\t"
    "paddd %%xmm6, %%xmm4 \n\t"
    "movdqa 16(%%"FF_REG_D"), %%xmm6 \n\t"
    "paddd %%xmm5, %%xmm6 \n\t"
    "paddd %%xmm3, %%xmm4 \n\t"
    "paddd %%xmm3, %%xmm6 \n\t"

    "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
    "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
    "packssdw %%xmm2, %%xmm0 \n\t"
    "packuswb %%xmm7, %%xmm0 \n\t"
    "movq %%xmm0, (%%"FF_REG_d") \n\t"

    "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
    "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
    "packssdw %%xmm6, %%xmm4 \n\t"
    "packuswb %%xmm7, %%xmm4 \n\t"
    "movq %%xmm4, (%%"FF_REG_d",%%"FF_REG_c"); \n\t"
    snow_inner_add_yblock_sse2_end_8
}

static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                                int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_16("2", "16")
    snow_inner_add_yblock_sse2_accum_16("1", "512")
    snow_inner_add_yblock_sse2_accum_16("0", "528")

    "mov %0, %%"FF_REG_d" \n\t"
    "psrlw $4, %%xmm1 \n\t"
    "psrlw $4, %%xmm5 \n\t"
    "paddw (%%"FF_REG_D"), %%xmm1 \n\t"
    "paddw 16(%%"FF_REG_D"), %%xmm5 \n\t"
    "paddw %%xmm3, %%xmm1 \n\t"
    "paddw %%xmm3, %%xmm5 \n\t"
    "psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */
    "psraw $4, %%xmm5 \n\t" /* FRAC_BITS. */
    "packuswb %%xmm5, %%xmm1 \n\t"
    "movdqu %%xmm1, (%%"FF_REG_d") \n\t"
    snow_inner_add_yblock_sse2_end_16
}

#define snow_inner_add_yblock_mmx_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    x86_reg tmp;\
    __asm__ volatile(\
        "mov %7, %%"FF_REG_c" \n\t"\
        "mov %6, %2 \n\t"\
        "mov %4, %%"FF_REG_S" \n\t"\
        "pxor %%mm7, %%mm7 \n\t" /* 0 */\
        "pcmpeqd %%mm3, %%mm3 \n\t"\
        "psllw $15, %%mm3 \n\t"\
        "psrlw $12, %%mm3 \n\t" /* FRAC_BITS >> 1 */\
        "1: \n\t"\
        "mov %1, %%"FF_REG_D" \n\t"\
        "mov (%%"FF_REG_D"), %%"FF_REG_D" \n\t"\
        "add %3, %%"FF_REG_D" \n\t"

#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
    "mov "FF_PTR_SIZE"*"ptr_offset"(%%"FF_REG_a"), %%"FF_REG_d"; \n\t"\
    "movd "d_offset"(%%"FF_REG_d"), %%"out_reg1" \n\t"\
    "movd "d_offset"+4(%%"FF_REG_d"), %%"out_reg2" \n\t"\
    "punpcklbw %%mm7, %%"out_reg1" \n\t"\
    "punpcklbw %%mm7, %%"out_reg2" \n\t"\
    "movd "s_offset"(%%"FF_REG_S"), %%mm0 \n\t"\
    "movd "s_offset"+4(%%"FF_REG_S"), %%mm4 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "pmullw %%mm0, %%"out_reg1" \n\t"\
    "pmullw %%mm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
    snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
    "paddusw %%mm2, %%mm1 \n\t"\
    "paddusw %%mm6, %%mm5 \n\t"

#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
    "mov %0, %%"FF_REG_d" \n\t"\
    "psrlw $4, %%mm1 \n\t"\
    "psrlw $4, %%mm5 \n\t"\
    "paddw "read_offset"(%%"FF_REG_D"), %%mm1 \n\t"\
    "paddw "read_offset"+8(%%"FF_REG_D"), %%mm5 \n\t"\
    "paddw %%mm3, %%mm1 \n\t"\
    "paddw %%mm3, %%mm5 \n\t"\
    "psraw $4, %%mm1 \n\t"\
    "psraw $4, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm1 \n\t"\
    "movq %%mm1, "write_offset"(%%"FF_REG_d") \n\t"

#define snow_inner_add_yblock_mmx_end(s_step)\
    "add $"s_step", %%"FF_REG_S" \n\t"\
    "add %%"FF_REG_c", "FF_PTR_SIZE"*3(%%"FF_REG_a"); \n\t"\
    "add %%"FF_REG_c", "FF_PTR_SIZE"*2(%%"FF_REG_a"); \n\t"\
    "add %%"FF_REG_c", "FF_PTR_SIZE"*1(%%"FF_REG_a"); \n\t"\
    "add %%"FF_REG_c", (%%"FF_REG_a") \n\t"\
    "add"FF_OPSIZE" $"FF_PTR_SIZE"*1, %1 \n\t"\
    "add %%"FF_REG_c", %0 \n\t"\
    "dec %2 \n\t"\
    "jnz 1b \n\t"\
    :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
    :\
    "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\
    "%"FF_REG_c"","%"FF_REG_S"","%"FF_REG_D"","%"FF_REG_d"");

static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                              int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "8", "0")
    snow_inner_add_yblock_mmx_accum("1", "128", "0")
    snow_inner_add_yblock_mmx_accum("0", "136", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")
    snow_inner_add_yblock_mmx_end("16")
}

static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                               int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "16", "0")
    snow_inner_add_yblock_mmx_accum("1", "512", "0")
    snow_inner_add_yblock_mmx_accum("0", "528", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")

    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
    snow_inner_add_yblock_mmx_accum("2", "24", "8")
    snow_inner_add_yblock_mmx_accum("1", "520", "8")
    snow_inner_add_yblock_mmx_accum("0", "536", "8")
    snow_inner_add_yblock_mmx_mix("16", "8")
    snow_inner_add_yblock_mmx_end("32")
}

static void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                          int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16) {
        if (!(b_h & 1))
            inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
        else
            inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    } else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}

static void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                         int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16)
        inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}
#endif /* HAVE_6REGS */
#endif /* HAVE_INLINE_ASM */

av_cold void ff_dwt_init_x86(SnowDWTContext *c)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
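        /* The "& 0" makes this branch unreachable: the SSE2 versions above are
         * compiled but never selected, leaving only the MMX paths active. */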
        if (mm_flags & AV_CPU_FLAG_SSE2 & 0) {
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
#if HAVE_6REGS
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
#endif
        }
        else {
            if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
#if HAVE_6REGS
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
#endif
        }
    }
#endif /* HAVE_INLINE_ASM */
}