;******************************************************************************
;* SIMD optimized MPEG-4 Parametric Stereo decoding functions
;*
;* Copyright (C) 2015 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; sign mask: XORing with this flips the sign bit of lanes 1 and 3, i.e.
; multiplies a float vector by (+1, -1, +1, -1)
ps_p1m1p1m1: dd 0, 0x80000000, 0, 0x80000000

SECTION .text
;*************************************************************************
;void ff_ps_add_squares_<opt>(float *dst, const float (*src)[2], int n);
;*************************************************************************
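; Scalar equivalent of the loop below (an illustrative sketch, not the C
; template verbatim): accumulate the squared magnitude of each complex
; sample into dst.
;
;     for (int i = 0; i < n; i++)
;         dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
;
; Each iteration squares eight floats (four complex samples) and reduces
; the re^2/im^2 pairs with HADDPS before adding the result onto dst.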
%macro PS_ADD_SQUARES 1
cglobal ps_add_squares, 3, 3, %1, dst, src, n
    shl      nd, 3                  ; n complex samples -> byte count
    add    srcq, nq
    neg      nq                     ; count nq up from -n*8 to 0

align 16
.loop:
    movaps   m0, [srcq+nq]
    movaps   m1, [srcq+nq+mmsize]
    mulps    m0, m0
    mulps    m1, m1
    HADDPS   m0, m1, m2             ; re^2 + im^2 for each sample
    addps    m0, [dstq]
    movaps [dstq], m0
    add    dstq, mmsize
    add      nq, mmsize*2
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse
PS_ADD_SQUARES 2
INIT_XMM sse3
PS_ADD_SQUARES 3
;*******************************************************************
;void ff_ps_mul_pair_single_sse(float (*dst)[2], float (*src0)[2],
;                               float *src1, int n);
;*******************************************************************
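; Scalar equivalent (illustrative sketch): scale each complex sample in
; src0 by the matching real gain in src1. Note that the cglobal line
; below names these arguments src1 and src2 instead of the prototype's
; src0 and src1.
;
;     for (int i = 0; i < n; i++) {
;         dst[i][0] = src0[i][0] * src1[i];
;         dst[i][1] = src0[i][1] * src1[i];
;     }
;
; Four gains loaded with one mova are expanded by unpcklps/unpckhps so
; they cover eight output floats per iteration.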
INIT_XMM sse
cglobal ps_mul_pair_single, 4, 4, 4, dst, src1, src2, n
    shl      nd, 3
    add   src1q, nq
    add    dstq, nq
    neg      nq

align 16
.loop:
    movu     m0, [src1q+nq]
    movu     m1, [src1q+nq+mmsize]
    mova     m2, [src2q]
    mova     m3, m2
    unpcklps m2, m2                 ; g0 g0 g1 g1
    unpckhps m3, m3                 ; g2 g2 g3 g3
    mulps    m0, m2
    mulps    m1, m3
    mova [dstq+nq], m0
    mova [dstq+nq+mmsize], m1
    add   src2q, mmsize
    add      nq, mmsize*2
    jl .loop
    REP_RET
;***********************************************************************
;void ff_ps_stereo_interpolate_sse3(float (*l)[2], float (*r)[2],
;                                   float h[2][4], float h_step[2][4],
;                                   int len);
;***********************************************************************
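; Scalar equivalent (illustrative sketch). Only h[0][0..3] is used here
; (the _ipdopd variant below also consumes h[1]); with hN = h[0][N] and
; sN = h_step[0][N], each sample applies a stepped 2x2 real mix:
;
;     for (int i = 0; i < len; i++) {
;         h0 += s0; h1 += s1; h2 += s2; h3 += s3;
;         float l_re = l[i][0], l_im = l[i][1];
;         float r_re = r[i][0], r_im = r[i][1];
;         l[i][0] = h0 * l_re + h2 * r_re;
;         l[i][1] = h0 * l_im + h2 * r_im;
;         r[i][0] = h1 * l_re + h3 * r_re;
;         r[i][1] = h1 * l_im + h3 * r_im;
;     }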
INIT_XMM sse3
cglobal ps_stereo_interpolate, 5, 5, 6, l, r, h, h_step, n
    movaps   m0, [hq]               ; h0 h1 h2 h3
    movaps   m1, [h_stepq]
    unpcklps m4, m0, m0             ; h0 h0 h1 h1
    unpckhps m0, m0                 ; h2 h2 h3 h3
    unpcklps m5, m1, m1
    unpckhps m1, m1
    shl      nd, 3
    add      lq, nq
    add      rq, nq
    neg      nq

align 16
.loop:
    addps    m4, m5                 ; step the coefficients once per sample
    addps    m0, m1
    movddup  m2, [lq+nq]            ; l_re l_im l_re l_im
    movddup  m3, [rq+nq]            ; r_re r_im r_re r_im
    mulps    m2, m4
    mulps    m3, m0
    addps    m2, m3                 ; l_re' l_im' r_re' r_im'
    movsd  [lq+nq], m2
    movhps [rq+nq], m2
    add      nq, 8
    jl .loop
    REP_RET
;***************************************************************************
;void ff_ps_stereo_interpolate_ipdopd_sse3(float (*l)[2], float (*r)[2],
;                                          float h[2][4], float h_step[2][4],
;                                          int len);
;***************************************************************************
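; Same as above, but both rows of h are used (illustrative sketch): h[0]
; weights the (re, im) samples, h[1] weights the lane-swapped (im, re)
; samples, and ADDSUBPS (subtract in even lanes, add in odd lanes)
; supplies the -/+ sign pattern of the imaginary cross terms. With
; hNM = h[N][M], all eight coefficients stepped by h_step each sample:
;
;     l[i][0] = h00*l_re + h02*r_re - h10*l_im - h12*r_im;
;     l[i][1] = h00*l_im + h02*r_im + h10*l_re + h12*r_re;
;     r[i][0] = h01*l_re + h03*r_re - h11*l_im - h13*r_im;
;     r[i][1] = h01*l_im + h03*r_im + h11*l_re + h13*r_re;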
INIT_XMM sse3
cglobal ps_stereo_interpolate_ipdopd, 5, 5, 10, l, r, h, h_step, n
    movaps   m0, [hq]               ; h00 h01 h02 h03
    movaps   m1, [hq+mmsize]        ; h10 h11 h12 h13
%if ARCH_X86_64
    movaps   m8, [h_stepq]
    movaps   m9, [h_stepq+mmsize]
    %define H_STEP0 m8
    %define H_STEP1 m9
%else
    %define H_STEP0 [h_stepq]
    %define H_STEP1 [h_stepq+mmsize]
%endif
    shl      nd, 3
    add      lq, nq
    add      rq, nq
    neg      nq

align 16
.loop:
    addps    m0, H_STEP0
    addps    m1, H_STEP1
    movddup  m2, [lq+nq]            ; l_re l_im l_re l_im
    movddup  m3, [rq+nq]
    shufps   m4, m2, m2, q2301      ; l_im l_re l_im l_re
    shufps   m5, m3, m3, q2301
    unpcklps m6, m0, m0             ; h00 h00 h01 h01
    unpckhps m7, m0, m0             ; h02 h02 h03 h03
    mulps    m2, m6
    mulps    m3, m7
    unpcklps m6, m1, m1
    unpckhps m7, m1, m1
    mulps    m4, m6
    mulps    m5, m7
    addps    m2, m3
    addsubps m2, m4                 ; -/+ the h[1] cross terms
    addsubps m2, m5
    movsd  [lq+nq], m2
    movhps [rq+nq], m2
    add      nq, 8
    jl .loop
    REP_RET
;**********************************************************
;void ff_ps_hybrid_analysis_ileave_sse(float (*out)[32][2],
;                                      float in[2][38][64],
;                                      int i, int len)
;**********************************************************
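; Scalar equivalent (illustrative sketch): interleave the split re/im
; planes of in into packed complex samples.
;
;     for (; i < 64; i++)
;         for (int j = 0; j < 32; j++) {
;             out[i][j][0] = in[0][j][i];
;             out[i][j][1] = in[1][j][i];
;         }
;
; lend is loaded with the constant 32 << 3 (32 rows of 8 bytes, which
; also equals in's 64*4-byte row stride) rather than with the len
; argument, so the SIMD code always walks all 32 rows. Depending on how
; many of the 64 - i remaining columns there are, a 4-, 2- or 1-column
; loop is used; the 4-column path is a full 4x4 transpose.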
INIT_XMM sse
cglobal ps_hybrid_analysis_ileave, 3, 7, 5, out, in, i, len, in0, in1, tmp
    movsxdifnidn iq, id
    mov    lend, 32 << 3            ; inner byte count; also in's row stride
    lea     inq, [inq+iq*4]         ; &in[0][0][i]
    mov    tmpd, id
    shl    tmpd, 8                  ; i * 32*2*4
    add    outq, tmpq               ; &out[i]
    mov    tmpd, 64
    sub    tmpd, id
    mov      id, tmpd               ; columns left: 64 - i

    test     id, 1
    jne .loop4
    test     id, 2
    jne .loop8

align 16
.loop16:                            ; 4 columns per pass (4x4 transpose)
    mov    in0q, inq
    mov    in1q, 38*64*4            ; plane stride of in[2][38][64]
    add    in1q, in0q
    mov    tmpd, lend

.inner_loop16:
    movaps   m0, [in0q]
    movaps   m1, [in1q]
    movaps   m2, [in0q+lenq]
    movaps   m3, [in1q+lenq]
    TRANSPOSE4x4PS 0, 1, 2, 3, 4
    movaps [outq], m0
    movaps [outq+lenq], m1
    movaps [outq+lenq*2], m2
    movaps [outq+3*32*2*4], m3      ; 3*32*2*4 == lenq*3
    lea    in0q, [in0q+lenq*2]
    lea    in1q, [in1q+lenq*2]
    add    outq, mmsize
    sub    tmpd, mmsize
    jg .inner_loop16
    add     inq, 16
    add    outq, 3*32*2*4
    sub      id, 4
    jg .loop16
    RET

align 16
.loop8:                             ; 2 columns per pass
    mov    in0q, inq
    mov    in1q, 38*64*4
    add    in1q, in0q
    mov    tmpd, lend

.inner_loop8:
    movlps   m0, [in0q]
    movlps   m1, [in1q]
    movhps   m0, [in0q+lenq]
    movhps   m1, [in1q+lenq]
    SBUTTERFLYPS 0, 1, 2
    SBUTTERFLYPD 0, 1, 2
    movaps [outq], m0
    movaps [outq+lenq], m1
    lea    in0q, [in0q+lenq*2]
    lea    in1q, [in1q+lenq*2]
    add    outq, mmsize
    sub    tmpd, mmsize
    jg .inner_loop8
    add     inq, 8
    add    outq, lenq
    sub      id, 2
    jg .loop16                      ; what remains is a multiple of 4
    RET

align 16
.loop4:                             ; 1 column per pass
    mov    in0q, inq
    mov    in1q, 38*64*4
    add    in1q, in0q
    mov    tmpd, lend

.inner_loop4:
    movss    m0, [in0q]
    movss    m1, [in1q]
    movss    m2, [in0q+lenq]
    movss    m3, [in1q+lenq]
    movlhps  m0, m1
    movlhps  m2, m3
    shufps   m0, m2, q2020          ; pack two (re, im) output samples
    movaps [outq], m0
    lea    in0q, [in0q+lenq*2]
    lea    in1q, [in1q+lenq*2]
    add    outq, mmsize
    sub    tmpd, mmsize
    jg .inner_loop4
    add     inq, 4
    sub      id, 1
    test     id, 2
    jne .loop8
    cmp      id, 4
    jge .loop16
    RET
;***********************************************************
;void ff_ps_hybrid_synthesis_deint_sse4(float out[2][38][64],
;                                       float (*in)[32][2],
;                                       int i, int len)
;***********************************************************
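; Inverse of the interleave above (illustrative scalar sketch): split
; packed complex samples back into separate re/im planes.
;
;     for (; i < 64; i++)
;         for (int j = 0; j < 32; j++) {
;             out[0][j][i] = in[i][j][0];
;             out[1][j][i] = in[i][j][1];
;         }
;
; The column remainder is handled with the same 4/2/1-column loop layout
; as the interleave, and lend is again hardwired to 32 << 3. The SSE4
; variant only differs in using movsd and extractps for the narrow
; stores.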
%macro HYBRID_SYNTHESIS_DEINT 0
cglobal ps_hybrid_synthesis_deint, 3, 7, 5, out, in, i, len, out0, out1, tmp
%if cpuflag(sse4)
%define MOVH movsd
%else
%define MOVH movlps
%endif
    movsxdifnidn iq, id
    mov    lend, 32 << 3
    lea    outq, [outq+iq*4]
    mov    tmpd, id
    shl    tmpd, 8
    add     inq, tmpq
    mov    tmpd, 64
    sub    tmpd, id
    mov      id, tmpd

    test     id, 1
    jne .loop4
    test     id, 2
    jne .loop8

align 16
.loop16:
    mov   out0q, outq
    mov   out1q, 38*64*4
    add   out1q, out0q
    mov    tmpd, lend

.inner_loop16:
    movaps   m0, [inq]
    movaps   m1, [inq+lenq]
    movaps   m2, [inq+lenq*2]
    movaps   m3, [inq+3*32*2*4]
    TRANSPOSE4x4PS 0, 1, 2, 3, 4
    movaps [out0q], m0
    movaps [out1q], m1
    movaps [out0q+lenq], m2
    movaps [out1q+lenq], m3
    lea   out0q, [out0q+lenq*2]
    lea   out1q, [out1q+lenq*2]
    add     inq, mmsize
    sub    tmpd, mmsize
    jg .inner_loop16
    add    outq, 16
    add     inq, 3*32*2*4
    sub      id, 4
    jg .loop16
    RET

align 16
.loop8:
    mov   out0q, outq
    mov   out1q, 38*64*4
    add   out1q, out0q
    mov    tmpd, lend

.inner_loop8:
    movaps   m0, [inq]
    movaps   m1, [inq+lenq]
    SBUTTERFLYPS 0, 1, 2
    SBUTTERFLYPD 0, 1, 2
    MOVH [out0q], m0
    MOVH [out1q], m1
    movhps [out0q+lenq], m0
    movhps [out1q+lenq], m1
    lea   out0q, [out0q+lenq*2]
    lea   out1q, [out1q+lenq*2]
    add     inq, mmsize
    sub    tmpd, mmsize
    jg .inner_loop8
    add    outq, 8
    add     inq, lenq
    sub      id, 2
    jg .loop16
    RET

align 16
.loop4:
    mov   out0q, outq
    mov   out1q, 38*64*4
    add   out1q, out0q
    mov    tmpd, lend

.inner_loop4:
    movaps   m0, [inq]
    movss [out0q], m0
%if cpuflag(sse4)
    extractps [out1q], m0, 1
    extractps [out0q+lenq], m0, 2
    extractps [out1q+lenq], m0, 3
%else
    movhlps  m1, m0
    movss [out0q+lenq], m1
    shufps   m0, m0, 0xb1
    movss [out1q], m0
    movhlps  m1, m0
    movss [out1q+lenq], m1
%endif
    lea   out0q, [out0q+lenq*2]
    lea   out1q, [out1q+lenq*2]
    add     inq, mmsize
    sub    tmpd, mmsize
    jg .inner_loop4
    add    outq, 4
    sub      id, 1
    test     id, 2
    jne .loop8
    cmp      id, 4
    jge .loop16
    RET
%endmacro

INIT_XMM sse
HYBRID_SYNTHESIS_DEINT
INIT_XMM sse4
HYBRID_SYNTHESIS_DEINT
;*******************************************************************
;void ff_ps_hybrid_analysis_<opt>(float (*out)[2], float (*in)[2],
;                                 const float (*filter)[8][2],
;                                 ptrdiff_t stride, int n);
;*******************************************************************
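; Scalar equivalent (illustrative sketch of the C reference): a 13-tap
; complex FIR per output row, folded to six tap pairs by pairing in[j]
; with in[12-j], plus a purely real centre tap filter[i][6][0]:
;
;     for (int i = 0; i < n; i++) {
;         float re = filter[i][6][0] * in[6][0];
;         float im = filter[i][6][0] * in[6][1];
;         for (int j = 0; j < 6; j++) {
;             re += filter[i][j][0] * (in[j][0] + in[12-j][0]) -
;                   filter[i][j][1] * (in[j][1] - in[12-j][1]);
;             im += filter[i][j][0] * (in[j][1] + in[12-j][1]) +
;                   filter[i][j][1] * (in[j][0] - in[12-j][0]);
;         }
;         out[i*stride][0] = re;
;         out[i*stride][1] = im;
;     }
;
; Each PS_HYBRID_ANALYSIS_LOOP invocation below handles two of the six
; tap pairs; the centre tap is applied after the loop via SPLATD.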
%macro PS_HYBRID_ANALYSIS_LOOP 3
    movu     %1, [inq+mmsize*%3]
    movu     m1, [inq+mmsize*(5-%3)+8]
%if cpuflag(sse3)
    pshufd   %2, %1, q2301
    pshufd   m4, m1, q0123
    pshufd   m1, m1, q1032
    pshufd   m2, [filterq+nq+mmsize*%3], q2301
    addsubps %2, m4
    addsubps %1, m1
%else
    mova     m2, [filterq+nq+mmsize*%3]
    mova     %2, %1
    mova     m4, m1
    shufps   %2, %2, q2301
    shufps   m4, m4, q0123
    shufps   m1, m1, q1032
    shufps   m2, m2, q2301
    xorps    m4, m7                 ; emulate addsubps with the sign mask
    xorps    m1, m7
    subps    %2, m4
    subps    %1, m1
%endif
    mulps    %2, m2
    mulps    %1, m2
%if %3
    addps    m3, %2
    addps    m0, %1
%endif
%endmacro

%macro PS_HYBRID_ANALYSIS 0
cglobal ps_hybrid_analysis, 5, 5, 8, out, in, filter, stride, n
%if cpuflag(sse3)
%define MOVH movsd
%else
%define MOVH movlps
%endif
    shl strideq, 3
    shl      nd, 6                  ; 64 bytes per filter[i][8][2] row
    add filterq, nq
    neg      nq
    mova     m7, [ps_p1m1p1m1]

align 16
.loop:
    PS_HYBRID_ANALYSIS_LOOP m0, m3, 0
    PS_HYBRID_ANALYSIS_LOOP m5, m6, 1
    PS_HYBRID_ANALYSIS_LOOP m5, m6, 2

%if cpuflag(sse3)
    pshufd   m3, m3, q2301
    xorps    m0, m7
    hsubps   m3, m0
    pshufd   m1, m3, q0020
    pshufd   m3, m3, q0031
    addps    m1, m3
    movsd    m2, [inq+6*8]
%else
    mova     m1, m3
    mova     m2, m0
    shufps   m1, m1, q2301
    shufps   m2, m2, q2301
    subps    m1, m3
    addps    m2, m0
    unpcklps m3, m1, m2
    unpckhps m1, m2
    addps    m1, m3
    movu     m2, [inq+6*8]          ; faster than movlps and no risk of overread
%endif
    movss    m3, [filterq+nq+8*6]   ; real-only centre tap filter[i][6][0]
    SPLATD   m3
    mulps    m2, m3
    addps    m1, m2
    MOVH [outq], m1
    add    outq, strideq
    add      nq, 64
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse
PS_HYBRID_ANALYSIS
INIT_XMM sse3
PS_HYBRID_ANALYSIS