;******************************************************************************
;* Pixel utilities SIMD
;*
;* Copyright (C) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (C) 2014 Clément Bœsch <u pkh me>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
;                               const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
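; Plain MMX has no psadbw, so per-byte absolute differences are built from two
; saturating unsigned subtractions OR'ed together, widened to words against the
; zero register m7, and accumulated into m6.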
INIT_MMX mmx
cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
    pxor        m7, m7
    pxor        m6, m6
%rep 4
    mova        m0, [src1q]
    mova        m2, [src1q + stride1q]
    mova        m1, [src2q]
    mova        m3, [src2q + stride2q]
    psubusb     m4, m0, m1
    psubusb     m5, m2, m3
    psubusb     m1, m0
    psubusb     m3, m2
    por         m1, m4
    por         m3, m5
    punpcklbw   m0, m1, m7
    punpcklbw   m2, m3, m7
    punpckhbw   m1, m7
    punpckhbw   m3, m7
    paddw       m0, m1
    paddw       m2, m3
    paddw       m0, m2
    paddw       m6, m0
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
%endrep
    psrlq       m0, m6, 32
    paddw       m6, m0
    psrlq       m0, m6, 16
    paddw       m6, m0
    movd        eax, m6
    movzx       eax, ax
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
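; MMXEXT provides psadbw, which sums |a - b| over 8 bytes into the low word of
; the destination, so one psadbw handles a full 8-pixel row.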
INIT_MMX mmxext
cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
    pxor        m2, m2
%rep 4
    mova        m0, [src1q]
    mova        m1, [src1q + stride1q]
    psadbw      m0, [src2q]
    psadbw      m1, [src2q + stride2q]
    paddw       m2, m0
    paddw       m2, m1
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
%endrep
    movd        eax, m2
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
;                                    const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
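; Each 16-pixel row is handled as two 8-byte psadbw operations; the total
; (at most 16*16*255 = 65280) still fits in a 16-bit lane.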
INIT_MMX mmxext
cglobal pixelutils_sad_16x16, 4,4,0, src1, stride1, src2, stride2
    pxor        m2, m2
%rep 16
    mova        m0, [src1q]
    mova        m1, [src1q + 8]
    psadbw      m0, [src2q]
    psadbw      m1, [src2q + 8]
    paddw       m2, m0
    paddw       m2, m1
    add         src1q, stride1q
    add         src2q, stride2q
%endrep
    movd        eax, m2
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
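; Fully unaligned variant: movu tolerates any address. psadbw produces one
; partial sum per 64-bit half of the xmm register, so the final reduction only
; has to fold the high half onto the low one with movhlps.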
INIT_XMM sse2
cglobal pixelutils_sad_16x16, 4,4,5, src1, stride1, src2, stride2
    movu        m4, [src1q]
    movu        m2, [src2q]
    movu        m1, [src1q + stride1q]
    movu        m3, [src2q + stride2q]
    psadbw      m4, m2
    psadbw      m1, m3
    paddw       m4, m1
%rep 7
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
    movu        m0, [src1q]
    movu        m2, [src2q]
    movu        m1, [src1q + stride1q]
    movu        m3, [src2q + stride2q]
    psadbw      m0, m2
    psadbw      m1, m3
    paddw       m4, m0
    paddw       m4, m1
%endrep
    movhlps     m0, m4
    paddw       m4, m0
    movd        eax, m4
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_[au]_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                       const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
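; %1 selects the load used for src2: 'a' (mova, 16-byte aligned) or 'u' (movu).
; src1 is read through psadbw memory operands, which require 16-byte alignment
; with the legacy SSE encoding, so src1 must be aligned in both variants.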
%macro SAD_XMM_16x16 1
INIT_XMM sse2
cglobal pixelutils_sad_%1_16x16, 4,4,3, src1, stride1, src2, stride2
    mov%1       m2, [src2q]
    psadbw      m2, [src1q]
    mov%1       m1, [src2q + stride2q]
    psadbw      m1, [src1q + stride1q]
    paddw       m2, m1
%rep 7
    lea         src1q, [src1q + 2*stride1q]
    lea         src2q, [src2q + 2*stride2q]
    mov%1       m0, [src2q]
    psadbw      m0, [src1q]
    mov%1       m1, [src2q + stride2q]
    psadbw      m1, [src1q + stride1q]
    paddw       m2, m0
    paddw       m2, m1
%endrep
    movhlps     m0, m2
    paddw       m2, m0
    movd        eax, m2
    RET
%endmacro

SAD_XMM_16x16 a
SAD_XMM_16x16 u

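; Sum four rows of 32 pixels with unaligned loads, two 16-byte halves per row.
; Uses raw registers: r0/r1 are src1/stride1 and r2/r3 are src2/stride2; the
; partial sums are accumulated into m0.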
%macro PROCESS_SAD_32x4_U 0
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    movu        m1, [r2]
    movu        m2, [r2 + 16]
    movu        m3, [r0]
    movu        m4, [r0 + 16]
    psadbw      m1, m3
    psadbw      m2, m4
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
%endmacro

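; Same four-row step with %1 ('a' or 'u') selecting the load for src2; src1 is
; read through psadbw memory operands and therefore must be 16-byte aligned.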
%macro PROCESS_SAD_32x4 1
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
    mov%1       m1, [r2]
    mov%1       m2, [r2 + 16]
    psadbw      m1, [r0]
    psadbw      m2, [r0 + 16]
    paddd       m1, m2
    paddd       m0, m1
    lea         r2, [r2 + r3]
    lea         r0, [r0 + r1]
%endmacro

;-----------------------------------------------------------------------------
; int ff_pixelutils_sad_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-----------------------------------------------------------------------------
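; Fully unaligned variant: eight rows per loop iteration (two four-row macro
; calls), four iterations in total.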
INIT_XMM sse2
cglobal pixelutils_sad_32x32, 4,5,5, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 4
.loop:
    PROCESS_SAD_32x4_U
    PROCESS_SAD_32x4_U
    dec         r4d
    jnz         .loop
    movhlps     m1, m0
    paddd       m0, m1
    movd        eax, m0
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_[au]_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
;                                       const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
%macro SAD_XMM_32x32 1
INIT_XMM sse2
cglobal pixelutils_sad_%1_32x32, 4,5,3, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 4
.loop:
    PROCESS_SAD_32x4 %1
    PROCESS_SAD_32x4 %1
    dec         r4d
    jnz         .loop
    movhlps     m1, m0
    paddd       m0, m1
    movd        eax, m0
    RET
%endmacro

SAD_XMM_32x32 a
SAD_XMM_32x32 u

%if HAVE_AVX2_EXTERNAL
;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
;                                  const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
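; One 32-byte ymm load covers a whole row. r5 and r6 hold 3*stride1 and
; 3*stride2 so four rows can be addressed per iteration; the final reduction
; folds the upper 128-bit lane, then the upper qword, onto the low dword.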
INIT_YMM avx2
cglobal pixelutils_sad_32x32, 4,7,5, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 32/4
    lea         r5, [stride1q * 3]
    lea         r6, [stride2q * 3]
.loop:
    movu        m1, [src1q]                ; row 0 of pix0
    movu        m2, [src2q]                ; row 0 of pix1
    movu        m3, [src1q + stride1q]     ; row 1 of pix0
    movu        m4, [src2q + stride2q]     ; row 1 of pix1
    psadbw      m1, m2
    psadbw      m3, m4
    paddd       m0, m1
    paddd       m0, m3
    movu        m1, [src1q + 2 * stride1q] ; row 2 of pix0
    movu        m2, [src2q + 2 * stride2q] ; row 2 of pix1
    movu        m3, [src1q + r5]           ; row 3 of pix0
    movu        m4, [src2q + r6]           ; row 3 of pix1
    psadbw      m1, m2
    psadbw      m3, m4
    paddd       m0, m1
    paddd       m0, m3
    lea         src2q, [src2q + 4 * stride2q]
    lea         src1q, [src1q + 4 * stride1q]
    dec         r4d
    jnz         .loop
    vextracti128 xm1, m0, 1
    paddd       xm0, xm1
    pshufd      xm1, xm0, 2
    paddd       xm0, xm1
    movd        eax, xm0
    RET

;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_[au]_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
;                                       const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
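; %1 selects the load for src2: 'a' (mova, i.e. vmovdqa, 32-byte aligned) or
; 'u' (movu). src1 is read through psadbw memory operands, which are not
; alignment-checked under the VEX encoding.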
%macro SAD_AVX2_32x32 1
INIT_YMM avx2
cglobal pixelutils_sad_%1_32x32, 4,7,3, src1, stride1, src2, stride2
    pxor        m0, m0
    mov         r4d, 32/4
    lea         r5, [stride1q * 3]
    lea         r6, [stride2q * 3]
.loop:
    mov%1       m1, [src2q]                ; row 0 of pix1
    psadbw      m1, [src1q]
    mov%1       m2, [src2q + stride2q]     ; row 1 of pix1
    psadbw      m2, [src1q + stride1q]
    paddd       m0, m1
    paddd       m0, m2
    mov%1       m1, [src2q + 2 * stride2q] ; row 2 of pix1
    psadbw      m1, [src1q + 2 * stride1q]
    mov%1       m2, [src2q + r6]           ; row 3 of pix1
    psadbw      m2, [src1q + r5]
    paddd       m0, m1
    paddd       m0, m2
    lea         src2q, [src2q + 4 * stride2q]
    lea         src1q, [src1q + 4 * stride1q]
    dec         r4d
    jnz         .loop
    vextracti128 xm1, m0, 1
    paddd       xm0, xm1
    pshufd      xm1, xm0, 2
    paddd       xm0, xm1
    movd        eax, xm0
    RET
%endmacro

SAD_AVX2_32x32 a
SAD_AVX2_32x32 u
%endif