- /*
- * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
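- /*
- * This file implements the DSPr2 (MIPS DSP ASE rev 2) fast paths for
- * vpx_convolve8: 8-tap horizontal filters that write their output
- * transposed (each filtered source row becomes a destination column).
- * vpx_convolve8_dspr2() runs this horizontal pass twice -- the second
- * pass over the transposed intermediate filters the original vertical
- * direction and transposes the data back in the same step.
- */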
- #include <assert.h>
- #include <stdio.h>
- #include "./vpx_dsp_rtcd.h"
- #include "vpx_dsp/mips/convolve_common_dspr2.h"
- #include "vpx_dsp/vpx_dsp_common.h"
- #include "vpx_dsp/vpx_filter.h"
- #include "vpx_ports/mem.h"
- #if HAVE_DSPR2
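- /* 8-tap horizontal filter for 4-pixel-wide blocks, output transposed:
- * the four results of each source row are stored down a dst column
- * (step dst_stride) and dst advances one byte per source row. Each
- * accumulator is preloaded with 64 so that extp yields the rounded
- * sum, matching ROUND_POWER_OF_TWO(sum, FILTER_BITS) in the C code;
- * lbux then clamps through the vpx_ff_cropTbl lookup table. */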
- static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
- int32_t src_stride, uint8_t *dst,
- int32_t dst_stride,
- const int16_t *filter_x0,
- int32_t h) {
- int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
- uint8_t *dst_ptr;
- int32_t vector1b, vector2b, vector3b, vector4b;
- int32_t Temp1, Temp2, Temp3, Temp4;
- uint32_t vector4a = 64;
- uint32_t tp1, tp2;
- uint32_t p1, p2, p3, p4;
- uint32_t tn1, tn2;
- vector1b = ((const int32_t *)filter_x0)[0];
- vector2b = ((const int32_t *)filter_x0)[1];
- vector3b = ((const int32_t *)filter_x0)[2];
- vector4b = ((const int32_t *)filter_x0)[3];
- for (y = h; y--;) {
- dst_ptr = dst;
- /* prefetch data to cache memory */
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- __asm__ __volatile__(
- "ulw %[tp1], 0(%[src]) \n\t"
- "ulw %[tp2], 4(%[src]) \n\t"
- /* even 1. pixel */
- "mtlo %[vector4a], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "preceu.ph.qbr %[p1], %[tp1] \n\t"
- "preceu.ph.qbl %[p2], %[tp1] \n\t"
- "preceu.ph.qbr %[p3], %[tp2] \n\t"
- "preceu.ph.qbl %[p4], %[tp2] \n\t"
- "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
- "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
- "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
- "ulw %[tn2], 8(%[src]) \n\t"
- "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
- "extp %[Temp1], $ac3, 31 \n\t"
- /* even 2. pixel */
- "mtlo %[vector4a], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "preceu.ph.qbr %[p1], %[tn2] \n\t"
- "balign %[tn1], %[tn2], 3 \n\t"
- "balign %[tn2], %[tp2], 3 \n\t"
- "balign %[tp2], %[tp1], 3 \n\t"
- "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
- "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
- "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
- "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
- "extp %[Temp3], $ac2, 31 \n\t"
- /* odd 1. pixel */
- "lbux %[tp1], %[Temp1](%[cm]) \n\t"
- "mtlo %[vector4a], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "preceu.ph.qbr %[p1], %[tp2] \n\t"
- "preceu.ph.qbl %[p2], %[tp2] \n\t"
- "preceu.ph.qbr %[p3], %[tn2] \n\t"
- "preceu.ph.qbl %[p4], %[tn2] \n\t"
- "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
- "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
- "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
- "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
- "extp %[Temp2], $ac3, 31 \n\t"
- /* odd 2. pixel */
- "lbux %[tp2], %[Temp3](%[cm]) \n\t"
- "mtlo %[vector4a], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "preceu.ph.qbr %[p1], %[tn1] \n\t"
- "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
- "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
- "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
- "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
- "extp %[Temp4], $ac2, 31 \n\t"
- /* clamp */
- "lbux %[tn1], %[Temp2](%[cm]) \n\t"
- "lbux %[p2], %[Temp4](%[cm]) \n\t"
- /* store bytes */
- "sb %[tp1], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
- "sb %[tn1], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
- "sb %[tp2], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
- "sb %[p2], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t"
- : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1),
- [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3),
- [p4] "=&r"(p4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2),
- [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4), [dst_ptr] "+r"(dst_ptr)
- : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
- [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
- [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src),
- [dst_stride] "r"(dst_stride));
- /* Next row... */
- src += src_stride;
- dst += 1;
- }
- }
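- /* As above for 8-pixel-wide blocks: even and odd outputs are computed
- * on separate accumulators, with dst_ptr storing the even results and
- * odd_dst the odd ones, each stepping by two destination rows. */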
- static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
- int32_t src_stride, uint8_t *dst,
- int32_t dst_stride,
- const int16_t *filter_x0,
- int32_t h) {
- int32_t y;
- uint8_t *cm = vpx_ff_cropTbl;
- uint8_t *dst_ptr;
- uint32_t vector4a = 64;
- int32_t vector1b, vector2b, vector3b, vector4b;
- int32_t Temp1, Temp2, Temp3;
- uint32_t tp1, tp2, tp3;
- uint32_t p1, p2, p3, p4, n1;
- uint8_t *odd_dst;
- uint32_t dst_pitch_2 = (dst_stride << 1);
- vector1b = ((const int32_t *)filter_x0)[0];
- vector2b = ((const int32_t *)filter_x0)[1];
- vector3b = ((const int32_t *)filter_x0)[2];
- vector4b = ((const int32_t *)filter_x0)[3];
- for (y = h; y--;) {
- /* prefetch data to cache memory */
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- dst_ptr = dst;
- odd_dst = (dst_ptr + dst_stride);
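- /* ulw does unaligned word loads; the reloads at byte offsets 1, 5,
- * 9, ... below fetch the same data shifted by one byte for the odd
- * output phase. */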
- __asm__ __volatile__(
- "ulw %[tp2], 0(%[src]) \n\t"
- "ulw %[tp1], 4(%[src]) \n\t"
- /* even 1. pixel */
- "mtlo %[vector4a], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "mtlo %[vector4a], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "preceu.ph.qbr %[p1], %[tp2] \n\t"
- "preceu.ph.qbl %[p2], %[tp2] \n\t"
- "preceu.ph.qbr %[p3], %[tp1] \n\t"
- "preceu.ph.qbl %[p4], %[tp1] \n\t"
- "ulw %[tp3], 8(%[src]) \n\t"
- "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
- "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
- "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
- "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
- "extp %[Temp1], $ac3, 31 \n\t"
- /* even 2. pixel */
- "preceu.ph.qbr %[p1], %[tp3] \n\t"
- "preceu.ph.qbl %[n1], %[tp3] \n\t"
- "ulw %[tp2], 12(%[src]) \n\t"
- "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t"
- "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t"
- "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t"
- "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t"
- "extp %[Temp3], $ac2, 31 \n\t"
- /* even 3. pixel */
- "lbux %[Temp2], %[Temp1](%[cm]) \n\t"
- "mtlo %[vector4a], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "preceu.ph.qbr %[p2], %[tp2] \n\t"
- "dpa.w.ph $ac1, %[p3], %[vector1b] \n\t"
- "dpa.w.ph $ac1, %[p4], %[vector2b] \n\t"
- "dpa.w.ph $ac1, %[p1], %[vector3b] \n\t"
- "lbux %[tp3], %[Temp3](%[cm]) \n\t"
- "dpa.w.ph $ac1, %[n1], %[vector4b] \n\t"
- "extp %[p3], $ac1, 31 \n\t"
- /* even 4. pixel */
- "mtlo %[vector4a], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "mtlo %[vector4a], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "sb %[Temp2], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
- "sb %[tp3], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
- "ulw %[tp1], 1(%[src]) \n\t"
- "ulw %[tp3], 5(%[src]) \n\t"
- "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
- "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
- "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
- "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
- "extp %[Temp3], $ac2, 31 \n\t"
- "lbux %[tp2], %[p3](%[cm]) \n\t"
- /* odd 1. pixel */
- "mtlo %[vector4a], $ac1 \n\t"
- "mthi $zero, $ac1 \n\t"
- "preceu.ph.qbr %[p1], %[tp1] \n\t"
- "preceu.ph.qbl %[p2], %[tp1] \n\t"
- "preceu.ph.qbr %[p3], %[tp3] \n\t"
- "preceu.ph.qbl %[p4], %[tp3] \n\t"
- "sb %[tp2], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
- "ulw %[tp2], 9(%[src]) \n\t"
- "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t"
- "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t"
- "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t"
- "dpa.w.ph $ac3, %[p4], %[vector4b] \n\t"
- "extp %[Temp2], $ac3, 31 \n\t"
- /* odd 2. pixel */
- "lbux %[tp1], %[Temp3](%[cm]) \n\t"
- "mtlo %[vector4a], $ac3 \n\t"
- "mthi $zero, $ac3 \n\t"
- "mtlo %[vector4a], $ac2 \n\t"
- "mthi $zero, $ac2 \n\t"
- "preceu.ph.qbr %[p1], %[tp2] \n\t"
- "preceu.ph.qbl %[n1], %[tp2] \n\t"
- "ulw %[Temp1], 13(%[src]) \n\t"
- "dpa.w.ph $ac1, %[p2], %[vector1b] \n\t"
- "sb %[tp1], 0(%[dst_ptr]) \n\t"
- "addu %[dst_ptr], %[dst_ptr], %[dst_pitch_2] \n\t"
- "dpa.w.ph $ac1, %[p3], %[vector2b] \n\t"
- "dpa.w.ph $ac1, %[p4], %[vector3b] \n\t"
- "dpa.w.ph $ac1, %[p1], %[vector4b] \n\t"
- "extp %[Temp3], $ac1, 31 \n\t"
- /* odd 3. pixel */
- "lbux %[tp3], %[Temp2](%[cm]) \n\t"
- "preceu.ph.qbr %[p2], %[Temp1] \n\t"
- "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t"
- "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t"
- "dpa.w.ph $ac3, %[p1], %[vector3b] \n\t"
- "dpa.w.ph $ac3, %[n1], %[vector4b] \n\t"
- "extp %[Temp2], $ac3, 31 \n\t"
- /* odd 4. pixel */
- "sb %[tp3], 0(%[odd_dst]) \n\t"
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
- "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t"
- "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t"
- "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t"
- "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
- "extp %[Temp1], $ac2, 31 \n\t"
- /* clamp */
- "lbux %[p4], %[Temp3](%[cm]) \n\t"
- "lbux %[p2], %[Temp2](%[cm]) \n\t"
- "lbux %[n1], %[Temp1](%[cm]) \n\t"
- /* store bytes */
- "sb %[p4], 0(%[odd_dst]) \n\t"
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
- "sb %[p2], 0(%[odd_dst]) \n\t"
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t"
- "sb %[n1], 0(%[odd_dst]) \n\t"
- : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1),
- [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1),
- [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
- [dst_ptr] "+r"(dst_ptr), [odd_dst] "+r"(odd_dst)
- : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
- [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
- [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src),
- [dst_pitch_2] "r"(dst_pitch_2));
- /* Next row... */
- src += src_stride;
- dst += 1;
- }
- }
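- /* 8-tap horizontal filter for 16-pixel-wide chunks, output transposed.
- * count is the number of 16-pixel chunks per row (w / 16), so the same
- * routine serves both w == 16 and w == 32. */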
- static void convolve_horiz_16_transposed_dspr2(
- const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
- int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) {
- int32_t c, y;
- const uint8_t *src;
- uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
- uint32_t vector_64 = 64;
- int32_t filter12, filter34, filter56, filter78;
- int32_t Temp1, Temp2, Temp3;
- uint32_t qload1, qload2;
- uint32_t p1, p2, p3, p4, p5;
- uint32_t st1, st2, st3;
- uint32_t dst_pitch_2 = (dst_stride << 1);
- uint8_t *odd_dst;
- filter12 = ((const int32_t *)filter_x0)[0];
- filter34 = ((const int32_t *)filter_x0)[1];
- filter56 = ((const int32_t *)filter_x0)[2];
- filter78 = ((const int32_t *)filter_x0)[3];
- for (y = h; y--;) {
- /* prefetch data to cache memory */
- prefetch_load(src_ptr + src_stride);
- prefetch_load(src_ptr + src_stride + 32);
- src = src_ptr;
- dst = dst_ptr;
- odd_dst = (dst + dst_stride);
- for (c = 0; c < count; c++) {
- __asm__ __volatile__(
- "ulw %[qload1], 0(%[src]) "
- "\n\t"
- "ulw %[qload2], 4(%[src]) "
- "\n\t"
- /* even 1. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* even 1 */
- "mthi $zero, $ac1 "
- "\n\t"
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* even 2 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p3], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p4], %[qload2] "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload1] "
- "\n\t"
- "preceu.ph.qbl %[p2], %[qload1] "
- "\n\t"
- "ulw %[qload2], 8(%[src]) "
- "\n\t"
- "dpa.w.ph $ac1, %[p1], %[filter12] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac1, %[p2], %[filter34] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac1, %[p3], %[filter56] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac1, %[p4], %[filter78] "
- "\n\t" /* even 1 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* even 1 */
- /* even 2. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* even 3 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p5], %[qload2] "
- "\n\t"
- "ulw %[qload1], 12(%[src]) "
- "\n\t"
- "dpa.w.ph $ac2, %[p2], %[filter12] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac2, %[p3], %[filter34] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac2, %[p4], %[filter56] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac2, %[p1], %[filter78] "
- "\n\t" /* even 1 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* even 1 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* even 1 */
- /* even 3. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* even 4 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbr %[p2], %[qload1] "
- "\n\t"
- "sb %[st1], 0(%[dst]) "
- "\n\t" /* even 1 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- " \n\t"
- "dpa.w.ph $ac3, %[p3], %[filter12] "
- "\n\t" /* even 3 */
- "dpa.w.ph $ac3, %[p4], %[filter34] "
- "\n\t" /* even 3 */
- "dpa.w.ph $ac3, %[p1], %[filter56] "
- "\n\t" /* even 3 */
- "dpa.w.ph $ac3, %[p5], %[filter78] "
- "\n\t" /* even 3 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* even 3 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* even 1 */
- /* even 4. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* even 5 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbl %[p3], %[qload1] "
- "\n\t"
- "sb %[st2], 0(%[dst]) "
- "\n\t" /* even 2 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload2], 16(%[src]) "
- "\n\t"
- "dpa.w.ph $ac1, %[p4], %[filter12] "
- "\n\t" /* even 4 */
- "dpa.w.ph $ac1, %[p1], %[filter34] "
- "\n\t" /* even 4 */
- "dpa.w.ph $ac1, %[p5], %[filter56] "
- "\n\t" /* even 4 */
- "dpa.w.ph $ac1, %[p2], %[filter78] "
- "\n\t" /* even 4 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* even 4 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* even 3 */
- /* even 5. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* even 6 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbr %[p4], %[qload2] "
- "\n\t"
- "sb %[st3], 0(%[dst]) "
- "\n\t" /* even 3 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac2, %[p1], %[filter12] "
- "\n\t" /* even 5 */
- "dpa.w.ph $ac2, %[p5], %[filter34] "
- "\n\t" /* even 5 */
- "dpa.w.ph $ac2, %[p2], %[filter56] "
- "\n\t" /* even 5 */
- "dpa.w.ph $ac2, %[p3], %[filter78] "
- "\n\t" /* even 5 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* even 5 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* even 4 */
- /* even 6. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* even 7 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbl %[p1], %[qload2] "
- "\n\t"
- "sb %[st1], 0(%[dst]) "
- "\n\t" /* even 4 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload1], 20(%[src]) "
- "\n\t"
- "dpa.w.ph $ac3, %[p5], %[filter12] "
- "\n\t" /* even 6 */
- "dpa.w.ph $ac3, %[p2], %[filter34] "
- "\n\t" /* even 6 */
- "dpa.w.ph $ac3, %[p3], %[filter56] "
- "\n\t" /* even 6 */
- "dpa.w.ph $ac3, %[p4], %[filter78] "
- "\n\t" /* even 6 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* even 6 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* even 5 */
- /* even 7. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* even 8 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p5], %[qload1] "
- "\n\t"
- "sb %[st2], 0(%[dst]) "
- "\n\t" /* even 5 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac1, %[p2], %[filter12] "
- "\n\t" /* even 7 */
- "dpa.w.ph $ac1, %[p3], %[filter34] "
- "\n\t" /* even 7 */
- "dpa.w.ph $ac1, %[p4], %[filter56] "
- "\n\t" /* even 7 */
- "dpa.w.ph $ac1, %[p1], %[filter78] "
- "\n\t" /* even 7 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* even 7 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* even 6 */
- /* even 8. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* odd 1 */
- "mthi $zero, $ac3 "
- "\n\t"
- "dpa.w.ph $ac2, %[p3], %[filter12] "
- "\n\t" /* even 8 */
- "dpa.w.ph $ac2, %[p4], %[filter34] "
- "\n\t" /* even 8 */
- "sb %[st3], 0(%[dst]) "
- "\n\t" /* even 6 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac2, %[p1], %[filter56] "
- "\n\t" /* even 8 */
- "dpa.w.ph $ac2, %[p5], %[filter78] "
- "\n\t" /* even 8 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* even 8 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* even 7 */
- /* ODD pixels */
- "ulw %[qload1], 1(%[src]) "
- "\n\t"
- "ulw %[qload2], 5(%[src]) "
- "\n\t"
- /* odd 1. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* odd 2 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload1] "
- "\n\t"
- "preceu.ph.qbl %[p2], %[qload1] "
- "\n\t"
- "preceu.ph.qbr %[p3], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p4], %[qload2] "
- "\n\t"
- "sb %[st1], 0(%[dst]) "
- "\n\t" /* even 7 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload2], 9(%[src]) "
- "\n\t"
- "dpa.w.ph $ac3, %[p1], %[filter12] "
- "\n\t" /* odd 1 */
- "dpa.w.ph $ac3, %[p2], %[filter34] "
- "\n\t" /* odd 1 */
- "dpa.w.ph $ac3, %[p3], %[filter56] "
- "\n\t" /* odd 1 */
- "dpa.w.ph $ac3, %[p4], %[filter78] "
- "\n\t" /* odd 1 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* odd 1 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* even 8 */
- /* odd 2. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* odd 3 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p5], %[qload2] "
- "\n\t"
- "sb %[st2], 0(%[dst]) "
- "\n\t" /* even 8 */
- "ulw %[qload1], 13(%[src]) "
- "\n\t"
- "dpa.w.ph $ac1, %[p2], %[filter12] "
- "\n\t" /* odd 2 */
- "dpa.w.ph $ac1, %[p3], %[filter34] "
- "\n\t" /* odd 2 */
- "dpa.w.ph $ac1, %[p4], %[filter56] "
- "\n\t" /* odd 2 */
- "dpa.w.ph $ac1, %[p1], %[filter78] "
- "\n\t" /* odd 2 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* odd 2 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* odd 1 */
- /* odd 3. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* odd 4 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbr %[p2], %[qload1] "
- "\n\t"
- "sb %[st3], 0(%[odd_dst]) "
- "\n\t" /* odd 1 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac2, %[p3], %[filter12] "
- "\n\t" /* odd 3 */
- "dpa.w.ph $ac2, %[p4], %[filter34] "
- "\n\t" /* odd 3 */
- "dpa.w.ph $ac2, %[p1], %[filter56] "
- "\n\t" /* odd 3 */
- "dpa.w.ph $ac2, %[p5], %[filter78] "
- "\n\t" /* odd 3 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* odd 3 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* odd 2 */
- /* odd 4. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* odd 5 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbl %[p3], %[qload1] "
- "\n\t"
- "sb %[st1], 0(%[odd_dst]) "
- "\n\t" /* odd 2 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload2], 17(%[src]) "
- "\n\t"
- "dpa.w.ph $ac3, %[p4], %[filter12] "
- "\n\t" /* odd 4 */
- "dpa.w.ph $ac3, %[p1], %[filter34] "
- "\n\t" /* odd 4 */
- "dpa.w.ph $ac3, %[p5], %[filter56] "
- "\n\t" /* odd 4 */
- "dpa.w.ph $ac3, %[p2], %[filter78] "
- "\n\t" /* odd 4 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* odd 4 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* odd 3 */
- /* odd 5. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* odd 6 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p4], %[qload2] "
- "\n\t"
- "sb %[st2], 0(%[odd_dst]) "
- "\n\t" /* odd 3 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac1, %[p1], %[filter12] "
- "\n\t" /* odd 5 */
- "dpa.w.ph $ac1, %[p5], %[filter34] "
- "\n\t" /* odd 5 */
- "dpa.w.ph $ac1, %[p2], %[filter56] "
- "\n\t" /* odd 5 */
- "dpa.w.ph $ac1, %[p3], %[filter78] "
- "\n\t" /* odd 5 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* odd 5 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* odd 4 */
- /* odd 6. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* odd 7 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbl %[p1], %[qload2] "
- "\n\t"
- "sb %[st3], 0(%[odd_dst]) "
- "\n\t" /* odd 4 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload1], 21(%[src]) "
- "\n\t"
- "dpa.w.ph $ac2, %[p5], %[filter12] "
- "\n\t" /* odd 6 */
- "dpa.w.ph $ac2, %[p2], %[filter34] "
- "\n\t" /* odd 6 */
- "dpa.w.ph $ac2, %[p3], %[filter56] "
- "\n\t" /* odd 6 */
- "dpa.w.ph $ac2, %[p4], %[filter78] "
- "\n\t" /* odd 6 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* odd 6 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* odd 5 */
- /* odd 7. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* odd 8 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbr %[p5], %[qload1] "
- "\n\t"
- "sb %[st1], 0(%[odd_dst]) "
- "\n\t" /* odd 5 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac3, %[p2], %[filter12] "
- "\n\t" /* odd 7 */
- "dpa.w.ph $ac3, %[p3], %[filter34] "
- "\n\t" /* odd 7 */
- "dpa.w.ph $ac3, %[p4], %[filter56] "
- "\n\t" /* odd 7 */
- "dpa.w.ph $ac3, %[p1], %[filter78] "
- "\n\t" /* odd 7 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* odd 7 */
- /* odd 8. pixel */
- "dpa.w.ph $ac1, %[p3], %[filter12] "
- "\n\t" /* odd 8 */
- "dpa.w.ph $ac1, %[p4], %[filter34] "
- "\n\t" /* odd 8 */
- "dpa.w.ph $ac1, %[p1], %[filter56] "
- "\n\t" /* odd 8 */
- "dpa.w.ph $ac1, %[p5], %[filter78] "
- "\n\t" /* odd 8 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* odd 8 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* odd 6 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* odd 7 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* odd 8 */
- "sb %[st2], 0(%[odd_dst]) "
- "\n\t" /* odd 6 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "sb %[st3], 0(%[odd_dst]) "
- "\n\t" /* odd 7 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "sb %[st1], 0(%[odd_dst]) "
- "\n\t" /* odd 8 */
- : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
- [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
- [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
- [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
- [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
- : [filter12] "r"(filter12), [filter34] "r"(filter34),
- [filter56] "r"(filter56), [filter78] "r"(filter78),
- [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src),
- [dst_pitch_2] "r"(dst_pitch_2));
- src += 16;
- dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
- odd_dst = (dst + dst_stride);
- }
- /* Next row... */
- src_ptr += src_stride;
- dst_ptr += 1;
- }
- }
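- /* Identical to the 16-pixel version with the chunk count fixed at 4
- * (64 pixels per row) and one extra prefetch for the wider rows. */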
- static void convolve_horiz_64_transposed_dspr2(
- const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr,
- int32_t dst_stride, const int16_t *filter_x0, int32_t h) {
- int32_t c, y;
- const uint8_t *src;
- uint8_t *dst;
- uint8_t *cm = vpx_ff_cropTbl;
- uint32_t vector_64 = 64;
- int32_t filter12, filter34, filter56, filter78;
- int32_t Temp1, Temp2, Temp3;
- uint32_t qload1, qload2;
- uint32_t p1, p2, p3, p4, p5;
- uint32_t st1, st2, st3;
- uint32_t dst_pitch_2 = (dst_stride << 1);
- uint8_t *odd_dst;
- filter12 = ((const int32_t *)filter_x0)[0];
- filter34 = ((const int32_t *)filter_x0)[1];
- filter56 = ((const int32_t *)filter_x0)[2];
- filter78 = ((const int32_t *)filter_x0)[3];
- for (y = h; y--;) {
- /* prefetch data to cache memory */
- prefetch_load(src_ptr + src_stride);
- prefetch_load(src_ptr + src_stride + 32);
- prefetch_load(src_ptr + src_stride + 64);
- src = src_ptr;
- dst = dst_ptr;
- odd_dst = (dst + dst_stride);
- for (c = 0; c < 4; c++) {
- __asm__ __volatile__(
- "ulw %[qload1], 0(%[src]) "
- "\n\t"
- "ulw %[qload2], 4(%[src]) "
- "\n\t"
- /* even 1. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* even 1 */
- "mthi $zero, $ac1 "
- "\n\t"
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* even 2 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p3], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p4], %[qload2] "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload1] "
- "\n\t"
- "preceu.ph.qbl %[p2], %[qload1] "
- "\n\t"
- "ulw %[qload2], 8(%[src]) "
- "\n\t"
- "dpa.w.ph $ac1, %[p1], %[filter12] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac1, %[p2], %[filter34] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac1, %[p3], %[filter56] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac1, %[p4], %[filter78] "
- "\n\t" /* even 1 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* even 1 */
- /* even 2. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* even 3 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p5], %[qload2] "
- "\n\t"
- "ulw %[qload1], 12(%[src]) "
- "\n\t"
- "dpa.w.ph $ac2, %[p2], %[filter12] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac2, %[p3], %[filter34] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac2, %[p4], %[filter56] "
- "\n\t" /* even 1 */
- "dpa.w.ph $ac2, %[p1], %[filter78] "
- "\n\t" /* even 1 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* even 1 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* even 1 */
- /* even 3. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* even 4 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbr %[p2], %[qload1] "
- "\n\t"
- "sb %[st1], 0(%[dst]) "
- "\n\t" /* even 1 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- " \n\t"
- "dpa.w.ph $ac3, %[p3], %[filter12] "
- "\n\t" /* even 3 */
- "dpa.w.ph $ac3, %[p4], %[filter34] "
- "\n\t" /* even 3 */
- "dpa.w.ph $ac3, %[p1], %[filter56] "
- "\n\t" /* even 3 */
- "dpa.w.ph $ac3, %[p5], %[filter78] "
- "\n\t" /* even 3 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* even 3 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* even 1 */
- /* even 4. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* even 5 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbl %[p3], %[qload1] "
- "\n\t"
- "sb %[st2], 0(%[dst]) "
- "\n\t" /* even 2 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload2], 16(%[src]) "
- "\n\t"
- "dpa.w.ph $ac1, %[p4], %[filter12] "
- "\n\t" /* even 4 */
- "dpa.w.ph $ac1, %[p1], %[filter34] "
- "\n\t" /* even 4 */
- "dpa.w.ph $ac1, %[p5], %[filter56] "
- "\n\t" /* even 4 */
- "dpa.w.ph $ac1, %[p2], %[filter78] "
- "\n\t" /* even 4 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* even 4 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* even 3 */
- /* even 5. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* even 6 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbr %[p4], %[qload2] "
- "\n\t"
- "sb %[st3], 0(%[dst]) "
- "\n\t" /* even 3 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac2, %[p1], %[filter12] "
- "\n\t" /* even 5 */
- "dpa.w.ph $ac2, %[p5], %[filter34] "
- "\n\t" /* even 5 */
- "dpa.w.ph $ac2, %[p2], %[filter56] "
- "\n\t" /* even 5 */
- "dpa.w.ph $ac2, %[p3], %[filter78] "
- "\n\t" /* even 5 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* even 5 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* even 4 */
- /* even 6. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* even 7 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbl %[p1], %[qload2] "
- "\n\t"
- "sb %[st1], 0(%[dst]) "
- "\n\t" /* even 4 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload1], 20(%[src]) "
- "\n\t"
- "dpa.w.ph $ac3, %[p5], %[filter12] "
- "\n\t" /* even 6 */
- "dpa.w.ph $ac3, %[p2], %[filter34] "
- "\n\t" /* even 6 */
- "dpa.w.ph $ac3, %[p3], %[filter56] "
- "\n\t" /* even 6 */
- "dpa.w.ph $ac3, %[p4], %[filter78] "
- "\n\t" /* even 6 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* even 6 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* even 5 */
- /* even 7. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* even 8 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p5], %[qload1] "
- "\n\t"
- "sb %[st2], 0(%[dst]) "
- "\n\t" /* even 5 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac1, %[p2], %[filter12] "
- "\n\t" /* even 7 */
- "dpa.w.ph $ac1, %[p3], %[filter34] "
- "\n\t" /* even 7 */
- "dpa.w.ph $ac1, %[p4], %[filter56] "
- "\n\t" /* even 7 */
- "dpa.w.ph $ac1, %[p1], %[filter78] "
- "\n\t" /* even 7 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* even 7 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* even 6 */
- /* even 8. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* odd 1 */
- "mthi $zero, $ac3 "
- "\n\t"
- "dpa.w.ph $ac2, %[p3], %[filter12] "
- "\n\t" /* even 8 */
- "dpa.w.ph $ac2, %[p4], %[filter34] "
- "\n\t" /* even 8 */
- "sb %[st3], 0(%[dst]) "
- "\n\t" /* even 6 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac2, %[p1], %[filter56] "
- "\n\t" /* even 8 */
- "dpa.w.ph $ac2, %[p5], %[filter78] "
- "\n\t" /* even 8 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* even 8 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* even 7 */
- /* ODD pixels */
- "ulw %[qload1], 1(%[src]) "
- "\n\t"
- "ulw %[qload2], 5(%[src]) "
- "\n\t"
- /* odd 1. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* odd 2 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload1] "
- "\n\t"
- "preceu.ph.qbl %[p2], %[qload1] "
- "\n\t"
- "preceu.ph.qbr %[p3], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p4], %[qload2] "
- "\n\t"
- "sb %[st1], 0(%[dst]) "
- "\n\t" /* even 7 */
- "addu %[dst], %[dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload2], 9(%[src]) "
- "\n\t"
- "dpa.w.ph $ac3, %[p1], %[filter12] "
- "\n\t" /* odd 1 */
- "dpa.w.ph $ac3, %[p2], %[filter34] "
- "\n\t" /* odd 1 */
- "dpa.w.ph $ac3, %[p3], %[filter56] "
- "\n\t" /* odd 1 */
- "dpa.w.ph $ac3, %[p4], %[filter78] "
- "\n\t" /* odd 1 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* odd 1 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* even 8 */
- /* odd 2. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* odd 3 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p1], %[qload2] "
- "\n\t"
- "preceu.ph.qbl %[p5], %[qload2] "
- "\n\t"
- "sb %[st2], 0(%[dst]) "
- "\n\t" /* even 8 */
- "ulw %[qload1], 13(%[src]) "
- "\n\t"
- "dpa.w.ph $ac1, %[p2], %[filter12] "
- "\n\t" /* odd 2 */
- "dpa.w.ph $ac1, %[p3], %[filter34] "
- "\n\t" /* odd 2 */
- "dpa.w.ph $ac1, %[p4], %[filter56] "
- "\n\t" /* odd 2 */
- "dpa.w.ph $ac1, %[p1], %[filter78] "
- "\n\t" /* odd 2 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* odd 2 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* odd 1 */
- /* odd 3. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* odd 4 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbr %[p2], %[qload1] "
- "\n\t"
- "sb %[st3], 0(%[odd_dst]) "
- "\n\t" /* odd 1 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac2, %[p3], %[filter12] "
- "\n\t" /* odd 3 */
- "dpa.w.ph $ac2, %[p4], %[filter34] "
- "\n\t" /* odd 3 */
- "dpa.w.ph $ac2, %[p1], %[filter56] "
- "\n\t" /* odd 3 */
- "dpa.w.ph $ac2, %[p5], %[filter78] "
- "\n\t" /* odd 3 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* odd 3 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* odd 2 */
- /* odd 4. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* odd 5 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbl %[p3], %[qload1] "
- "\n\t"
- "sb %[st1], 0(%[odd_dst]) "
- "\n\t" /* odd 2 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload2], 17(%[src]) "
- "\n\t"
- "dpa.w.ph $ac3, %[p4], %[filter12] "
- "\n\t" /* odd 4 */
- "dpa.w.ph $ac3, %[p1], %[filter34] "
- "\n\t" /* odd 4 */
- "dpa.w.ph $ac3, %[p5], %[filter56] "
- "\n\t" /* odd 4 */
- "dpa.w.ph $ac3, %[p2], %[filter78] "
- "\n\t" /* odd 4 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* odd 4 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* odd 3 */
- /* odd 5. pixel */
- "mtlo %[vector_64], $ac2 "
- "\n\t" /* odd 6 */
- "mthi $zero, $ac2 "
- "\n\t"
- "preceu.ph.qbr %[p4], %[qload2] "
- "\n\t"
- "sb %[st2], 0(%[odd_dst]) "
- "\n\t" /* odd 3 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac1, %[p1], %[filter12] "
- "\n\t" /* odd 5 */
- "dpa.w.ph $ac1, %[p5], %[filter34] "
- "\n\t" /* odd 5 */
- "dpa.w.ph $ac1, %[p2], %[filter56] "
- "\n\t" /* odd 5 */
- "dpa.w.ph $ac1, %[p3], %[filter78] "
- "\n\t" /* odd 5 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* odd 5 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* odd 4 */
- /* odd 6. pixel */
- "mtlo %[vector_64], $ac3 "
- "\n\t" /* odd 7 */
- "mthi $zero, $ac3 "
- "\n\t"
- "preceu.ph.qbl %[p1], %[qload2] "
- "\n\t"
- "sb %[st3], 0(%[odd_dst]) "
- "\n\t" /* odd 4 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "ulw %[qload1], 21(%[src]) "
- "\n\t"
- "dpa.w.ph $ac2, %[p5], %[filter12] "
- "\n\t" /* odd 6 */
- "dpa.w.ph $ac2, %[p2], %[filter34] "
- "\n\t" /* odd 6 */
- "dpa.w.ph $ac2, %[p3], %[filter56] "
- "\n\t" /* odd 6 */
- "dpa.w.ph $ac2, %[p4], %[filter78] "
- "\n\t" /* odd 6 */
- "extp %[Temp2], $ac2, 31 "
- "\n\t" /* odd 6 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* odd 5 */
- /* odd 7. pixel */
- "mtlo %[vector_64], $ac1 "
- "\n\t" /* odd 8 */
- "mthi $zero, $ac1 "
- "\n\t"
- "preceu.ph.qbr %[p5], %[qload1] "
- "\n\t"
- "sb %[st1], 0(%[odd_dst]) "
- "\n\t" /* odd 5 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "dpa.w.ph $ac3, %[p2], %[filter12] "
- "\n\t" /* odd 7 */
- "dpa.w.ph $ac3, %[p3], %[filter34] "
- "\n\t" /* odd 7 */
- "dpa.w.ph $ac3, %[p4], %[filter56] "
- "\n\t" /* odd 7 */
- "dpa.w.ph $ac3, %[p1], %[filter78] "
- "\n\t" /* odd 7 */
- "extp %[Temp3], $ac3, 31 "
- "\n\t" /* odd 7 */
- /* odd 8. pixel */
- "dpa.w.ph $ac1, %[p3], %[filter12] "
- "\n\t" /* odd 8 */
- "dpa.w.ph $ac1, %[p4], %[filter34] "
- "\n\t" /* odd 8 */
- "dpa.w.ph $ac1, %[p1], %[filter56] "
- "\n\t" /* odd 8 */
- "dpa.w.ph $ac1, %[p5], %[filter78] "
- "\n\t" /* odd 8 */
- "extp %[Temp1], $ac1, 31 "
- "\n\t" /* odd 8 */
- "lbux %[st2], %[Temp2](%[cm]) "
- "\n\t" /* odd 6 */
- "lbux %[st3], %[Temp3](%[cm]) "
- "\n\t" /* odd 7 */
- "lbux %[st1], %[Temp1](%[cm]) "
- "\n\t" /* odd 8 */
- "sb %[st2], 0(%[odd_dst]) "
- "\n\t" /* odd 6 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "sb %[st3], 0(%[odd_dst]) "
- "\n\t" /* odd 7 */
- "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] "
- "\n\t"
- "sb %[st1], 0(%[odd_dst]) "
- "\n\t" /* odd 8 */
- : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5),
- [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3),
- [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4),
- [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3),
- [dst] "+r"(dst), [odd_dst] "+r"(odd_dst)
- : [filter12] "r"(filter12), [filter34] "r"(filter34),
- [filter56] "r"(filter56), [filter78] "r"(filter78),
- [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src),
- [dst_pitch_2] "r"(dst_pitch_2));
- src += 16;
- dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
- odd_dst = (dst + dst_stride);
- }
- /* Next row... */
- src_ptr += src_stride;
- dst_ptr += 1;
- }
- }
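- /* Plain C fallback: 8-tap horizontal filter with transposed output for
- * widths that have no DSPr2 fast path. */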
- void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter, int w, int h) {
- int x, y, k;
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- int sum = 0;
- for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k];
- dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
- }
- src += src_stride;
- dst += 1;
- }
- }
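- /* Transposed copy, used when the filter kernel is the identity. */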
- void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride, int w, int h) {
- int x, y;
- for (y = 0; y < h; ++y) {
- for (x = 0; x < w; ++x) {
- dst[x * dst_stride] = src[x];
- }
- src += src_stride;
- dst += 1;
- }
- }
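- /* Two-pass 8-tap convolution. Pass 1 filters horizontally and writes
- * the intermediate block transposed into temp; pass 2 applies the same
- * horizontal routines to temp, which filters the original vertical
- * direction and transposes the result back into dst. */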
- void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
- ptrdiff_t dst_stride, const InterpKernel *filter,
- int x0_q4, int32_t x_step_q4, int y0_q4, int y_step_q4,
- int w, int h) {
- const int16_t *const filter_x = filter[x0_q4];
- const int16_t *const filter_y = filter[y0_q4];
- DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
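- /* The 8-tap filter reads 3 rows above and 4 rows below each output
- * row, hence the +7 (with y_step_q4 == 16 this is just h + 7). */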
- int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
- uint32_t pos = 38;
- assert(x_step_q4 == 16);
- assert(y_step_q4 == 16);
- assert(((const int32_t *)filter_x)[1] != 0x800000);
- assert(((const int32_t *)filter_y)[1] != 0x800000);
- (void)x_step_q4;
- /* bit position for extract from acc */
- __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
- :
- : [pos] "r"(pos));
- if (intermediate_height < h) intermediate_height = h;
- /* Pass 1 (horizontal): an identity kernel (center tap 0x80 == 1.0 in
- Q7) is a plain copy, a 2-tap kernel takes the bilinear path, and
- everything else is filtered into temp. */
- if (filter_x[3] == 0x80) {
- copy_horiz_transposed(src - src_stride * 3, src_stride, temp,
- intermediate_height, w, intermediate_height);
- } else if (vpx_get_filter_taps(filter_x) == 2) {
- vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp,
- intermediate_height, filter_x, w, intermediate_height);
- } else {
- src -= (src_stride * 3 + 3);
- /* prefetch data to cache memory */
- prefetch_load(src);
- prefetch_load(src + 32);
- switch (w) {
- case 4:
- convolve_horiz_4_transposed_dspr2(src, src_stride, temp,
- intermediate_height, filter_x,
- intermediate_height);
- break;
- case 8:
- convolve_horiz_8_transposed_dspr2(src, src_stride, temp,
- intermediate_height, filter_x,
- intermediate_height);
- break;
- case 16:
- case 32:
- convolve_horiz_16_transposed_dspr2(src, src_stride, temp,
- intermediate_height, filter_x,
- intermediate_height, (w / 16));
- break;
- case 64:
- prefetch_load(src + 32);
- convolve_horiz_64_transposed_dspr2(src, src_stride, temp,
- intermediate_height, filter_x,
- intermediate_height);
- break;
- default:
- convolve_horiz_transposed(src, src_stride, temp, intermediate_height,
- filter_x, w, intermediate_height);
- break;
- }
- }
- /* Pass 2 (vertical): the same dispatch, applied to the transposed
- intermediate. */
- if (filter_y[3] == 0x80) {
- copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w);
- } else if (vpx_get_filter_taps(filter_y) == 2) {
- vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride,
- filter_y, h, w);
- } else {
- switch (h) {
- case 4:
- convolve_horiz_4_transposed_dspr2(temp, intermediate_height, dst,
- dst_stride, filter_y, w);
- break;
- case 8:
- convolve_horiz_8_transposed_dspr2(temp, intermediate_height, dst,
- dst_stride, filter_y, w);
- break;
- case 16:
- case 32:
- convolve_horiz_16_transposed_dspr2(temp, intermediate_height, dst,
- dst_stride, filter_y, w, (h / 16));
- break;
- case 64:
- convolve_horiz_64_transposed_dspr2(temp, intermediate_height, dst,
- dst_stride, filter_y, w);
- break;
- default:
- convolve_horiz_transposed(temp, intermediate_height, dst, dst_stride,
- filter_y, h, w);
- break;
- }
- }
- }
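- /* Copy-only predictor: the filter arguments are ignored and a w x h
- * block is copied with word-wide unaligned loads/stores plus prefetch
- * for the common block sizes, falling back to a byte loop otherwise. */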
- void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const InterpKernel *filter, int x0_q4,
- int x_step_q4, int y0_q4, int y_step_q4, int w,
- int h) {
- int x, y;
- (void)filter;
- (void)x0_q4;
- (void)x_step_q4;
- (void)y0_q4;
- (void)y_step_q4;
- /* prefetch data to cache memory */
- prefetch_load(src);
- prefetch_load(src + 32);
- prefetch_store(dst);
- switch (w) {
- case 4: {
- uint32_t tp1;
- /* 1 word storage */
- for (y = h; y--;) {
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- prefetch_store(dst + dst_stride);
- __asm__ __volatile__(
- "ulw %[tp1], (%[src]) \n\t"
- "sw %[tp1], (%[dst]) \n\t" /* store */
- : [tp1] "=&r"(tp1)
- : [src] "r"(src), [dst] "r"(dst));
- src += src_stride;
- dst += dst_stride;
- }
- break;
- }
- case 8: {
- uint32_t tp1, tp2;
- /* 2 word storage */
- for (y = h; y--;) {
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- prefetch_store(dst + dst_stride);
- __asm__ __volatile__(
- "ulw %[tp1], 0(%[src]) \n\t"
- "ulw %[tp2], 4(%[src]) \n\t"
- "sw %[tp1], 0(%[dst]) \n\t" /* store */
- "sw %[tp2], 4(%[dst]) \n\t" /* store */
- : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
- : [src] "r"(src), [dst] "r"(dst));
- src += src_stride;
- dst += dst_stride;
- }
- break;
- }
- case 16: {
- uint32_t tp1, tp2, tp3, tp4;
- /* 4 word storage */
- for (y = h; y--;) {
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- prefetch_store(dst + dst_stride);
- __asm__ __volatile__(
- "ulw %[tp1], 0(%[src]) \n\t"
- "ulw %[tp2], 4(%[src]) \n\t"
- "ulw %[tp3], 8(%[src]) \n\t"
- "ulw %[tp4], 12(%[src]) \n\t"
- "sw %[tp1], 0(%[dst]) \n\t" /* store */
- "sw %[tp2], 4(%[dst]) \n\t" /* store */
- "sw %[tp3], 8(%[dst]) \n\t" /* store */
- "sw %[tp4], 12(%[dst]) \n\t" /* store */
- : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
- [tp4] "=&r"(tp4)
- : [src] "r"(src), [dst] "r"(dst));
- src += src_stride;
- dst += dst_stride;
- }
- break;
- }
- case 32: {
- uint32_t tp1, tp2, tp3, tp4;
- uint32_t tp5, tp6, tp7, tp8;
- /* 8 word storage */
- for (y = h; y--;) {
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- prefetch_store(dst + dst_stride);
- __asm__ __volatile__(
- "ulw %[tp1], 0(%[src]) \n\t"
- "ulw %[tp2], 4(%[src]) \n\t"
- "ulw %[tp3], 8(%[src]) \n\t"
- "ulw %[tp4], 12(%[src]) \n\t"
- "ulw %[tp5], 16(%[src]) \n\t"
- "ulw %[tp6], 20(%[src]) \n\t"
- "ulw %[tp7], 24(%[src]) \n\t"
- "ulw %[tp8], 28(%[src]) \n\t"
- "sw %[tp1], 0(%[dst]) \n\t" /* store */
- "sw %[tp2], 4(%[dst]) \n\t" /* store */
- "sw %[tp3], 8(%[dst]) \n\t" /* store */
- "sw %[tp4], 12(%[dst]) \n\t" /* store */
- "sw %[tp5], 16(%[dst]) \n\t" /* store */
- "sw %[tp6], 20(%[dst]) \n\t" /* store */
- "sw %[tp7], 24(%[dst]) \n\t" /* store */
- "sw %[tp8], 28(%[dst]) \n\t" /* store */
- : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
- [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
- [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
- : [src] "r"(src), [dst] "r"(dst));
- src += src_stride;
- dst += dst_stride;
- }
- break;
- }
- case 64: {
- uint32_t tp1, tp2, tp3, tp4;
- uint32_t tp5, tp6, tp7, tp8;
- prefetch_load(src + 64);
- prefetch_store(dst + 32);
- /* 16 word storage */
- for (y = h; y--;) {
- prefetch_load(src + src_stride);
- prefetch_load(src + src_stride + 32);
- prefetch_load(src + src_stride + 64);
- prefetch_store(dst + dst_stride);
- prefetch_store(dst + dst_stride + 32);
- __asm__ __volatile__(
- "ulw %[tp1], 0(%[src]) \n\t"
- "ulw %[tp2], 4(%[src]) \n\t"
- "ulw %[tp3], 8(%[src]) \n\t"
- "ulw %[tp4], 12(%[src]) \n\t"
- "ulw %[tp5], 16(%[src]) \n\t"
- "ulw %[tp6], 20(%[src]) \n\t"
- "ulw %[tp7], 24(%[src]) \n\t"
- "ulw %[tp8], 28(%[src]) \n\t"
- "sw %[tp1], 0(%[dst]) \n\t" /* store */
- "sw %[tp2], 4(%[dst]) \n\t" /* store */
- "sw %[tp3], 8(%[dst]) \n\t" /* store */
- "sw %[tp4], 12(%[dst]) \n\t" /* store */
- "sw %[tp5], 16(%[dst]) \n\t" /* store */
- "sw %[tp6], 20(%[dst]) \n\t" /* store */
- "sw %[tp7], 24(%[dst]) \n\t" /* store */
- "sw %[tp8], 28(%[dst]) \n\t" /* store */
- "ulw %[tp1], 32(%[src]) \n\t"
- "ulw %[tp2], 36(%[src]) \n\t"
- "ulw %[tp3], 40(%[src]) \n\t"
- "ulw %[tp4], 44(%[src]) \n\t"
- "ulw %[tp5], 48(%[src]) \n\t"
- "ulw %[tp6], 52(%[src]) \n\t"
- "ulw %[tp7], 56(%[src]) \n\t"
- "ulw %[tp8], 60(%[src]) \n\t"
- "sw %[tp1], 32(%[dst]) \n\t" /* store */
- "sw %[tp2], 36(%[dst]) \n\t" /* store */
- "sw %[tp3], 40(%[dst]) \n\t" /* store */
- "sw %[tp4], 44(%[dst]) \n\t" /* store */
- "sw %[tp5], 48(%[dst]) \n\t" /* store */
- "sw %[tp6], 52(%[dst]) \n\t" /* store */
- "sw %[tp7], 56(%[dst]) \n\t" /* store */
- "sw %[tp8], 60(%[dst]) \n\t" /* store */
- : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
- [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6),
- [tp7] "=&r"(tp7), [tp8] "=&r"(tp8)
- : [src] "r"(src), [dst] "r"(dst));
- src += src_stride;
- dst += dst_stride;
- }
- break;
- }
- default:
- for (y = h; y--;) {
- for (x = 0; x < w; ++x) {
- dst[x] = src[x];
- }
- src += src_stride;
- dst += dst_stride;
- }
- break;
- }
- }
- #endif