From 7474d46e457db932b20a139e103d9c8ec6b34a97 Mon Sep 17 00:00:00 2001
From: Vincent Rabaud
Date: Thu, 24 Nov 2016 13:02:08 +0100
Subject: [PATCH] Do not use a register array in SSE.

Change-Id: I79cf95bdac1164fc4de899828e9380c23df8d141
---
 src/dsp/common_sse2.h   |  82 +++++++++++++++++-------------
 src/dsp/lossless_sse2.c |  53 ++++++++-----------
 src/dsp/yuv_sse2.c      | 110 ++++++++++++++++++++--------------------
 3 files changed, 122 insertions(+), 123 deletions(-)

diff --git a/src/dsp/common_sse2.h b/src/dsp/common_sse2.h
index e4bfc81b..995d7cf4 100644
--- a/src/dsp/common_sse2.h
+++ b/src/dsp/common_sse2.h
@@ -106,28 +106,31 @@ static WEBP_INLINE void VP8Transpose_2_4x4_16b(
 // Function used several times in VP8PlanarTo24b.
 // It samples the in buffer as follows: one every two unsigned char is stored
 // at the beginning of the buffer, while the other half is stored at the end.
-static WEBP_INLINE void VP8PlanarTo24bHelper(const __m128i* const in /*in[6]*/,
-                                             __m128i* const out /*out[6]*/) {
-  const __m128i v_mask = _mm_set1_epi16(0x00ff);
-
-  // Take one every two upper 8b values.
-  out[0] = _mm_packus_epi16(_mm_and_si128(in[0], v_mask),
-                            _mm_and_si128(in[1], v_mask));
-  out[1] = _mm_packus_epi16(_mm_and_si128(in[2], v_mask),
-                            _mm_and_si128(in[3], v_mask));
-  out[2] = _mm_packus_epi16(_mm_and_si128(in[4], v_mask),
-                            _mm_and_si128(in[5], v_mask));
-  // Take one every two lower 8b values.
-  out[3] = _mm_packus_epi16(_mm_srli_epi16(in[0], 8), _mm_srli_epi16(in[1], 8));
-  out[4] = _mm_packus_epi16(_mm_srli_epi16(in[2], 8), _mm_srli_epi16(in[3], 8));
-  out[5] = _mm_packus_epi16(_mm_srli_epi16(in[4], 8), _mm_srli_epi16(in[5], 8));
-}
+#define VP8PlanarTo24bHelper(IN, OUT)                                   \
+  do {                                                                  \
+    const __m128i v_mask = _mm_set1_epi16(0x00ff);                      \
+    /* Take one every two upper 8b values.*/                            \
+    (OUT##0) = _mm_packus_epi16(_mm_and_si128((IN##0), v_mask),         \
+                                _mm_and_si128((IN##1), v_mask));        \
+    (OUT##1) = _mm_packus_epi16(_mm_and_si128((IN##2), v_mask),         \
+                                _mm_and_si128((IN##3), v_mask));        \
+    (OUT##2) = _mm_packus_epi16(_mm_and_si128((IN##4), v_mask),         \
+                                _mm_and_si128((IN##5), v_mask));        \
+    /* Take one every two lower 8b values.*/                            \
+    (OUT##3) = _mm_packus_epi16(_mm_srli_epi16((IN##0), 8),             \
+                                _mm_srli_epi16((IN##1), 8));            \
+    (OUT##4) = _mm_packus_epi16(_mm_srli_epi16((IN##2), 8),             \
+                                _mm_srli_epi16((IN##3), 8));            \
+    (OUT##5) = _mm_packus_epi16(_mm_srli_epi16((IN##4), 8),             \
+                                _mm_srli_epi16((IN##5), 8));            \
+  } while (0)
 
 // Pack the planar buffers
 // rrrr... rrrr... gggg... gggg... bbbb... bbbb....
 // triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ...
-static WEBP_INLINE void VP8PlanarTo24b(const __m128i* const in /*in[6]*/,
-                                       __m128i* const out /*out[6]*/) {
+static WEBP_INLINE void VP8PlanarTo24b(__m128i* const in0, __m128i* const in1,
+                                       __m128i* const in2, __m128i* const in3,
+                                       __m128i* const in4, __m128i* const in5) {
   // The input is 6 registers of sixteen 8b but for the sake of explanation,
   // let's take 6 registers of four 8b values.
   // To pack, we will keep taking one every two 8b integer and move it
@@ -140,24 +143,31 @@ static WEBP_INLINE void VP8PlanarTo24b(const __m128i* const in /*in[6]*/,
   // Repeat the same permutations twice more:
   // r0r4g0g4 | b0b4r1r5 | g1g5b1b5 | r2r6g2g6 | b2b6r3r7 | g3g7b3b7
   // r0g0b0r1 | g1b1r2g2 | b2r3g3b3 | r4g4b4r5 | g5b5r6g6 | b6r7g7b7
-  __m128i tmp[6];
-  VP8PlanarTo24bHelper(in, out);
-  VP8PlanarTo24bHelper(out, tmp);
-  VP8PlanarTo24bHelper(tmp, out);
+  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+  VP8PlanarTo24bHelper(*in, tmp);
+  VP8PlanarTo24bHelper(tmp, *in);
+  VP8PlanarTo24bHelper(*in, tmp);
   // We need to do it two more times than the example as we have sixteen bytes.
-  VP8PlanarTo24bHelper(out, tmp);
-  VP8PlanarTo24bHelper(tmp, out);
+  {
+    __m128i out0, out1, out2, out3, out4, out5;
+    VP8PlanarTo24bHelper(tmp, out);
+    VP8PlanarTo24bHelper(out, *in);
+  }
 }
 
-// Convert two packed buffers like argbargbargbargb... into the split channels
-// aaaaa ... rrrr ... gggg .... bbbbb ......
-static WEBP_INLINE void VP8L32bToPlanar(const __m128i* const in /*in[4]*/,
-                                        __m128i* const out /*out[4]*/) {
+#undef VP8PlanarTo24bHelper
+
+// Convert four packed four-channel buffers like argbargbargbargb... into the
+// split channels aaaaa ... rrrr ... gggg .... bbbbb ......
+static WEBP_INLINE void VP8L32bToPlanar(__m128i* const in0,
+                                        __m128i* const in1,
+                                        __m128i* const in2,
+                                        __m128i* const in3) {
   // Column-wise transpose.
-  const __m128i A0 = _mm_unpacklo_epi8(in[0], in[1]);
-  const __m128i A1 = _mm_unpackhi_epi8(in[0], in[1]);
-  const __m128i A2 = _mm_unpacklo_epi8(in[2], in[3]);
-  const __m128i A3 = _mm_unpackhi_epi8(in[2], in[3]);
+  const __m128i A0 = _mm_unpacklo_epi8(*in0, *in1);
+  const __m128i A1 = _mm_unpackhi_epi8(*in0, *in1);
+  const __m128i A2 = _mm_unpacklo_epi8(*in2, *in3);
+  const __m128i A3 = _mm_unpackhi_epi8(*in2, *in3);
   const __m128i B0 = _mm_unpacklo_epi8(A0, A1);
   const __m128i B1 = _mm_unpackhi_epi8(A0, A1);
   const __m128i B2 = _mm_unpacklo_epi8(A2, A3);
@@ -169,10 +179,10 @@ static WEBP_INLINE void VP8L32bToPlanar(const __m128i* const in /*in[4]*/,
   const __m128i C2 = _mm_unpacklo_epi8(B2, B3);
   const __m128i C3 = _mm_unpackhi_epi8(B2, B3);
   // Gather the channels.
-  out[0] = _mm_unpackhi_epi64(C1, C3);
-  out[1] = _mm_unpacklo_epi64(C1, C3);
-  out[2] = _mm_unpackhi_epi64(C0, C2);
-  out[3] = _mm_unpacklo_epi64(C0, C2);
+  *in0 = _mm_unpackhi_epi64(C1, C3);
+  *in1 = _mm_unpacklo_epi64(C1, C3);
+  *in2 = _mm_unpackhi_epi64(C0, C2);
+  *in3 = _mm_unpacklo_epi64(C0, C2);
 }
 
 #endif // WEBP_USE_SSE2
diff --git a/src/dsp/lossless_sse2.c b/src/dsp/lossless_sse2.c
index 7a1f1228..92dbdea9 100644
--- a/src/dsp/lossless_sse2.c
+++ b/src/dsp/lossless_sse2.c
@@ -217,39 +217,26 @@ static void ConvertBGRAToRGB(const uint32_t* src, int num_pixels,
   __m128i* out = (__m128i*)dst;
 
   while (num_pixels >= 32) {
-    __m128i rgb_planar[6];
-    {
-      const __m128i bgra[4] = { _mm_loadu_si128(in + 0),
-                                _mm_loadu_si128(in + 1),
-                                _mm_loadu_si128(in + 2),
-                                _mm_loadu_si128(in + 3) };
-      __m128i bgra_planar[4];
-      VP8L32bToPlanar(bgra, bgra_planar);
-      rgb_planar[0] = _mm_loadu_si128(bgra_planar + 1);
-      rgb_planar[2] = _mm_loadu_si128(bgra_planar + 2);
-      rgb_planar[4] = _mm_loadu_si128(bgra_planar + 3);
-    }
-    {
-      const __m128i bgra[4] = { _mm_loadu_si128(in + 4),
-                                _mm_loadu_si128(in + 5),
-                                _mm_loadu_si128(in + 6),
-                                _mm_loadu_si128(in + 7) };
-      __m128i bgra_planar[4];
-      VP8L32bToPlanar(bgra, bgra_planar);
-      rgb_planar[1] = _mm_loadu_si128(bgra_planar + 1);
-      rgb_planar[3] = _mm_loadu_si128(bgra_planar + 2);
-      rgb_planar[5] = _mm_loadu_si128(bgra_planar + 3);
-    }
-    {
-      __m128i bgr[6];
-      VP8PlanarTo24b(rgb_planar, bgr);
-      _mm_storeu_si128(out + 0, bgr[0]);
-      _mm_storeu_si128(out + 1, bgr[1]);
-      _mm_storeu_si128(out + 2, bgr[2]);
-      _mm_storeu_si128(out + 3, bgr[3]);
-      _mm_storeu_si128(out + 4, bgr[4]);
-      _mm_storeu_si128(out + 5, bgr[5]);
-    }
+    // Load the BGRA buffers.
+    __m128i in0 = _mm_loadu_si128(in + 0);
+    __m128i in1 = _mm_loadu_si128(in + 1);
+    __m128i in2 = _mm_loadu_si128(in + 2);
+    __m128i in3 = _mm_loadu_si128(in + 3);
+    __m128i in4 = _mm_loadu_si128(in + 4);
+    __m128i in5 = _mm_loadu_si128(in + 5);
+    __m128i in6 = _mm_loadu_si128(in + 6);
+    __m128i in7 = _mm_loadu_si128(in + 7);
+    VP8L32bToPlanar(&in0, &in1, &in2, &in3);
+    VP8L32bToPlanar(&in4, &in5, &in6, &in7);
+    // At this point, in1/in5 contain red only, in2/in6 green only ...
+    // Pack the colors in 24b RGB.
+    VP8PlanarTo24b(&in1, &in5, &in2, &in6, &in3, &in7);
+    _mm_storeu_si128(out + 0, in1);
+    _mm_storeu_si128(out + 1, in5);
+    _mm_storeu_si128(out + 2, in2);
+    _mm_storeu_si128(out + 3, in6);
+    _mm_storeu_si128(out + 4, in3);
+    _mm_storeu_si128(out + 5, in7);
     in += 8;
     out += 6;
     num_pixels -= 32;
diff --git a/src/dsp/yuv_sse2.c b/src/dsp/yuv_sse2.c
index bf18bc05..1f610d68 100644
--- a/src/dsp/yuv_sse2.c
+++ b/src/dsp/yuv_sse2.c
@@ -160,8 +160,10 @@ static WEBP_INLINE void PackAndStore565(const __m128i* const R,
 // Pack the planar buffers
 // rrrr... rrrr... gggg... gggg... bbbb... bbbb....
 // triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ...
-static WEBP_INLINE void PlanarTo24b(const __m128i* const in /*in[6]*/,
-                                    uint8_t* rgb) {
+static WEBP_INLINE void PlanarTo24b(__m128i* const in0, __m128i* const in1,
+                                    __m128i* const in2, __m128i* const in3,
+                                    __m128i* const in4, __m128i* const in5,
+                                    uint8_t* const rgb) {
   // The input is 6 registers of sixteen 8b but for the sake of explanation,
   // let's take 6 registers of four 8b values.
   // To pack, we will keep taking one every two 8b integer and move it
@@ -174,15 +176,14 @@ static WEBP_INLINE void PlanarTo24b(const __m128i* const in /*in[6]*/,
   // Repeat the same permutations twice more:
   // r0r4g0g4 | b0b4r1r5 | g1g5b1b5 | r2r6g2g6 | b2b6r3r7 | g3g7b3b7
   // r0g0b0r1 | g1b1r2g2 | b2r3g3b3 | r4g4b4r5 | g5b5r6g6 | b6r7g7b7
-  __m128i tmp[6];
-  VP8PlanarTo24b(in, tmp);
+  VP8PlanarTo24b(in0, in1, in2, in3, in4, in5);
 
-  _mm_storeu_si128((__m128i*)(rgb +  0), tmp[0]);
-  _mm_storeu_si128((__m128i*)(rgb + 16), tmp[1]);
-  _mm_storeu_si128((__m128i*)(rgb + 32), tmp[2]);
-  _mm_storeu_si128((__m128i*)(rgb + 48), tmp[3]);
-  _mm_storeu_si128((__m128i*)(rgb + 64), tmp[4]);
-  _mm_storeu_si128((__m128i*)(rgb + 80), tmp[5]);
+  _mm_storeu_si128((__m128i*)(rgb +  0), *in0);
+  _mm_storeu_si128((__m128i*)(rgb + 16), *in1);
+  _mm_storeu_si128((__m128i*)(rgb + 32), *in2);
+  _mm_storeu_si128((__m128i*)(rgb + 48), *in3);
+  _mm_storeu_si128((__m128i*)(rgb + 64), *in4);
+  _mm_storeu_si128((__m128i*)(rgb + 80), *in5);
 }
 
 void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
@@ -242,29 +243,29 @@ void VP8YuvToRgb56532(const uint8_t* y, const uint8_t* u, const uint8_t* v,
 void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                    uint8_t* dst) {
   __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
-  __m128i rgb[6];
+  __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5;
 
-  YUV444ToRGB(y + 0, u + 0, v + 0, &R0, &G0, &B0);
-  YUV444ToRGB(y + 8, u + 8, v + 8, &R1, &G1, &B1);
+  YUV444ToRGB(y +  0, u +  0, v +  0, &R0, &G0, &B0);
+  YUV444ToRGB(y +  8, u +  8, v +  8, &R1, &G1, &B1);
   YUV444ToRGB(y + 16, u + 16, v + 16, &R2, &G2, &B2);
   YUV444ToRGB(y + 24, u + 24, v + 24, &R3, &G3, &B3);
 
   // Cast to 8b and store as RRRRGGGGBBBB.
-  rgb[0] = _mm_packus_epi16(R0, R1);
-  rgb[1] = _mm_packus_epi16(R2, R3);
-  rgb[2] = _mm_packus_epi16(G0, G1);
-  rgb[3] = _mm_packus_epi16(G2, G3);
-  rgb[4] = _mm_packus_epi16(B0, B1);
-  rgb[5] = _mm_packus_epi16(B2, B3);
+  rgb0 = _mm_packus_epi16(R0, R1);
+  rgb1 = _mm_packus_epi16(R2, R3);
+  rgb2 = _mm_packus_epi16(G0, G1);
+  rgb3 = _mm_packus_epi16(G2, G3);
+  rgb4 = _mm_packus_epi16(B0, B1);
+  rgb5 = _mm_packus_epi16(B2, B3);
 
   // Pack as RGBRGBRGBRGB.
-  PlanarTo24b(rgb, dst);
+  PlanarTo24b(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst);
 }
 
 void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                    uint8_t* dst) {
   __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
-  __m128i bgr[6];
+  __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5;
 
   YUV444ToRGB(y + 0, u + 0, v + 0, &R0, &G0, &B0);
   YUV444ToRGB(y + 8, u + 8, v + 8, &R1, &G1, &B1);
@@ -272,15 +273,15 @@ void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
   YUV444ToRGB(y + 24, u + 24, v + 24, &R3, &G3, &B3);
 
   // Cast to 8b and store as BBBBGGGGRRRR.
-  bgr[0] = _mm_packus_epi16(B0, B1);
-  bgr[1] = _mm_packus_epi16(B2, B3);
-  bgr[2] = _mm_packus_epi16(G0, G1);
-  bgr[3] = _mm_packus_epi16(G2, G3);
-  bgr[4] = _mm_packus_epi16(R0, R1);
-  bgr[5] = _mm_packus_epi16(R2, R3);
+  bgr0 = _mm_packus_epi16(B0, B1);
+  bgr1 = _mm_packus_epi16(B2, B3);
+  bgr2 = _mm_packus_epi16(G0, G1);
+  bgr3 = _mm_packus_epi16(G2, G3);
+  bgr4 = _mm_packus_epi16(R0, R1);
+  bgr5 = _mm_packus_epi16(R2, R3);
 
   // Pack as BGRBGRBGRBGR.
-  PlanarTo24b(bgr, dst);
+  PlanarTo24b(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst);
 }
 
 //-----------------------------------------------------------------------------
@@ -354,7 +355,7 @@ static void YuvToRgbRow(const uint8_t* y, const uint8_t* u, const uint8_t* v,
   int n;
   for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) {
     __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
-    __m128i rgb[6];
+    __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5;
 
     YUV420ToRGB(y + 0, u + 0, v + 0, &R0, &G0, &B0);
     YUV420ToRGB(y + 8, u + 4, v + 4, &R1, &G1, &B1);
@@ -362,15 +363,15 @@ static void YuvToRgbRow(const uint8_t* y, const uint8_t* u, const uint8_t* v,
     YUV420ToRGB(y + 24, u + 12, v + 12, &R3, &G3, &B3);
 
     // Cast to 8b and store as RRRRGGGGBBBB.
-    rgb[0] = _mm_packus_epi16(R0, R1);
-    rgb[1] = _mm_packus_epi16(R2, R3);
-    rgb[2] = _mm_packus_epi16(G0, G1);
-    rgb[3] = _mm_packus_epi16(G2, G3);
-    rgb[4] = _mm_packus_epi16(B0, B1);
-    rgb[5] = _mm_packus_epi16(B2, B3);
+    rgb0 = _mm_packus_epi16(R0, R1);
+    rgb1 = _mm_packus_epi16(R2, R3);
+    rgb2 = _mm_packus_epi16(G0, G1);
+    rgb3 = _mm_packus_epi16(G2, G3);
+    rgb4 = _mm_packus_epi16(B0, B1);
+    rgb5 = _mm_packus_epi16(B2, B3);
 
     // Pack as RGBRGBRGBRGB.
-    PlanarTo24b(rgb, dst);
+    PlanarTo24b(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst);
 
     y += 32;
     u += 16;
@@ -390,7 +391,7 @@ static void YuvToBgrRow(const uint8_t* y, const uint8_t* u, const uint8_t* v,
   int n;
   for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) {
     __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
-    __m128i bgr[6];
+    __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5;
 
     YUV420ToRGB(y + 0, u + 0, v + 0, &R0, &G0, &B0);
     YUV420ToRGB(y + 8, u + 4, v + 4, &R1, &G1, &B1);
@@ -398,15 +399,15 @@ static void YuvToBgrRow(const uint8_t* y, const uint8_t* u, const uint8_t* v,
     YUV420ToRGB(y + 24, u + 12, v + 12, &R3, &G3, &B3);
 
     // Cast to 8b and store as BBBBGGGGRRRR.
-    bgr[0] = _mm_packus_epi16(B0, B1);
-    bgr[1] = _mm_packus_epi16(B2, B3);
-    bgr[2] = _mm_packus_epi16(G0, G1);
-    bgr[3] = _mm_packus_epi16(G2, G3);
-    bgr[4] = _mm_packus_epi16(R0, R1);
-    bgr[5] = _mm_packus_epi16(R2, R3);
+    bgr0 = _mm_packus_epi16(B0, B1);
+    bgr1 = _mm_packus_epi16(B2, B3);
+    bgr2 = _mm_packus_epi16(G0, G1);
+    bgr3 = _mm_packus_epi16(G2, G3);
+    bgr4 = _mm_packus_epi16(R0, R1);
+    bgr5 = _mm_packus_epi16(R2, R3);
 
     // Pack as BGRBGRBGRBGR.
-    PlanarTo24b(bgr, dst);
+    PlanarTo24b(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst);
 
     y += 32;
     u += 16;
@@ -478,16 +479,17 @@ static WEBP_INLINE void RGB24PackedToPlanar(const uint8_t* const rgb,
 static WEBP_INLINE void RGB32PackedToPlanar(const uint32_t* const argb,
                                             __m128i* const rgb /*in[6]*/) {
   const __m128i zero = _mm_setzero_si128();
-  const __m128i in[4] = {LOAD_16(argb + 0), LOAD_16(argb + 4),
-                         LOAD_16(argb + 8), LOAD_16(argb + 12)};
-  __m128i out[4];
-  VP8L32bToPlanar(in, out);
-  rgb[0] = _mm_unpacklo_epi8(out[1], zero);
-  rgb[1] = _mm_unpackhi_epi8(out[1], zero);
-  rgb[2] = _mm_unpacklo_epi8(out[2], zero);
-  rgb[3] = _mm_unpackhi_epi8(out[2], zero);
-  rgb[4] = _mm_unpacklo_epi8(out[3], zero);
-  rgb[5] = _mm_unpackhi_epi8(out[3], zero);
+  __m128i a0 = LOAD_16(argb + 0);
+  __m128i a1 = LOAD_16(argb + 4);
+  __m128i a2 = LOAD_16(argb + 8);
+  __m128i a3 = LOAD_16(argb + 12);
+  VP8L32bToPlanar(&a0, &a1, &a2, &a3);
+  rgb[0] = _mm_unpacklo_epi8(a1, zero);
+  rgb[1] = _mm_unpackhi_epi8(a1, zero);
+  rgb[2] = _mm_unpacklo_epi8(a2, zero);
+  rgb[3] = _mm_unpackhi_epi8(a2, zero);
+  rgb[4] = _mm_unpacklo_epi8(a3, zero);
+  rgb[5] = _mm_unpackhi_epi8(a3, zero);
 }
 
 // This macro computes (RG * MULT_RG + GB * MULT_GB + ROUNDER) >> DESCALE_FIX
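
The standalone sketch below is not part of the patch; it only illustrates the calling convention the new signatures rely on. An array such as __m128i regs[6] is addressable, so compilers often keep it on the stack; six distinct __m128i locals passed by address can instead stay in xmm registers once the callee is inlined. The helper name Pack6, the function PackSixRegisters, and the buffer layout are invented for the example, and the shuffle body is a placeholder rather than the packing performed by VP8PlanarTo24b.

/* Standalone sketch, not from the patch: Pack6() stands in for a helper like
 * VP8PlanarTo24b().  It takes six separate __m128i values by address, so the
 * caller never declares an addressable __m128i[6] array. */
#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

static void Pack6(__m128i* const r0, __m128i* const r1, __m128i* const r2,
                  __m128i* const r3, __m128i* const r4, __m128i* const r5) {
  /* Placeholder shuffle: interleave the three register pairs byte by byte,
   * reading and rewriting all six values in place. */
  const __m128i t0 = _mm_unpacklo_epi8(*r0, *r3);
  const __m128i t1 = _mm_unpackhi_epi8(*r0, *r3);
  const __m128i t2 = _mm_unpacklo_epi8(*r1, *r4);
  const __m128i t3 = _mm_unpackhi_epi8(*r1, *r4);
  const __m128i t4 = _mm_unpacklo_epi8(*r2, *r5);
  const __m128i t5 = _mm_unpackhi_epi8(*r2, *r5);
  *r0 = t0; *r1 = t1; *r2 = t2; *r3 = t3; *r4 = t4; *r5 = t5;
}

void PackSixRegisters(const uint8_t* src, uint8_t* dst) {
  /* Six independent locals instead of __m128i p[6]: after Pack6() is inlined,
   * the compiler is free to keep all of them in xmm registers. */
  __m128i p0 = _mm_loadu_si128((const __m128i*)(src +  0));
  __m128i p1 = _mm_loadu_si128((const __m128i*)(src + 16));
  __m128i p2 = _mm_loadu_si128((const __m128i*)(src + 32));
  __m128i p3 = _mm_loadu_si128((const __m128i*)(src + 48));
  __m128i p4 = _mm_loadu_si128((const __m128i*)(src + 64));
  __m128i p5 = _mm_loadu_si128((const __m128i*)(src + 80));
  Pack6(&p0, &p1, &p2, &p3, &p4, &p5);
  _mm_storeu_si128((__m128i*)(dst +  0), p0);
  _mm_storeu_si128((__m128i*)(dst + 16), p1);
  _mm_storeu_si128((__m128i*)(dst + 32), p2);
  _mm_storeu_si128((__m128i*)(dst + 48), p3);
  _mm_storeu_si128((__m128i*)(dst + 64), p4);
  _mm_storeu_si128((__m128i*)(dst + 80), p5);
}

Whether the old array form actually spilled depends on the compiler and optimization level; the patch simply removes the array so the question does not arise.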