add SSE2 code for transform

Pretty similar to the encoder's version
3% faster decoding on average

patch by Christian Duvivier (cduvivier at google dot com)
This commit is contained in:
Pascal Massimino 2011-06-15 10:50:52 -07:00
parent c5d4584b2c
commit e4d540c842

View File

@ -8,6 +8,7 @@
// SSE2 version of dsp functions and loop filtering.
//
// Author: somnath@google.com (Somnath Banerjee)
// cduvivier@google.com (Christian Duvivier)
#if defined(__SSE2__) || defined(_MSC_VER)
@ -18,7 +19,197 @@
extern "C" {
#endif

//-----------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
// Inverse 4x4 transform (Paragraph 14.4): reconstructs the residual from the
// coefficients 'in' and adds it, with unsigned saturation to 8 bits, onto the
// 4x4 prediction block stored at 'dst' (4 pixels per row, rows BPS bytes
// apart).
static void TransformSSE2(const int16_t* in, uint8_t* dst) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant become the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load the transform coefficients. The second half of the vectors will just
  // contain random value we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    // 'four' is the rounding bias for the final >> 3: it is folded into the
    // first input once, and every output sum below contains that input
    // exactly once.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_set1_epi16(0);
    // Load the reference(s).
    // NOTE(review): the int*-casts below type-pun uint8_t rows as 32-bit
    // ints, which formally violates strict aliasing and assumes each row is
    // 4-byte aligned — consider memcpy-based 4-byte loads/stores; verify the
    // alignment guarantee implied by BPS.
    __m128i dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]);
    __m128i dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]);
    __m128i dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]);
    __m128i dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]);
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results, four bytes/pixels per line.
    *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0);
    *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1);
    *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2);
    *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3);
  }
}
//-----------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)
static inline void SignedShift3(__m128i* a) {
  __m128i t1 = *a;
  // Shift the lower byte of 16 bit by 3 while preserving the sign bit
  t1 = _mm_slli_epi16(t1, 8);
@ -139,7 +330,6 @@ static inline void Store4x4(__m128i* x, uint8_t* dst, int stride) {
}

//-----------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)
static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) {
  __m128i op, oq;
@ -240,6 +430,7 @@ static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) {
extern void VP8DspInitSSE2(void);

void VP8DspInitSSE2(void) {
  VP8Transform = TransformSSE2;
  VP8SimpleVFilter16 = SimpleVFilter16SSE2;
  VP8SimpleHFilter16 = SimpleHFilter16SSE2;
  VP8SimpleVFilter16i = SimpleVFilter16iSSE2;