mirror of
https://github.com/webmproject/libwebp.git
synced 2024-11-20 04:18:26 +01:00
PredictorSub: implement fully-SSE2 version
and inline the C-version too. Predictor #13 is still a hard one. Change-Id: Iedecfb5cbf216da4e28ccfdd0810286133f42331
This commit is contained in:
parent
fbba5bc2c1
commit
9cc421675b
@ -35,6 +35,7 @@ extern "C" {
|
||||
typedef uint32_t (*VP8LPredictorFunc)(uint32_t left, const uint32_t* const top);
|
||||
extern VP8LPredictorFunc VP8LPredictors[16];
|
||||
extern VP8LPredictorFunc VP8LPredictors_C[16];
|
||||
// These Add/Sub functions expect upper[-1] and out[-1] to be readable.
|
||||
typedef void (*VP8LPredictorAddSubFunc)(const uint32_t* in,
|
||||
const uint32_t* upper, int num_pixels,
|
||||
uint32_t* out);
|
||||
@ -150,6 +151,7 @@ void VP8LCollectColorBlueTransforms_C(const uint32_t* argb, int stride,
|
||||
int histo[]);
|
||||
|
||||
extern VP8LPredictorAddSubFunc VP8LPredictorsSub[16];
|
||||
extern VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16];
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Huffman-cost related functions.
|
||||
|
@ -663,6 +663,141 @@ static void HistogramAdd(const VP8LHistogram* const a,
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Image transforms.
|
||||
|
||||
// Per-channel floor average of two ARGB pixels, with no cross-byte carry:
// (a0 + a1) >> 1 per byte == half the carry-free sum plus the shared bits.
static WEBP_INLINE uint32_t Average2(uint32_t a0, uint32_t a1) {
  const uint32_t shared_bits = a0 & a1;
  const uint32_t halved_diff = ((a0 ^ a1) & 0xfefefefeu) >> 1;
  return halved_diff + shared_bits;
}
|
||||
|
||||
// Per-channel weighted average (a0 + 2 * a1 + a2) / 4 with intermediate
// flooring: first avg(a0, a2), then avg of that with a1.
static WEBP_INLINE uint32_t Average3(uint32_t a0, uint32_t a1, uint32_t a2) {
  // avg02 = per-channel floor((a0 + a2) / 2), computed carry-free.
  const uint32_t avg02 = (((a0 ^ a2) & 0xfefefefeu) >> 1) + (a0 & a2);
  // Final result averages avg02 with the middle operand a1.
  return (((avg02 ^ a1) & 0xfefefefeu) >> 1) + (avg02 & a1);
}
|
||||
|
||||
// Per-channel average of four ARGB pixels: avg(avg(a0, a1), avg(a2, a3)),
// each stage using the carry-free halving trick.
static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1,
                                     uint32_t a2, uint32_t a3) {
  const uint32_t avg01 = (((a0 ^ a1) & 0xfefefefeu) >> 1) + (a0 & a1);
  const uint32_t avg23 = (((a2 ^ a3) & 0xfefefefeu) >> 1) + (a2 & a3);
  return (((avg01 ^ avg23) & 0xfefefefeu) >> 1) + (avg01 & avg23);
}
|
||||
|
||||
// Clamps a channel sum to [0, 255].  The argument is the (possibly wrapped)
// unsigned image of an int in roughly [-510, 510]:
//  - already in [0, 255]: returned unchanged;
//  - "negative" (huge after wrap-around): ~a >> 24 is 0;
//  - positive overflow (256..510): ~a >> 24 is 255.
static WEBP_INLINE uint32_t Clip255(uint32_t a) {
  return (a < 256) ? a : (~a >> 24);
}
|
||||
|
||||
// One channel of the "full" clamped gradient predictor: a + b - c,
// clamped to [0, 255].
static WEBP_INLINE int AddSubtractComponentFull(int a, int b, int c) {
  const int sum = a + b - c;  // may fall outside [0, 255]
  return Clip255(sum);
}
|
||||
|
||||
// Per-channel clamped gradient predictor: Clip255(c0 + c1 - c2) applied to
// each of the four ARGB bytes, then repacked.
static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1,
                                                   uint32_t c2) {
  uint32_t result = 0;
  int shift;
  for (shift = 24; shift >= 0; shift -= 8) {
    const uint32_t comp = AddSubtractComponentFull((c0 >> shift) & 0xff,
                                                   (c1 >> shift) & 0xff,
                                                   (c2 >> shift) & 0xff);
    result |= comp << shift;
  }
  return result;
}
|
||||
|
||||
// One channel of the "half" clamped predictor: extrapolate a by half the
// (truncating) distance from b, clamped to [0, 255].
static WEBP_INLINE int AddSubtractComponentHalf(int a, int b) {
  const int delta = (a - b) / 2;  // C division truncates toward zero
  return Clip255(a + delta);
}
|
||||
|
||||
// Per-channel "half" clamped predictor: first the per-channel average of
// c0 and c1, then each channel is pushed away from c2 by half the gap,
// clamped to [0, 255], and repacked.
static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1,
                                                   uint32_t c2) {
  const uint32_t ave = Average2(c0, c1);
  uint32_t result = 0;
  int shift;
  for (shift = 24; shift >= 0; shift -= 8) {
    const uint32_t comp =
        AddSubtractComponentHalf((ave >> shift) & 0xff, (c2 >> shift) & 0xff);
    result |= comp << shift;
  }
  return result;
}
|
||||
|
||||
// gcc-4.9 on ARM generates incorrect code in Select() when Sub3() is inlined.
// Force noinline on the affected toolchains only; elsewhere keep the normal
// inline hint.  (LOCAL_GCC_VERSION is presumably defined by a project header
// seen earlier in this file — not visible in this chunk.)
#if defined(__arm__) && \
    (LOCAL_GCC_VERSION == 0x409 || LOCAL_GCC_VERSION == 0x408)
# define LOCAL_INLINE __attribute__ ((noinline))
#else
# define LOCAL_INLINE WEBP_INLINE
#endif

// Returns |b - c| - |a - c|: negative when 'a' is the better (closer to c)
// predictor component, positive when 'b' is.  Summed over all four channels
// by Select() below.
static LOCAL_INLINE int Sub3(int a, int b, int c) {
  const int pb = b - c;
  const int pa = a - c;
  return abs(pb) - abs(pa);
}

// Only needed for the workaround above; keep the macro scoped to Sub3().
#undef LOCAL_INLINE
|
||||
|
||||
// Chooses between the two candidate predictors 'a' and 'b' given the shared
// neighbor 'c': picks whichever is closer to 'c' summed over all four ARGB
// channels (ties go to 'a').
static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
  const int score = Sub3(a >> 24,          b >> 24,          c >> 24) +
                    Sub3((a >> 16) & 0xff, (b >> 16) & 0xff, (c >> 16) & 0xff) +
                    Sub3((a >> 8) & 0xff,  (b >> 8) & 0xff,  (c >> 8) & 0xff) +
                    Sub3(a & 0xff,         b & 0xff,         c & 0xff);
  if (score <= 0) return a;
  return b;
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Predictors
|
||||
|
||||
// Predictor2: the pixel directly above (T).
static uint32_t Predictor2(uint32_t left, const uint32_t* const top) {
  (void)left;  // T predictor ignores the left neighbor.
  return top[0];
}
|
||||
// Predictor3: the pixel above and to the right (TR).
static uint32_t Predictor3(uint32_t left, const uint32_t* const top) {
  (void)left;  // TR predictor ignores the left neighbor.
  return top[1];
}
|
||||
// Predictor4: the pixel above and to the left (TL); requires top[-1]
// to be readable (see the header contract).
static uint32_t Predictor4(uint32_t left, const uint32_t* const top) {
  (void)left;  // TL predictor ignores the left neighbor.
  return top[-1];
}
|
||||
// Predictor5: avg2(avg2(L, TR), T).
static uint32_t Predictor5(uint32_t left, const uint32_t* const top) {
  return Average3(left, top[0], top[1]);
}
|
||||
// Predictor6: avg2(L, TL).
static uint32_t Predictor6(uint32_t left, const uint32_t* const top) {
  return Average2(left, top[-1]);
}
|
||||
// Predictor7: avg2(L, T).
static uint32_t Predictor7(uint32_t left, const uint32_t* const top) {
  return Average2(left, top[0]);
}
|
||||
// Predictor8: avg2(TL, T); the left neighbor is unused.
static uint32_t Predictor8(uint32_t left, const uint32_t* const top) {
  (void)left;
  return Average2(top[-1], top[0]);
}
|
||||
// Predictor9: avg2(T, TR); the left neighbor is unused.
static uint32_t Predictor9(uint32_t left, const uint32_t* const top) {
  (void)left;
  return Average2(top[0], top[1]);
}
|
||||
// Predictor10: avg2(avg2(L, TL), avg2(T, TR)).
static uint32_t Predictor10(uint32_t left, const uint32_t* const top) {
  return Average4(left, top[-1], top[0], top[1]);
}
|
||||
// Predictor11: Select(T, L, TL) — the candidate (T or L) closer to TL
// summed over channels.
static uint32_t Predictor11(uint32_t left, const uint32_t* const top) {
  return Select(top[0], left, top[-1]);
}
|
||||
// Predictor12: per-channel clamped L + T - TL.
static uint32_t Predictor12(uint32_t left, const uint32_t* const top) {
  return ClampedAddSubtractFull(left, top[0], top[-1]);
}
|
||||
// Predictor13: per-channel clamped avg2(L, T) pushed half-way from TL.
static uint32_t Predictor13(uint32_t left, const uint32_t* const top) {
  return ClampedAddSubtractHalf(left, top[0], top[-1]);
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
static void PredictorSub0_C(const uint32_t* in, const uint32_t* upper,
|
||||
@ -679,18 +814,20 @@ static void PredictorSub1_C(const uint32_t* in, const uint32_t* upper,
|
||||
(void)upper;
|
||||
}
|
||||
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[2], PredictorSub2_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[3], PredictorSub3_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[4], PredictorSub4_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[5], PredictorSub5_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[6], PredictorSub6_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[7], PredictorSub7_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[8], PredictorSub8_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[9], PredictorSub9_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[10], PredictorSub10_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[11], PredictorSub11_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[12], PredictorSub12_C)
|
||||
GENERATE_PREDICTOR_SUB(VP8LPredictors[13], PredictorSub13_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor2, PredictorSub2_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor3, PredictorSub3_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor4, PredictorSub4_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor5, PredictorSub5_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor6, PredictorSub6_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor7, PredictorSub7_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor8, PredictorSub8_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor9, PredictorSub9_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor10, PredictorSub10_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor11, PredictorSub11_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor12, PredictorSub12_C)
|
||||
GENERATE_PREDICTOR_SUB(Predictor13, PredictorSub13_C)
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
VP8LProcessEncBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
|
||||
|
||||
@ -714,6 +851,7 @@ VP8LHistogramAddFunc VP8LHistogramAdd;
|
||||
VP8LVectorMismatchFunc VP8LVectorMismatch;
|
||||
|
||||
VP8LPredictorAddSubFunc VP8LPredictorsSub[16];
|
||||
VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16];
|
||||
|
||||
extern void VP8LEncDspInitSSE2(void);
|
||||
extern void VP8LEncDspInitSSE41(void);
|
||||
@ -768,6 +906,23 @@ WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInit(void) {
|
||||
VP8LPredictorsSub[14] = PredictorSub0_C; // <- padding security sentinels
|
||||
VP8LPredictorsSub[15] = PredictorSub0_C;
|
||||
|
||||
VP8LPredictorsSub_C[0] = PredictorSub0_C;
|
||||
VP8LPredictorsSub_C[1] = PredictorSub1_C;
|
||||
VP8LPredictorsSub_C[2] = PredictorSub2_C;
|
||||
VP8LPredictorsSub_C[3] = PredictorSub3_C;
|
||||
VP8LPredictorsSub_C[4] = PredictorSub4_C;
|
||||
VP8LPredictorsSub_C[5] = PredictorSub5_C;
|
||||
VP8LPredictorsSub_C[6] = PredictorSub6_C;
|
||||
VP8LPredictorsSub_C[7] = PredictorSub7_C;
|
||||
VP8LPredictorsSub_C[8] = PredictorSub8_C;
|
||||
VP8LPredictorsSub_C[9] = PredictorSub9_C;
|
||||
VP8LPredictorsSub_C[10] = PredictorSub10_C;
|
||||
VP8LPredictorsSub_C[11] = PredictorSub11_C;
|
||||
VP8LPredictorsSub_C[12] = PredictorSub12_C;
|
||||
VP8LPredictorsSub_C[13] = PredictorSub13_C;
|
||||
VP8LPredictorsSub_C[14] = PredictorSub0_C; // <- padding security sentinels
|
||||
VP8LPredictorsSub_C[15] = PredictorSub0_C;
|
||||
|
||||
// If defined, use CPUInfo() to overwrite some pointers with faster versions.
|
||||
if (VP8GetCPUInfo != NULL) {
|
||||
#if defined(WEBP_USE_SSE2)
|
||||
|
@ -377,6 +377,203 @@ static int VectorMismatch(const uint32_t* const array1,
|
||||
return match_len;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Batch version of Predictor Transform subtraction
|
||||
|
||||
// Per-byte floor average of *a0 and *a1 — the SSE2 counterpart of the
// scalar Average2().
static WEBP_INLINE void Average2_m128i(const __m128i* const a0,
                                       const __m128i* const a1,
                                       __m128i* const avg) {
  // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1)
  // _mm_avg_epu8 rounds up; subtracting ((a ^ b) & 1) converts it to the
  // round-down average the scalar code uses.
  const __m128i ones = _mm_set1_epi8(1);
  const __m128i avg1 = _mm_avg_epu8(*a0, *a1);
  const __m128i one = _mm_and_si128(_mm_xor_si128(*a0, *a1), ones);
  *avg = _mm_sub_epi8(avg1, one);
}
|
||||
|
||||
// Predictor0: ARGB_BLACK.
// out[i] = in[i] - ARGB_BLACK, per byte channel, four pixels per iteration.
// The (at most 3) leftover pixels go through the scalar C fallback.
static void PredictorSub0_SSE2(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  const __m128i black = _mm_set1_epi32(ARGB_BLACK);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
    const __m128i res = _mm_sub_epi8(src, black);
    _mm_storeu_si128((__m128i*)&out[i], res);
  }
  // 'upper' is unused by predictor 0 but forwarded to match the signature.
  VP8LPredictorsSub_C[0](in + i, upper + i, num_pixels - i, out + i);
}
|
||||
|
||||
// Generates PredictorSub##X##_SSE2 for the "copy one neighbor" predictors:
// out[i] = in[i] - pred, where pred is the single neighboring pixel named
// by IN (an lvalue expression using the loop index i).  Four pixels per
// iteration; leftovers fall back to the matching C routine.
#define GENERATE_PREDICTOR_1(X, IN) \
static void PredictorSub##X##_SSE2(const uint32_t* in, const uint32_t* upper, \
                                   int num_pixels, uint32_t* out) { \
  int i; \
  for (i = 0; i + 4 <= num_pixels; i += 4) { \
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); \
    const __m128i pred = _mm_loadu_si128((const __m128i*)&(IN)); \
    const __m128i res = _mm_sub_epi8(src, pred); \
    _mm_storeu_si128((__m128i*)&out[i], res); \
  } \
  VP8LPredictorsSub_C[(X)](in + i, upper + i, num_pixels - i, out + i); \
}

// The i - 1 / i + 1 loads rely on the header contract that in[-1] and
// upper[-1] are readable.
GENERATE_PREDICTOR_1(1, in[i - 1])     // Predictor1: L
GENERATE_PREDICTOR_1(2, upper[i])      // Predictor2: T
GENERATE_PREDICTOR_1(3, upper[i + 1])  // Predictor3: TR
GENERATE_PREDICTOR_1(4, upper[i - 1])  // Predictor4: TL
#undef GENERATE_PREDICTOR_1
|
||||
|
||||
// Predictor5: avg2(avg2(L, TR), T)
// Four pixels per iteration; leftovers fall back to the C routine.
static void PredictorSub5_SSE2(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
    const __m128i TR = _mm_loadu_si128((const __m128i*)&upper[i + 1]);
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
    __m128i avg, pred, res;
    Average2_m128i(&L, &TR, &avg);    // avg  = avg2(L, TR)
    Average2_m128i(&avg, &T, &pred);  // pred = avg2(avg, T)
    res = _mm_sub_epi8(src, pred);
    _mm_storeu_si128((__m128i*)&out[i], res);
  }
  VP8LPredictorsSub_C[5](in + i, upper + i, num_pixels - i, out + i);
}
|
||||
|
||||
// Generates PredictorSub##X##_SSE2 for the "average of two neighbors"
// predictors: out[i] = in[i] - avg2(A, B), where A and B are neighboring
// pixel expressions using the loop index i.  Four pixels per iteration;
// leftovers fall back to the matching C routine.
#define GENERATE_PREDICTOR_2(X, A, B) \
static void PredictorSub##X##_SSE2(const uint32_t* in, const uint32_t* upper, \
                                   int num_pixels, uint32_t* out) { \
  int i; \
  for (i = 0; i + 4 <= num_pixels; i += 4) { \
    const __m128i tA = _mm_loadu_si128((const __m128i*)&(A)); \
    const __m128i tB = _mm_loadu_si128((const __m128i*)&(B)); \
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); \
    __m128i pred, res; \
    Average2_m128i(&tA, &tB, &pred); \
    res = _mm_sub_epi8(src, pred); \
    _mm_storeu_si128((__m128i*)&out[i], res); \
  } \
  VP8LPredictorsSub_C[(X)](in + i, upper + i, num_pixels - i, out + i); \
}

GENERATE_PREDICTOR_2(6, in[i - 1], upper[i - 1])   // Predictor6: avg(L, TL)
GENERATE_PREDICTOR_2(7, in[i - 1], upper[i])       // Predictor7: avg(L, T)
GENERATE_PREDICTOR_2(8, upper[i - 1], upper[i])    // Predictor8: avg(TL, T)
GENERATE_PREDICTOR_2(9, upper[i], upper[i + 1])    // Predictor9: average(T, TR)
#undef GENERATE_PREDICTOR_2
|
||||
|
||||
// Predictor10: avg(avg(L,TL), avg(T, TR)).
// Four pixels per iteration; leftovers fall back to the C routine.
static void PredictorSub10_SSE2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
    const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
    const __m128i TR = _mm_loadu_si128((const __m128i*)&upper[i + 1]);
    __m128i avgTTR, avgLTL, avg, res;
    Average2_m128i(&T, &TR, &avgTTR);
    Average2_m128i(&L, &TL, &avgLTL);
    Average2_m128i(&avgTTR, &avgLTL, &avg);
    res = _mm_sub_epi8(src, avg);
    _mm_storeu_si128((__m128i*)&out[i], res);
  }
  VP8LPredictorsSub_C[10](in + i, upper + i, num_pixels - i, out + i);
}
|
||||
|
||||
// Predictor11: select.
// For each of the four 32-bit pixels, computes the sum of absolute byte
// differences between *A and *B, leaving one per-pixel sum in each 32-bit
// lane of *out (as a packed 16-bit value).
static void GetSumAbsDiff32(const __m128i* const A, const __m128i* const B,
                            __m128i* const out) {
  // We can unpack with any value on the upper 32 bits, provided it's the same
  // on both operands (so that their sum of abs diff is zero). Here we use *A.
  const __m128i A_lo = _mm_unpacklo_epi32(*A, *A);
  const __m128i B_lo = _mm_unpacklo_epi32(*B, *A);
  const __m128i A_hi = _mm_unpackhi_epi32(*A, *A);
  const __m128i B_hi = _mm_unpackhi_epi32(*B, *A);
  const __m128i s_lo = _mm_sad_epu8(A_lo, B_lo);
  const __m128i s_hi = _mm_sad_epu8(A_hi, B_hi);
  // Each per-pixel SAD is at most 4 * 255, so the signed 32->16 pack
  // cannot saturate.
  *out = _mm_packs_epi32(s_lo, s_hi);
}
|
||||
|
||||
// Vector form of the Select() predictor: per pixel, picks L when its summed
// channel distance to TL exceeds T's, otherwise T.  Four pixels per
// iteration; leftovers fall back to the C routine.
static void PredictorSub11_SSE2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
    const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
    __m128i pa, pb;
    GetSumAbsDiff32(&T, &TL, &pa);   // pa = sum |T-TL|
    GetSumAbsDiff32(&L, &TL, &pb);   // pb = sum |L-TL|
    {
      const __m128i mask = _mm_cmpgt_epi32(pb, pa);
      const __m128i A = _mm_and_si128(mask, L);
      const __m128i B = _mm_andnot_si128(mask, T);
      const __m128i pred = _mm_or_si128(A, B);  // pred = (pb > pa) ? L : T
      const __m128i res = _mm_sub_epi8(src, pred);
      _mm_storeu_si128((__m128i*)&out[i], res);
    }
  }
  VP8LPredictorsSub_C[11](in + i, upper + i, num_pixels - i, out + i);
}
|
||||
|
||||
// Predictor12: ClampedSubSubtractFull.
// Widens each pixel to 16 bits per channel, computes L + (T - TL) exactly,
// then _mm_packus_epi16 clamps each channel back to [0, 255] — matching the
// scalar Clip255(L + T - TL).  Four pixels per iteration; leftovers fall
// back to the C routine.
static void PredictorSub12_SSE2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
    const __m128i L_lo = _mm_unpacklo_epi8(L, zero);
    const __m128i L_hi = _mm_unpackhi_epi8(L, zero);
    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
    const __m128i T_lo = _mm_unpacklo_epi8(T, zero);
    const __m128i T_hi = _mm_unpackhi_epi8(T, zero);
    const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
    const __m128i TL_lo = _mm_unpacklo_epi8(TL, zero);
    const __m128i TL_hi = _mm_unpackhi_epi8(TL, zero);
    const __m128i diff_lo = _mm_sub_epi16(T_lo, TL_lo);
    const __m128i diff_hi = _mm_sub_epi16(T_hi, TL_hi);
    const __m128i pred_lo = _mm_add_epi16(L_lo, diff_lo);
    const __m128i pred_hi = _mm_add_epi16(L_hi, diff_hi);
    const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi);
    const __m128i res = _mm_sub_epi8(src, pred);
    _mm_storeu_si128((__m128i*)&out[i], res);
  }
  VP8LPredictorsSub_C[12](in + i, upper + i, num_pixels - i, out + i);
}
|
||||
|
||||
// Predictors13: ClampedAddSubtractHalf
// 16-bit-per-channel evaluation of avg(L, T) + (avg - TL) / 2 with clamping
// via _mm_packus_epi16.  bit_fix adds 1 to negative (avg - TL) values before
// the arithmetic shift so the halving truncates toward zero, matching the
// scalar (a - b) / 2.  Only two pixels fit per iteration after widening.
static void PredictorSub13_SSE2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  for (i = 0; i + 2 <= num_pixels; i += 2) {
    // we can only process two pixels at a time
    const __m128i L = _mm_loadl_epi64((const __m128i*)&in[i - 1]);
    const __m128i src = _mm_loadl_epi64((const __m128i*)&in[i]);
    const __m128i T = _mm_loadl_epi64((const __m128i*)&upper[i]);
    const __m128i TL = _mm_loadl_epi64((const __m128i*)&upper[i - 1]);
    const __m128i L_lo = _mm_unpacklo_epi8(L, zero);
    const __m128i T_lo = _mm_unpacklo_epi8(T, zero);
    const __m128i TL_lo = _mm_unpacklo_epi8(TL, zero);
    const __m128i sum = _mm_add_epi16(T_lo, L_lo);
    const __m128i avg = _mm_srli_epi16(sum, 1);           // avg = (T + L) >> 1
    const __m128i A1 = _mm_sub_epi16(avg, TL_lo);         // avg - TL
    const __m128i bit_fix = _mm_cmpgt_epi16(TL_lo, avg);  // -1 where A1 < 0
    const __m128i A2 = _mm_sub_epi16(A1, bit_fix);        // round toward zero
    const __m128i A3 = _mm_srai_epi16(A2, 1);             // (avg - TL) / 2
    const __m128i A4 = _mm_add_epi16(avg, A3);
    const __m128i pred = _mm_packus_epi16(A4, A4);        // clamp to [0, 255]
    const __m128i res = _mm_sub_epi8(src, pred);
    _mm_storel_epi64((__m128i*)&out[i], res);
  }
  VP8LPredictorsSub_C[13](in + i, upper + i, num_pixels - i, out + i);
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Entry point
|
||||
|
||||
@ -390,6 +587,23 @@ WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitSSE2(void) {
|
||||
VP8LHistogramAdd = HistogramAdd;
|
||||
VP8LCombinedShannonEntropy = CombinedShannonEntropy;
|
||||
VP8LVectorMismatch = VectorMismatch;
|
||||
|
||||
VP8LPredictorsSub[0] = PredictorSub0_SSE2;
|
||||
VP8LPredictorsSub[1] = PredictorSub1_SSE2;
|
||||
VP8LPredictorsSub[2] = PredictorSub2_SSE2;
|
||||
VP8LPredictorsSub[3] = PredictorSub3_SSE2;
|
||||
VP8LPredictorsSub[4] = PredictorSub4_SSE2;
|
||||
VP8LPredictorsSub[5] = PredictorSub5_SSE2;
|
||||
VP8LPredictorsSub[6] = PredictorSub6_SSE2;
|
||||
VP8LPredictorsSub[7] = PredictorSub7_SSE2;
|
||||
VP8LPredictorsSub[8] = PredictorSub8_SSE2;
|
||||
VP8LPredictorsSub[9] = PredictorSub9_SSE2;
|
||||
VP8LPredictorsSub[10] = PredictorSub10_SSE2;
|
||||
VP8LPredictorsSub[11] = PredictorSub11_SSE2;
|
||||
VP8LPredictorsSub[12] = PredictorSub12_SSE2;
|
||||
VP8LPredictorsSub[13] = PredictorSub13_SSE2;
|
||||
VP8LPredictorsSub[14] = PredictorSub0_SSE2; // <- padding security sentinels
|
||||
VP8LPredictorsSub[15] = PredictorSub0_SSE2;
|
||||
}
|
||||
|
||||
#else // !WEBP_USE_SSE2
|
||||
|
Loading…
Reference in New Issue
Block a user