// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// ARM NEON version of dsp functions and loop filtering.
//
// Authors: Somnath Banerjee (somnath@google.com)
// Johann Koenig (johannkoenig@google.com)
#include "./dsp.h"
#if defined(WEBP_USE_NEON)
// #define USE_INTRINSICS // use intrinsics when possible
#include <arm_neon.h>
#include "../dec/vp8i.h"
#define QRegs "q0", "q1", "q2", "q3", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
#define FLIP_SIGN_BIT2(a, b, s) \
"veor " #a "," #a "," #s " \n" \
"veor " #b "," #b "," #s " \n" \
#define FLIP_SIGN_BIT4(a, b, c, d, s) \
FLIP_SIGN_BIT2(a, b, s) \
FLIP_SIGN_BIT2(c, d, s) \
#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \
"vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \
"vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \
"vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \
"vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \
"vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
"vdup.8 q14, " #thresh " \n" \
"vcge.u8 " #mask ", q14, q15 \n" /* mask <= thresh */
#define GET_BASE_DELTA(p1, p0, q0, q1, o) \
"vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \
"vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (p0 - q0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (p0 - q0) */ \
"vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (p0 - q0) */
#define DO_SIMPLE_FILTER(p0, q0, fl) \
"vmov.i8 q15, #0x03 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \
"vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \
"vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \
\
"vmov.i8 q15, #0x04 \n" \
"vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 4 */ \
"vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \
"vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */
// Applies filter on 2 pixels (p0 and q0)
#define DO_FILTER2(p1, p0, q0, q1, thresh) \
NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \
"vmov.i8 q10, #0x80 \n" /* sign bit */ \
FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \
GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \
"vand q9, q9, q11 \n" /* apply filter mask */ \
DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \
FLIP_SIGN_BIT2(p0, q0, q10)
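// For reference, a non-compiled scalar sketch of what DO_FILTER2 computes for
// one pixel position. The helper names below are illustrative only; each
// SatS8()/SatU8() step mirrors a saturating vqadd/vqsub instruction, and the
// '>> 3' stands for the arithmetic shift done by vshr.s8.
#if 0
static int SatS8(int v) { return v < -128 ? -128 : (v > 127 ? 127 : v); }
static int SatU8(int v) { return v > 255 ? 255 : v; }
static int AbsDiff(int a, int b) { return a > b ? a - b : b - a; }

static void SimpleFilter2Scalar(uint8_t* const p1, uint8_t* const p0,
                                uint8_t* const q0, uint8_t* const q1,
                                int thresh) {
  // NEEDS_FILTER: 2 * |p0 - q0| + |p1 - q1| / 2 <= thresh (8-bit saturation).
  const int sum =
      SatU8(SatU8(2 * AbsDiff(*p0, *q0)) + (AbsDiff(*p1, *q1) >> 1));
  if (sum <= thresh) {
    // FLIP_SIGN_BIT: move pixels to the signed range by xor'ing the sign bit.
    const int sp1 = (int8_t)(*p1 ^ 0x80), sp0 = (int8_t)(*p0 ^ 0x80);
    const int sq0 = (int8_t)(*q0 ^ 0x80), sq1 = (int8_t)(*q1 ^ 0x80);
    // GET_BASE_DELTA: a = (p1 - q1) + 3 * (q0 - p0), saturating at each step.
    const int d = SatS8(sq0 - sp0);
    const int a = SatS8(SatS8(SatS8(SatS8(sp1 - sq1) + d) + d) + d);
    // DO_SIMPLE_FILTER: p0 += (a + 3) >> 3, q0 -= (a + 4) >> 3.
    const int f_p0 = SatS8(a + 3) >> 3;
    const int f_q0 = SatS8(a + 4) >> 3;
    // FLIP_SIGN_BIT2: back to the unsigned range.
    *p0 = (uint8_t)(SatS8(sp0 + f_p0) ^ 0x80);
    *q0 = (uint8_t)(SatS8(sq0 - f_q0) ^ 0x80);
  }
}
#endif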
#if defined(USE_INTRINSICS)
static WEBP_INLINE uint8x8x4_t Load4x8(const uint8_t* const src, int stride) {
uint8x8x4_t out;
// Zero-initialize so the first vld4_lane_u8() does not read an
// uninitialized vector.
out.val[0] = out.val[1] = out.val[2] = out.val[3] = vdup_n_u8(0);
out = vld4_lane_u8(src + 0 * stride, out, 0);
out = vld4_lane_u8(src + 1 * stride, out, 1);
out = vld4_lane_u8(src + 2 * stride, out, 2);
out = vld4_lane_u8(src + 3 * stride, out, 3);
out = vld4_lane_u8(src + 4 * stride, out, 4);
out = vld4_lane_u8(src + 5 * stride, out, 5);
out = vld4_lane_u8(src + 6 * stride, out, 6);
out = vld4_lane_u8(src + 7 * stride, out, 7);
return out;
}
static WEBP_INLINE void Load4x16(const uint8_t* const src, int stride,
uint8x16_t* const p1, uint8x16_t* const p0,
uint8x16_t* const q0, uint8x16_t* const q1) {
// row0 = p1[0..7]|p0[0..7]|q0[0..7]|q1[0..7]
const uint8x8x4_t row0 = Load4x8(src - 2 + 0 * stride, stride);
// row8 = p1[8..15]|p0[8..15]|q0[8..15]|q1[8..15]
const uint8x8x4_t row8 = Load4x8(src - 2 + 8 * stride, stride);
*p1 = vcombine_u8(row0.val[0], row8.val[0]);
*p0 = vcombine_u8(row0.val[1], row8.val[1]);
*q0 = vcombine_u8(row0.val[2], row8.val[2]);
*q1 = vcombine_u8(row0.val[3], row8.val[3]);
}
static WEBP_INLINE void Load16x4(const uint8_t* const src, int stride,
uint8x16_t* const p1, uint8x16_t* const p0,
uint8x16_t* const q0, uint8x16_t* const q1) {
*p1 = vld1q_u8(src - 2 * stride);
*p0 = vld1q_u8(src - 1 * stride);
*q0 = vld1q_u8(src + 0 * stride);
*q1 = vld1q_u8(src + 1 * stride);
}
static WEBP_INLINE void Store2x8(const uint8x8x2_t v,
uint8_t* const dst, int stride) {
vst2_lane_u8(dst + 0 * stride, v, 0);
vst2_lane_u8(dst + 1 * stride, v, 1);
vst2_lane_u8(dst + 2 * stride, v, 2);
vst2_lane_u8(dst + 3 * stride, v, 3);
vst2_lane_u8(dst + 4 * stride, v, 4);
vst2_lane_u8(dst + 5 * stride, v, 5);
vst2_lane_u8(dst + 6 * stride, v, 6);
vst2_lane_u8(dst + 7 * stride, v, 7);
}
static WEBP_INLINE void Store2x16(const uint8x16_t p0, const uint8x16_t q0,
uint8_t* const dst, int stride) {
uint8x8x2_t lo, hi;
lo.val[0] = vget_low_u8(p0);
lo.val[1] = vget_low_u8(q0);
hi.val[0] = vget_high_u8(p0);
hi.val[1] = vget_high_u8(q0);
Store2x8(lo, dst - 1 + 0 * stride, stride);
Store2x8(hi, dst - 1 + 8 * stride, stride);
}
static WEBP_INLINE void Store16x2(const uint8x16_t p0, const uint8x16_t q0,
uint8_t* const dst, int stride) {
vst1q_u8(dst - stride, p0);
vst1q_u8(dst, q0);
}
static uint8x16_t NeedsFilter(const uint8x16_t p1, const uint8x16_t p0,
const uint8x16_t q0, const uint8x16_t q1,
int thresh) {
const uint8x16_t thresh_v = vdupq_n_u8((uint8_t)thresh);
const uint8x16_t a_p0_q0 = vabdq_u8(p0, q0); // abs(p0-q0)
const uint8x16_t a_p1_q1 = vabdq_u8(p1, q1); // abs(p1-q1)
const uint8x16_t a_p0_q0_2 = vqaddq_u8(a_p0_q0, a_p0_q0); // 2 * abs(p0-q0)
const uint8x16_t a_p1_q1_2 = vshrq_n_u8(a_p1_q1, 1); // abs(p1-q1) / 2
const uint8x16_t sum = vqaddq_u8(a_p0_q0_2, a_p1_q1_2);
const uint8x16_t mask = vcgeq_u8(thresh_v, sum);
return mask;
}
static int8x16_t FlipSign(const uint8x16_t v) {
const uint8x16_t sign_bit = vdupq_n_u8(0x80);
return vreinterpretq_s8_u8(veorq_u8(v, sign_bit));
}
static uint8x16_t FlipSignBack(const int8x16_t v) {
const int8x16_t sign_bit = vdupq_n_s8(0x80);
return vreinterpretq_u8_s8(veorq_s8(v, sign_bit));
}
static int8x16_t GetBaseDelta(const int8x16_t p1, const int8x16_t p0,
const int8x16_t q0, const int8x16_t q1) {
const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0)
const int8x16_t p1_q1 = vqsubq_s8(p1, q1); // (p1-q1)
const int8x16_t s1 = vqaddq_s8(p1_q1, q0_p0); // (p1-q1) + 1 * (q0 - p0)
const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // (p1-q1) + 2 * (q0 - p0)
const int8x16_t s3 = vqaddq_s8(q0_p0, s2); // (p1-q1) + 3 * (q0 - p0)
return s3;
}
static void DoFilter2(const uint8x16_t p1, const uint8x16_t p0,
const uint8x16_t q0, const uint8x16_t q1,
uint8x16_t* const op0, uint8x16_t* const oq0,
int thresh) {
const uint8x16_t mask = NeedsFilter(p1, p0, q0, q1, thresh);
const int8x16_t p1s = FlipSign(p1);
const int8x16_t p0s = FlipSign(p0);
const int8x16_t q0s = FlipSign(q0);
const int8x16_t q1s = FlipSign(q1);
const int8x16_t delta0 = GetBaseDelta(p1s, p0s, q0s, q1s);
const int8x16_t delta1 = vandq_s8(delta0, vreinterpretq_s8_u8(mask));
// DoSimpleFilter:
const int8x16_t kCst3 = vdupq_n_s8(0x03);
const int8x16_t kCst4 = vdupq_n_s8(0x04);
const int8x16_t delta_p3 = vqaddq_s8(delta1, kCst3);
const int8x16_t delta_p4 = vqaddq_s8(delta1, kCst4);
const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3);
const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3);
const int8x16_t sp0 = vqaddq_s8(p0s, delta3);
const int8x16_t sq0 = vqsubq_s8(q0s, delta4);
*op0 = FlipSignBack(sp0);
*oq0 = FlipSignBack(sq0);
}
#endif // USE_INTRINSICS
// Load/Store vertical edge
#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \
"vld4.8 {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \
"vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"
#define STORE8x2(c1, c2, p, stride) \
"vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[3], " #c2"[3]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[4], " #c2"[4]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[5], " #c2"[5]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[6], " #c2"[6]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[7], " #c2"[7]}," #p "," #stride " \n"
// Treats 'v' as a uint8x8_t and zero-extends it to an int16x8_t.
static WEBP_INLINE int16x8_t ConvertU8ToS16(uint32x2_t v) {
return vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(v)));
}
// Performs unsigned 8b saturation on 'dst01' and 'dst23' storing the result
// to the corresponding rows of 'dst'.
static WEBP_INLINE void SaturateAndStore4x4(uint8_t* const dst,
int16x8_t dst01, int16x8_t dst23) {
// Unsigned saturate to 8b.
const uint8x8_t dst01_u8 = vqmovun_s16(dst01);
const uint8x8_t dst23_u8 = vqmovun_s16(dst23);
// Store the results.
*(int*)(dst + 0 * BPS) = vget_lane_s32(vreinterpret_s32_u8(dst01_u8), 0);
*(int*)(dst + 1 * BPS) = vget_lane_s32(vreinterpret_s32_u8(dst01_u8), 1);
*(int*)(dst + 2 * BPS) = vget_lane_s32(vreinterpret_s32_u8(dst23_u8), 0);
*(int*)(dst + 3 * BPS) = vget_lane_s32(vreinterpret_s32_u8(dst23_u8), 1);
}
//-----------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)
#if !defined(USE_INTRINSICS)
static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) {
__asm__ volatile (
"sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
"vld1.u8 {q1}, [%[p]], %[stride] \n" // p1
"vld1.u8 {q2}, [%[p]], %[stride] \n" // p0
"vld1.u8 {q3}, [%[p]], %[stride] \n" // q0
"vld1.u8 {q12}, [%[p]] \n" // q1
DO_FILTER2(q1, q2, q3, q12, %[thresh])
"sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
"vst1.u8 {q2}, [%[p]], %[stride] \n" // store op0
"vst1.u8 {q3}, [%[p]] \n" // store oq0
: [p] "+r"(p)
: [stride] "r"(stride), [thresh] "r"(thresh)
: "memory", QRegs
);
}
#else
static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) {
uint8x16_t p1, p0, q0, q1, op0, oq0;
Load16x4(p, stride, &p1, &p0, &q0, &q1);
DoFilter2(p1, p0, q0, q1, &op0, &oq0, thresh);
Store16x2(op0, oq0, p, stride);
}
#endif // USE_INTRINSICS
#if 0 // #if defined(USE_INTRINSICS)
// This intrinsics version makes gcc-4.6.3 crash during DoFilter2() compilation
// (register alloc, probably), so we hard-disable it for now until we figure
// out what is wrong. It compiles and works OK at the -O1 optimization level.
static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) {
uint8x16_t p1, p0, q0, q1, oq0, op0;
Load4x16(p, stride, &p1, &p0, &q0, &q1);
DoFilter2(p1, p0, q0, q1, &op0, &oq0, thresh);
Store2x16(op0, oq0, p, stride);
}
#else
static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) {
__asm__ volatile (
"sub r4, %[p], #2 \n" // base1 = p - 2
"lsl r6, %[stride], #1 \n" // r6 = 2 * stride
"add r5, r4, %[stride] \n" // base2 = base1 + stride
LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
"vswp d3, d24 \n" // p1:q1 p0:q3
"vswp d5, d26 \n" // q0:q2 q1:q4
"vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4
DO_FILTER2(q1, q2, q12, q13, %[thresh])
"sub %[p], %[p], #1 \n" // p - 1
"vswp d5, d24 \n"
STORE8x2(d4, d5, [%[p]], %[stride])
STORE8x2(d24, d25, [%[p]], %[stride])
: [p] "+r"(p)
: [stride] "r"(stride), [thresh] "r"(thresh)
: "memory", "r4", "r5", "r6", QRegs
);
}
#endif // USE_INTRINSICS
static void SimpleVFilter16iNEON(uint8_t* p, int stride, int thresh) {
int k;
for (k = 3; k > 0; --k) {
p += 4 * stride;
SimpleVFilter16NEON(p, stride, thresh);
}
}
static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
int k;
for (k = 3; k > 0; --k) {
p += 4;
SimpleHFilter16NEON(p, stride, thresh);
}
}
//-----------------------------------------------------------------------------
// Inverse transforms (Paragraph 14.4)
static void TransformOne(const int16_t* in, uint8_t* dst) {
const int kBPS = BPS;
const int16_t constants[] = {20091, 17734, 0, 0};
/* kC1, kC2. Padded because vld1.16 loads 8 bytes
* Technically these are unsigned but vqdmulh is only available in signed.
* vqdmulh returns high half (effectively >> 16) but also doubles the value,
* changing the >> 16 to >> 15 and requiring an additional >> 1.
* We use this to our advantage with kC2. The canonical value is 35468.
* However, the high bit is set so treating it as signed will give incorrect
* results. We avoid this by down shifting by 1 here to clear the highest bit.
* Combined with the doubling effect of vqdmulh we get >> 16.
* This cannot be applied to kC1 because its lowest bit is set; down shifting
* the constant would reduce precision.
*/
/* libwebp uses a trick to avoid some extra addition that libvpx does.
* Instead of:
* temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
* libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, the resulting
* constant no longer fits in the signed 16-bit range that vqdmulh needs (the
* same kind of problem we work around for kC2 by down shifting), so the extra
* addition is instead done explicitly after the multiply below.
*/
/* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */
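/* Scalar sketch of the multiply scheme used below (illustration only), using
 * vqdmulh(x, k) == (2 * x * k) >> 16:
 *   (x * kC2) >> 16 == vqdmulh(x, kC2 >> 1)   (kC2 = 35468 is even)
 *   (x * kC1) >> 16 == vqdmulh(x, kC1) >> 1   (kC1 = 20091 is odd)
 * and the '+ (1 << 16)' folded into kC1 by the C code becomes the explicit
 * "vqadd.s16 q8, q2, q8" below, i.e. x + ((x * kC1) >> 16).
 * e.g. x = 1000: (1000 * 35468) >> 16 == 541 == (2 * 1000 * 17734) >> 16.
 */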
__asm__ volatile (
"vld1.16 {q1, q2}, [%[in]] \n"
"vld1.16 {d0}, [%[constants]] \n"
/* d2: in[0]
* d3: in[8]
* d4: in[4]
* d5: in[12]
*/
"vswp d3, d4 \n"
/* q8 = {in[4], in[12]} * kC1 * 2 >> 16
* q9 = {in[4], in[12]} * kC2 >> 16
*/
"vqdmulh.s16 q8, q2, d0[0] \n"
"vqdmulh.s16 q9, q2, d0[1] \n"
/* d22 = a = in[0] + in[8]
* d23 = b = in[0] - in[8]
*/
"vqadd.s16 d22, d2, d3 \n"
"vqsub.s16 d23, d2, d3 \n"
/* The multiplication should be x * kC1 >> 16
* However, with vqdmulh we get x * kC1 * 2 >> 16
* (multiply, double, return high half)
* We avoided this in kC2 by pre-shifting the constant.
* q8 = in[4]/[12] * kC1 >> 16
*/
"vshr.s16 q8, q8, #1 \n"
/* Add {in[4], in[12]} back after the multiplication. This is handled by
* adding 1 << 16 to kC1 in the libwebp C code.
*/
"vqadd.s16 q8, q2, q8 \n"
/* d20 = c = in[4]*kC2 - in[12]*kC1
* d21 = d = in[4]*kC1 + in[12]*kC2
*/
"vqsub.s16 d20, d18, d17 \n"
"vqadd.s16 d21, d19, d16 \n"
/* d2 = tmp[0] = a + d
* d3 = tmp[1] = b + c
* d4 = tmp[2] = b - c
* d5 = tmp[3] = a - d
*/
"vqadd.s16 d2, d22, d21 \n"
"vqadd.s16 d3, d23, d20 \n"
"vqsub.s16 d4, d23, d20 \n"
"vqsub.s16 d5, d22, d21 \n"
"vzip.16 q1, q2 \n"
"vzip.16 q1, q2 \n"
"vswp d3, d4 \n"
/* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
* q9 = {tmp[4], tmp[12]} * kC2 >> 16
*/
"vqdmulh.s16 q8, q2, d0[0] \n"
"vqdmulh.s16 q9, q2, d0[1] \n"
/* d22 = a = tmp[0] + tmp[8]
* d23 = b = tmp[0] - tmp[8]
*/
"vqadd.s16 d22, d2, d3 \n"
"vqsub.s16 d23, d2, d3 \n"
/* See long winded explanations prior */
"vshr.s16 q8, q8, #1 \n"
"vqadd.s16 q8, q2, q8 \n"
/* d20 = c = tmp[4]*kC2 - tmp[12]*kC1
* d21 = d = tmp[4]*kC1 + tmp[12]*kC2
*/
"vqsub.s16 d20, d18, d17 \n"
"vqadd.s16 d21, d19, d16 \n"
/* d2 = tmp[0] = a + d
* d3 = tmp[1] = b + c
* d4 = tmp[2] = b - c
* d5 = tmp[3] = a - d
*/
"vqadd.s16 d2, d22, d21 \n"
"vqadd.s16 d3, d23, d20 \n"
"vqsub.s16 d4, d23, d20 \n"
"vqsub.s16 d5, d22, d21 \n"
"vld1.32 d6[0], [%[dst]], %[kBPS] \n"
"vld1.32 d6[1], [%[dst]], %[kBPS] \n"
"vld1.32 d7[0], [%[dst]], %[kBPS] \n"
"vld1.32 d7[1], [%[dst]], %[kBPS] \n"
"sub %[dst], %[dst], %[kBPS], lsl #2 \n"
/* ((val) + 4) >> 3 */
"vrshr.s16 d2, d2, #3 \n"
"vrshr.s16 d3, d3, #3 \n"
"vrshr.s16 d4, d4, #3 \n"
"vrshr.s16 d5, d5, #3 \n"
"vzip.16 q1, q2 \n"
"vzip.16 q1, q2 \n"
/* Must accumulate before saturating */
"vmovl.u8 q8, d6 \n"
"vmovl.u8 q9, d7 \n"
"vqadd.s16 q1, q1, q8 \n"
"vqadd.s16 q2, q2, q9 \n"
"vqmovun.s16 d0, q1 \n"
"vqmovun.s16 d1, q2 \n"
"vst1.32 d0[0], [%[dst]], %[kBPS] \n"
"vst1.32 d0[1], [%[dst]], %[kBPS] \n"
"vst1.32 d1[0], [%[dst]], %[kBPS] \n"
"vst1.32 d1[1], [%[dst]] \n"
: [in] "+r"(in), [dst] "+r"(dst) /* modified registers */
: [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */
: "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" /* clobbered */
);
}
static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
TransformOne(in, dst);
if (do_two) {
TransformOne(in + 16, dst + 4);
}
}
static void TransformDC(const int16_t* in, uint8_t* dst) {
const int16x8_t DC = vdupq_n_s16((in[0] + 4) >> 3);
uint32x2_t dst01 = {0, 0};
uint32x2_t dst23 = {0, 0};
// Load the source pixels.
dst01 = vset_lane_u32(*(uint32_t*)(dst + 0 * BPS), dst01, 0);
dst23 = vset_lane_u32(*(uint32_t*)(dst + 2 * BPS), dst23, 0);
dst01 = vset_lane_u32(*(uint32_t*)(dst + 1 * BPS), dst01, 1);
dst23 = vset_lane_u32(*(uint32_t*)(dst + 3 * BPS), dst23, 1);
{
// Convert to 16b.
int16x8_t dst01_s16 = ConvertU8ToS16(dst01);
int16x8_t dst23_s16 = ConvertU8ToS16(dst23);
// Add the inverse transform.
dst01_s16 = vaddq_s16(dst01_s16, DC);
dst23_s16 = vaddq_s16(dst23_s16, DC);
SaturateAndStore4x4(dst, dst01_s16, dst23_s16);
}
}
//------------------------------------------------------------------------------
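// STORE_WHT writes one lane from each of the four result vectors to the DC
// position (coefficient 0) of four consecutive 16-coefficient blocks, hence
// the '(dst) += 16' stride.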
#define STORE_WHT(dst, col, row01, row23) do { \
*dst = vgetq_lane_s32(row01.val[0], col); (dst) += 16; \
*dst = vgetq_lane_s32(row01.val[1], col); (dst) += 16; \
*dst = vgetq_lane_s32(row23.val[0], col); (dst) += 16; \
*dst = vgetq_lane_s32(row23.val[1], col); (dst) += 16; \
} while (0)
static void TransformWHT(const int16_t* in, int16_t* out) {
int32x4x2_t tmp0; // tmp[0..7]
int32x4x2_t tmp1; // tmp[8..15]
{
// Load the source.
const int16x4_t in00_03 = vld1_s16(in + 0);
const int16x4_t in04_07 = vld1_s16(in + 4);
const int16x4_t in08_11 = vld1_s16(in + 8);
const int16x4_t in12_15 = vld1_s16(in + 12);
const int32x4_t a0 = vaddl_s16(in00_03, in12_15); // in[0..3] + in[12..15]
const int32x4_t a1 = vaddl_s16(in04_07, in08_11); // in[4..7] + in[8..11]
const int32x4_t a2 = vsubl_s16(in04_07, in08_11); // in[4..7] - in[8..11]
const int32x4_t a3 = vsubl_s16(in00_03, in12_15); // in[0..3] - in[12..15]
tmp0.val[0] = vaddq_s32(a0, a1);
tmp0.val[1] = vaddq_s32(a3, a2);
tmp1.val[0] = vsubq_s32(a0, a1);
tmp1.val[1] = vsubq_s32(a3, a2);
}
tmp0 = vzipq_s32(tmp0.val[0], tmp0.val[1]); // 0, 4, 1, 5 | 2, 6, 3, 7
tmp1 = vzipq_s32(tmp1.val[0], tmp1.val[1]); // 8, 12, 9, 13 | 10, 14, 11, 15
{
// Arrange the temporary results column-wise.
const int32x4_t tmp_0_4_8_12 =
vcombine_s32(vget_low_s32(tmp0.val[0]), vget_low_s32(tmp1.val[0]));
const int32x4_t tmp_2_6_10_14 =
vcombine_s32(vget_low_s32(tmp0.val[1]), vget_low_s32(tmp1.val[1]));
const int32x4_t tmp_1_5_9_13 =
vcombine_s32(vget_high_s32(tmp0.val[0]), vget_high_s32(tmp1.val[0]));
const int32x4_t tmp_3_7_11_15 =
vcombine_s32(vget_high_s32(tmp0.val[1]), vget_high_s32(tmp1.val[1]));
const int32x4_t three = vdupq_n_s32(3);
const int32x4_t dc = vaddq_s32(tmp_0_4_8_12, three); // add rounder
const int32x4_t a0 = vaddq_s32(dc, tmp_3_7_11_15);
const int32x4_t a1 = vaddq_s32(tmp_1_5_9_13, tmp_2_6_10_14);
const int32x4_t a2 = vsubq_s32(tmp_1_5_9_13, tmp_2_6_10_14);
const int32x4_t a3 = vsubq_s32(dc, tmp_3_7_11_15);
tmp0.val[0] = vaddq_s32(a0, a1);
tmp0.val[1] = vaddq_s32(a3, a2);
tmp1.val[0] = vsubq_s32(a0, a1);
tmp1.val[1] = vsubq_s32(a3, a2);
// right shift the results by 3.
tmp0.val[0] = vshrq_n_s32(tmp0.val[0], 3);
tmp0.val[1] = vshrq_n_s32(tmp0.val[1], 3);
tmp1.val[0] = vshrq_n_s32(tmp1.val[0], 3);
tmp1.val[1] = vshrq_n_s32(tmp1.val[1], 3);
STORE_WHT(out, 0, tmp0, tmp1);
STORE_WHT(out, 1, tmp0, tmp1);
STORE_WHT(out, 2, tmp0, tmp1);
STORE_WHT(out, 3, tmp0, tmp1);
}
}
#undef STORE_WHT
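// A non-compiled scalar sketch of the same inverse WHT, kept for reference
// (illustration only; the names are hypothetical, but the arithmetic mirrors
// what the vectorized TransformWHT above computes).
#if 0
static void TransformWHT_Scalar(const int16_t* in, int16_t* out) {
  int tmp[16];
  int i;
  for (i = 0; i < 4; ++i) {   // vertical pass
    const int a0 = in[0 + i] + in[12 + i];
    const int a1 = in[4 + i] + in[ 8 + i];
    const int a2 = in[4 + i] - in[ 8 + i];
    const int a3 = in[0 + i] - in[12 + i];
    tmp[ 0 + i] = a0 + a1;
    tmp[ 4 + i] = a3 + a2;
    tmp[ 8 + i] = a0 - a1;
    tmp[12 + i] = a3 - a2;
  }
  for (i = 0; i < 4; ++i) {   // horizontal pass, with rounder and >> 3
    const int dc = tmp[0 + i * 4] + 3;
    const int a0 = dc             + tmp[3 + i * 4];
    const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
    const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
    const int a3 = dc             - tmp[3 + i * 4];
    out[ 0] = (a0 + a1) >> 3;   // DC of block 4 * i + 0
    out[16] = (a3 + a2) >> 3;   // DC of block 4 * i + 1
    out[32] = (a0 - a1) >> 3;   // DC of block 4 * i + 2
    out[48] = (a3 - a2) >> 3;   // DC of block 4 * i + 3
    out += 64;
  }
}
#endif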
//------------------------------------------------------------------------------
#define MUL(a, b) (((a) * (b)) >> 16)
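// When only in[0], in[1] and in[4] may be non-zero, the full 4x4 transform
// collapses to the closed form below (sketch):
//   dst[y][x] += (in[0] + 4 + R[y] + C[x]) >> 3
// with R = {d4, c4, -c4, -d4} over rows and C = {d1, c1, -c1, -d1} over
// columns, where cN = MUL(in[N], kC2) and dN = MUL(in[N], kC1).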
static void TransformAC3(const int16_t* in, uint8_t* dst) {
static const int kC1 = 20091 + (1 << 16);
static const int kC2 = 35468;
const int16x4_t A = vdup_n_s16(in[0] + 4);
const int16x4_t c4 = vdup_n_s16(MUL(in[4], kC2));
const int16x4_t d4 = vdup_n_s16(MUL(in[4], kC1));
const int c1 = MUL(in[1], kC2);
const int d1 = MUL(in[1], kC1);
const int16x4_t CD = {d1, c1, -c1, -d1};
const int16x4_t B = vqadd_s16(A, CD);
const int16x8_t m0_m1 = vcombine_s16(vqadd_s16(B, d4), vqadd_s16(B, c4));
const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4));
uint32x2_t dst01 = {0, 0};
uint32x2_t dst23 = {0, 0};
// Load the source pixels.
dst01 = vset_lane_u32(*(uint32_t*)(dst + 0 * BPS), dst01, 0);
dst23 = vset_lane_u32(*(uint32_t*)(dst + 2 * BPS), dst23, 0);
dst01 = vset_lane_u32(*(uint32_t*)(dst + 1 * BPS), dst01, 1);
dst23 = vset_lane_u32(*(uint32_t*)(dst + 3 * BPS), dst23, 1);
{
// Convert to 16b.
int16x8_t dst01_s16 = ConvertU8ToS16(dst01);
int16x8_t dst23_s16 = ConvertU8ToS16(dst23);
// Add the inverse transform.
dst01_s16 = vsraq_n_s16(dst01_s16, m0_m1, 3);
dst23_s16 = vsraq_n_s16(dst23_s16, m2_m3, 3);
SaturateAndStore4x4(dst, dst01_s16, dst23_s16);
}
}
#undef MUL
#endif // WEBP_USE_NEON
//------------------------------------------------------------------------------
// Entry point
extern void VP8DspInitNEON(void);
void VP8DspInitNEON(void) {
#if defined(WEBP_USE_NEON)
VP8Transform = TransformTwo;
VP8TransformAC3 = TransformAC3;
VP8TransformDC = TransformDC;
VP8TransformWHT = TransformWHT;
VP8SimpleVFilter16 = SimpleVFilter16NEON;
VP8SimpleHFilter16 = SimpleHFilter16NEON;
VP8SimpleVFilter16i = SimpleVFilter16iNEON;
VP8SimpleHFilter16i = SimpleHFilter16iNEON;
#endif // WEBP_USE_NEON
}