From 7a68afaac56306b4d118a6a969d5acd252a3dbc1 Mon Sep 17 00:00:00 2001
From: Maryla
Date: Mon, 21 Mar 2022 11:27:39 +0100
Subject: [PATCH] Let SharpArgbToYuv caller pass in an RGB>YUV conversion
 matrix.

Change-Id: I4ed2dfc00ce63361abd49c693f31f307e0b0262f
---
 sharpyuv/sharpyuv.c       | 61 ++++++++++++++++++++++-----------------
 sharpyuv/sharpyuv.h       | 25 ++++++++++++----
 src/enc/picture_csp_enc.c | 10 +++----
 3 files changed, 59 insertions(+), 37 deletions(-)

diff --git a/sharpyuv/sharpyuv.c b/sharpyuv/sharpyuv.c
index 5376f6be..ce671f57 100644
--- a/sharpyuv/sharpyuv.c
+++ b/sharpyuv/sharpyuv.c
@@ -252,25 +252,18 @@ static void InterpolateTwoRows(const fixed_y_t* const best_y,
   }
 }
 
-static WEBP_INLINE uint8_t ConvertRGBToY(int r, int g, int b) {
-  const int luma = 16839 * r + 33059 * g + 6420 * b + SROUNDER;
-  return clip_8b(16 + (luma >> (YUV_FIX + SFIX)));
-}
-
-static WEBP_INLINE uint8_t ConvertRGBToU(int r, int g, int b) {
-  const int u = -9719 * r - 19081 * g + 28800 * b + SROUNDER;
-  return clip_8b(128 + (u >> (YUV_FIX + SFIX)));
-}
-
-static WEBP_INLINE uint8_t ConvertRGBToV(int r, int g, int b) {
-  const int v = +28800 * r - 24116 * g - 4684 * b + SROUNDER;
-  return clip_8b(128 + (v >> (YUV_FIX + SFIX)));
+static WEBP_INLINE uint8_t RGBToYUVComponent(int r, int g, int b,
+                                             const int coeffs[4]) {
+  const int luma = coeffs[0] * r + coeffs[1] * g + coeffs[2] * b +
+                   (coeffs[3] << SFIX) + SROUNDER;
+  return clip_8b((luma >> (YUV_FIX + SFIX)));
 }
 
 static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
                             uint8_t* dst_y, int dst_stride_y,
                             uint8_t* dst_u, int dst_stride_u,
                             uint8_t* dst_v, int dst_stride_v,
-                            int width, int height) {
+                            int width, int height,
+                            const SharpYuvConversionMatrix* yuv_matrix) {
   int i, j;
   const fixed_t* const best_uv_base = best_uv;
   const int w = (width + 1) & ~1;
@@ -284,7 +277,7 @@ static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
       const int r = best_uv[off + 0 * uv_w] + W;
       const int g = best_uv[off + 1 * uv_w] + W;
       const int b = best_uv[off + 2 * uv_w] + W;
-      dst_y[i] = ConvertRGBToY(r, g, b);
+      dst_y[i] = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_y);
     }
     best_y += w;
     best_uv += (j & 1) * 3 * uv_w;
@@ -296,8 +289,8 @@ static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
       const int r = best_uv[off + 0 * uv_w];
       const int g = best_uv[off + 1 * uv_w];
       const int b = best_uv[off + 2 * uv_w];
-      dst_u[i] = ConvertRGBToU(r, g, b);
-      dst_v[i] = ConvertRGBToV(r, g, b);
+      dst_u[i] = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_u);
+      dst_v[i] = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_v);
     }
     best_uv += 3 * uv_w;
     dst_u += dst_stride_u;
@@ -321,7 +314,8 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
                             const uint8_t* b_ptr, int step, int rgb_stride,
                             uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u,
                             int dst_stride_u, uint8_t* dst_v, int dst_stride_v,
-                            int width, int height) {
+                            int width, int height,
+                            const SharpYuvConversionMatrix* yuv_matrix) {
   // we expand the right/bottom border if needed
   const int w = (width + 1) & ~1;
   const int h = (height + 1) & ~1;
@@ -429,7 +423,8 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
   }
   // final reconstruction
   ok = ConvertWRGBToYUV(best_y_base, best_uv_base, dst_y, dst_stride_y, dst_u,
-                        dst_stride_u, dst_v, dst_stride_v, width, height);
+                        dst_stride_u, dst_v, dst_stride_v, width, height,
+                        yuv_matrix);
 
  End:
   free(best_y_base);
@@ -443,18 +438,30 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
 }
 
 #undef SAFE_ALLOC
-int SharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
-                   const uint8_t* b_ptr, int step, int rgb_stride,
-                   uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u,
-                   int dst_stride_u, uint8_t* dst_v, int dst_stride_v,
-                   int width, int height) {
+// In YUV_FIX fixed point precision.
+static const SharpYuvConversionMatrix kWebpYuvMatrix = {
+    {16839, 33059, 6420, 16 << 16},
+    {-9719, -19081, 28800, 128 << 16},
+    {28800, -24116, -4684, 128 << 16},
+};
+
+const SharpYuvConversionMatrix* SharpYuvGetWebpMatrix(void) {
+  return &kWebpYuvMatrix;
+}
+
+int SharpYuvConvert(const uint8_t* r_ptr, const uint8_t* g_ptr,
+                    const uint8_t* b_ptr, int step, int rgb_stride,
+                    uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u,
+                    int dst_stride_u, uint8_t* dst_v, int dst_stride_v,
+                    int width, int height,
+                    const SharpYuvConversionMatrix* yuv_matrix) {
   if (width < kMinDimensionIterativeConversion ||
       height < kMinDimensionIterativeConversion) {
     return 0;
   }
-  return DoSharpArgbToYuv(
-      r_ptr, g_ptr, b_ptr, step, rgb_stride, dst_y, dst_stride_y, dst_u,
-      dst_stride_u, dst_v, dst_stride_v, width, height);
+  return DoSharpArgbToYuv(r_ptr, g_ptr, b_ptr, step, rgb_stride, dst_y,
+                          dst_stride_y, dst_u, dst_stride_u, dst_v,
+                          dst_stride_v, width, height, yuv_matrix);
 }
 
 //------------------------------------------------------------------------------
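As a quick illustration of what the new kWebpYuvMatrix encodes (this sketch is not part of the patch), the standalone program below applies the row formula documented in sharpyuv.h, a dot product with (r, g, b, 1) followed by division by 1<<16 with rounding, and checks that black and white map to the limited-range points (16, 128, 128) and (235, 128, 128). The ApplyRow helper and the explicit +32768 rounding step are assumptions of the sketch, not library code.

    /* Sanity-check sketch for the WebP RGB->YUV coefficients. */
    #include <stdio.h>

    static int ApplyRow(const int coeffs[4], int r, int g, int b) {
      const long long sum = (long long)coeffs[0] * r + (long long)coeffs[1] * g +
                            (long long)coeffs[2] * b + coeffs[3];
      return (int)((sum + (1 << 15)) >> 16);  /* divide by 1<<16, rounded */
    }

    int main(void) {
      const int rgb_to_y[4] = {16839, 33059, 6420, 16 << 16};
      const int rgb_to_u[4] = {-9719, -19081, 28800, 128 << 16};
      const int rgb_to_v[4] = {28800, -24116, -4684, 128 << 16};
      /* Prints "black: 16 128 128" and "white: 235 128 128". */
      printf("black: %d %d %d\n", ApplyRow(rgb_to_y, 0, 0, 0),
             ApplyRow(rgb_to_u, 0, 0, 0), ApplyRow(rgb_to_v, 0, 0, 0));
      printf("white: %d %d %d\n", ApplyRow(rgb_to_y, 255, 255, 255),
             ApplyRow(rgb_to_u, 255, 255, 255), ApplyRow(rgb_to_v, 255, 255, 255));
      return 0;
    }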
diff --git a/sharpyuv/sharpyuv.h b/sharpyuv/sharpyuv.h
index 51aaf482..44fc4a63 100644
--- a/sharpyuv/sharpyuv.h
+++ b/sharpyuv/sharpyuv.h
@@ -18,6 +18,20 @@
 extern "C" {
 #endif
 
+// RGB to YUV conversion matrix, in 16 bit fixed point.
+// y = rgb_to_y[0] * r + rgb_to_y[1] * g + rgb_to_y[2] * b + rgb_to_y[3]
+// u = rgb_to_u[0] * r + rgb_to_u[1] * g + rgb_to_u[2] * b + rgb_to_u[3]
+// v = rgb_to_v[0] * r + rgb_to_v[1] * g + rgb_to_v[2] * b + rgb_to_v[3]
+// Then y, u and v values are divided by 1<<16 and rounded.
+typedef struct {
+  int rgb_to_y[4];
+  int rgb_to_u[4];
+  int rgb_to_v[4];
+} SharpYuvConversionMatrix;
+
+// Returns the RGB to YUV matrix used by WebP.
+const SharpYuvConversionMatrix* SharpYuvGetWebpMatrix(void);
+
 // Converts RGB to YUV420 using a downsampling algorithm that minimizes
 // artefacts caused by chroma subsampling.
 // This is slower than standard downsampling (averaging of 4 UV values).
@@ -27,11 +41,12 @@ extern "C" {
 // TODO(maryla): add 10 bits and handling of various colorspaces. Add YUV444 to
 // YUV420 conversion. Maybe also add 422 support (it's rarely used in practice,
 // especially for images).
-int SharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
-                   const uint8_t* b_ptr, int step, int rgb_stride,
-                   uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u,
-                   int dst_stride_u, uint8_t* dst_v, int dst_stride_v,
-                   int width, int height);
+int SharpYuvConvert(const uint8_t* r_ptr, const uint8_t* g_ptr,
+                    const uint8_t* b_ptr, int step, int rgb_stride,
+                    uint8_t* dst_y, int dst_stride_y, uint8_t* dst_u,
+                    int dst_stride_u, uint8_t* dst_v, int dst_stride_v,
+                    int width, int height,
+                    const SharpYuvConversionMatrix* yuv_matrix);
 
 #ifdef __cplusplus
 }  // extern "C"
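With the conversion matrix and its fixed-point convention now documented in the header, a caller can describe its own color space instead of relying on the WebP default. Purely as an illustration, such a matrix could look like the sketch below, assuming full-range BT.601 and coefficients stored as round(c * 65536); the numeric values are this sketch's own rounding, not constants shipped by the library, and the matrix would be passed as the yuv_matrix argument of SharpYuvConvert.

    #include "sharpyuv/sharpyuv.h"

    /* Hypothetical full-range BT.601 matrix:
     *   Y =  0.299 R + 0.587 G + 0.114 B
     *   U = -0.168736 R - 0.331264 G + 0.5 B + 128
     *   V =  0.5 R - 0.418688 G - 0.081312 B + 128
     * with every coefficient scaled by 1 << 16. */
    static const SharpYuvConversionMatrix kFullRangeBt601 = {
        {19595, 38470, 7471, 0},             /* rgb_to_y */
        {-11059, -21709, 32768, 128 << 16},  /* rgb_to_u */
        {32768, -27439, -5329, 128 << 16},   /* rgb_to_v */
    };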
diff --git a/src/enc/picture_csp_enc.c b/src/enc/picture_csp_enc.c
index f80625cf..cabee114 100644
--- a/src/enc/picture_csp_enc.c
+++ b/src/enc/picture_csp_enc.c
@@ -179,12 +179,12 @@ static int PreprocessARGB(const uint8_t* r_ptr,
                           const uint8_t* b_ptr,
                           int step, int rgb_stride,
                           WebPPicture* const picture) {
-  int ok = SharpArgbToYuv(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture->y,
-                          picture->y_stride, picture->u, picture->uv_stride,
-                          picture->v, picture->uv_stride, picture->width,
-                          picture->height);
+  const int ok = SharpYuvConvert(
+      r_ptr, g_ptr, b_ptr, step, rgb_stride, picture->y, picture->y_stride,
+      picture->u, picture->uv_stride, picture->v, picture->uv_stride,
+      picture->width, picture->height, SharpYuvGetWebpMatrix());
   if (!ok) {
-    ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
+    return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
   }
   return ok;
 }
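For completeness, the sketch below shows how a caller outside the encoder might wire the renamed entry point, mirroring the updated PreprocessARGB above: interleaved 8-bit RGB in, tightly packed planar YUV420 out, with the default WebP matrix. The wrapper name, buffer layout and stride choices are assumptions of the sketch, not part of the change.

    #include <stdint.h>

    #include "sharpyuv/sharpyuv.h"

    /* Convert interleaved RGB to planar YUV420 with the matrix the WebP
     * encoder uses. */
    static int ConvertWithWebpMatrix(const uint8_t* rgb, int width, int height,
                                     uint8_t* dst_y, uint8_t* dst_u,
                                     uint8_t* dst_v) {
      const int uv_width = (width + 1) / 2;
      /* r/g/b pointers share one interleaved buffer: step is the distance
       * between two pixels, rgb_stride the distance between two rows. */
      const int ok = SharpYuvConvert(rgb + 0, rgb + 1, rgb + 2,
                                     /*step=*/3, /*rgb_stride=*/width * 3,
                                     dst_y, /*dst_stride_y=*/width,
                                     dst_u, /*dst_stride_u=*/uv_width,
                                     dst_v, /*dst_stride_v=*/uv_width,
                                     width, height, SharpYuvGetWebpMatrix());
      /* A zero return means the conversion did not run (for example for images
       * smaller than kMinDimensionIterativeConversion), so callers need a
       * fallback, as PreprocessARGB does by reporting an encoding error. */
      return ok;
    }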