Mirror of https://github.com/webmproject/libwebp.git
Use deterministic random-dithering during RGB->YUV conversion

-> helps debanding (sky, gradients, etc.)

This dithering can only be triggered when using -preset photo or -pre 2
(as a preprocessing step). Everything is unchanged otherwise.

Note that this change is likely to make the perceived PSNR/SSIM drop,
since we're altering the input internally.

Change-Id: Id8d4326245d9b828141de162c94ba381b1fa5813
Parent: 8a2fa099cc
Commit: 0b2b05049f
@@ -1,5 +1,5 @@
 .\" Hey, EMACS: -*- nroff -*-
-.TH CWEBP 1 "March 28, 2013"
+.TH CWEBP 1 "October 17, 2013"
 .SH NAME
 cwebp \- compress an image file to a WebP file
 .SH SYNOPSIS
@@ -168,8 +168,9 @@ Output additional ASCII-map of encoding information. Possible map values
 range from 1 to 6. This is only meant to help debugging.
 .TP
 .BI \-pre " int
-Specify a pre-processing filter. This option is a placeholder
-and has currently no effect.
+Specify some pre-processing steps. Using a value of '2' will trigger
+quality-dependent pseudo-random dithering during RGBA->YUVA conversion
+(lossy compression only).
 .TP
 .BI \-alpha_filter " string
 Specify the predictive filtering method for the alpha plane. One of 'none',
@@ -481,7 +481,8 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
   uint8_t* const y = buf->y + y_pos * buf->y_stride;
   for (i = 0; i < width; ++i) {
     const uint32_t p = src[i];
-    y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff);
+    y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff,
+                     YUV_HALF);
   }
 }

@@ -500,11 +501,11 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
     const int g = ((v0 >> 7) & 0x1fe) + ((v1 >> 7) & 0x1fe);
     const int b = ((v0 << 1) & 0x1fe) + ((v1 << 1) & 0x1fe);
     if (!(y_pos & 1)) {   // even lines: store values
-      u[i] = VP8RGBToU(r, g, b);
-      v[i] = VP8RGBToV(r, g, b);
+      u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
+      v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
     } else {              // odd lines: average with previous values
-      const int tmp_u = VP8RGBToU(r, g, b);
-      const int tmp_v = VP8RGBToV(r, g, b);
+      const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2);
+      const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2);
       // Approximated average-of-four. But it's an acceptable diff.
       u[i] = (u[i] + tmp_u + 1) >> 1;
       v[i] = (v[i] + tmp_v + 1) >> 1;
@@ -516,11 +517,11 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
     const int g = (v0 >> 6) & 0x3fc;
     const int b = (v0 << 2) & 0x3fc;
     if (!(y_pos & 1)) {   // even lines
-      u[i] = VP8RGBToU(r, g, b);
-      v[i] = VP8RGBToV(r, g, b);
+      u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
+      v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
     } else {              // odd lines (note: we could just skip this)
-      const int tmp_u = VP8RGBToU(r, g, b);
-      const int tmp_v = VP8RGBToV(r, g, b);
+      const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2);
+      const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2);
       u[i] = (u[i] + tmp_u + 1) >> 1;
       v[i] = (v[i] + tmp_v + 1) >> 1;
     }
@@ -265,27 +265,27 @@ extern void VP8YUVInitSSE2(void);
 //------------------------------------------------------------------------------
 // RGB -> YUV conversion

-static WEBP_INLINE int VP8ClipUV(int v) {
-  v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
-  return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
+// Stub functions that can be called with various rounding values:
+static WEBP_INLINE int VP8ClipUV(int uv, int rounding) {
+  uv = (uv + rounding + (128 << (YUV_FIX + 2))) >> (YUV_FIX + 2);
+  return ((uv & ~0xff) == 0) ? uv : (uv < 0) ? 0 : 255;
 }

 #ifndef USE_YUVj

-static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
-  const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
   const int luma = 16839 * r + 33059 * g + 6420 * b;
-  return (luma + kRound) >> YUV_FIX;  // no need to clip
+  return (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX;  // no need to clip
 }

-static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
+static WEBP_INLINE int VP8RGBToU(int r, int g, int b, int rounding) {
   const int u = -9719 * r - 19081 * g + 28800 * b;
-  return VP8ClipUV(u);
+  return VP8ClipUV(u, rounding);
 }

-static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
+static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
   const int v = +28800 * r - 24116 * g - 4684 * b;
-  return VP8ClipUV(v);
+  return VP8ClipUV(v, rounding);
 }

 #else
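As an aside, this refactoring is behaviour-preserving for the existing, non-dithered callers: passing YUV_HALF (that is, 1 << (YUV_FIX - 1), with YUV_FIX being 16 in yuv.h) as the new 'rounding' argument reproduces the old fixed rounding, which is exactly what the ConvertToYUVA hunks above do. A small standalone sketch, for illustration only and not part of the patch:

// Illustration only. Assumes YUV_FIX == 16 and YUV_HALF == 1 << (YUV_FIX - 1),
// the values used by yuv.h.
#include <stdio.h>

#define YUV_FIX  16
#define YUV_HALF (1 << (YUV_FIX - 1))

// Old form: round-to-nearest constant baked into the function.
static int OldRGBToY(int r, int g, int b) {
  const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
  const int luma = 16839 * r + 33059 * g + 6420 * b;
  return (luma + kRound) >> YUV_FIX;
}

// New form: the caller supplies the rounding term, so a dithered caller can
// perturb it while a plain caller passes YUV_HALF and gets the old behaviour.
static int NewRGBToY(int r, int g, int b, int rounding) {
  const int luma = 16839 * r + 33059 * g + 6420 * b;
  return (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX;
}

int main(void) {
  int v;
  for (v = 0; v < 256; ++v) {
    if (OldRGBToY(v, v, v) != NewRGBToY(v, v, v, YUV_HALF)) return 1;
  }
  printf("old and new agree for all gray levels, e.g. Y(128) = %d\n",
         NewRGBToY(128, 128, 128, YUV_HALF));
  return 0;
}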
@@ -293,20 +293,19 @@ static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
 // This JPEG-YUV colorspace, only for comparison!
 // These are also 16bit precision coefficients from Rec.601, but with full
 // [0..255] output range.
-static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
-  const int kRound = (1 << (YUV_FIX - 1));
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
   const int luma = 19595 * r + 38470 * g + 7471 * b;
-  return (luma + kRound) >> YUV_FIX;  // no need to clip
+  return (luma + rounding) >> YUV_FIX;  // no need to clip
 }

-static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
+static WEBP_INLINE int VP8_RGB_TO_U(int r, int g, int b, int rounding) {
   const int u = -11058 * r - 21710 * g + 32768 * b;
-  return VP8ClipUV(u);
+  return VP8ClipUV(u, rounding);
 }

-static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
+static WEBP_INLINE int VP8_RGB_TO_V(int r, int g, int b, int rounding) {
   const int v = 32768 * r - 27439 * g - 5329 * b;
-  return VP8ClipUV(v);
+  return VP8ClipUV(v, rounding);
 }

 #endif  // USE_YUVj
@@ -58,11 +58,13 @@ int WebPConfigInitInternal(WebPConfig* config,
       config->sns_strength = 80;
       config->filter_sharpness = 4;
       config->filter_strength = 35;
+      config->preprocessing &= ~2;   // no dithering
       break;
     case WEBP_PRESET_PHOTO:
      config->sns_strength = 80;
       config->filter_sharpness = 3;
       config->filter_strength = 30;
+      config->preprocessing |= 2;
       break;
     case WEBP_PRESET_DRAWING:
       config->sns_strength = 25;
@@ -72,10 +74,12 @@
     case WEBP_PRESET_ICON:
       config->sns_strength = 0;
       config->filter_strength = 0;   // disable filtering to retain sharpness
+      config->preprocessing &= ~2;   // no dithering
       break;
     case WEBP_PRESET_TEXT:
       config->sns_strength = 0;
       config->filter_strength = 0;   // disable filtering to retain sharpness
+      config->preprocessing &= ~2;   // no dithering
       config->segments = 2;
       break;
     case WEBP_PRESET_DEFAULT:
@@ -111,7 +115,7 @@ int WebPValidateConfig(const WebPConfig* config) {
     return 0;
   if (config->show_compressed < 0 || config->show_compressed > 1)
     return 0;
-  if (config->preprocessing < 0 || config->preprocessing > 1)
+  if (config->preprocessing < 0 || config->preprocessing > 3)
     return 0;
  if (config->partitions < 0 || config->partitions > 3)
     return 0;
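For API users, the dithering bit can be requested either via the photo preset (which now sets it by itself) or directly on the config, which is what 'cwebp -pre 2' does; the relaxed validation above accepts preprocessing values 0..3 so that bit 0 (segment-smooth) and bit 1 (dithering) can be combined. A hedged usage sketch, with the helper name SetupPhotoConfig purely illustrative:

// Usage sketch (not part of the patch); error handling trimmed for brevity.
#include "webp/encode.h"

int SetupPhotoConfig(WebPConfig* const config, float quality) {
  // WEBP_PRESET_PHOTO now enables the dithering bit on its own...
  if (!WebPConfigPreset(config, WEBP_PRESET_PHOTO, quality)) return 0;
  // ...but it can also be set explicitly, matching 'cwebp -pre 2':
  config->preprocessing |= 2;   // bit 1 = pseudo-random dithering
  // preprocessing may now be 0..3 (bit 0 = segment-smooth, bit 1 = dithering).
  return WebPValidateConfig(config);
}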
@@ -590,6 +590,64 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
 //------------------------------------------------------------------------------
 // RGB -> YUV conversion

+#define DITHER_FIX 8   // fixed-point precision for dithering
+
+#define kRandomTableSize 55
+static const uint32_t kRandomTable[kRandomTableSize] = {  // 31b-range values
+  0x0de15230, 0x03b31886, 0x775faccb, 0x1c88626a, 0x68385c55, 0x14b3b828,
+  0x4a85fef8, 0x49ddb84b, 0x64fcf397, 0x5c550289, 0x4a290000, 0x0d7ec1da,
+  0x5940b7ab, 0x5492577d, 0x4e19ca72, 0x38d38c69, 0x0c01ee65, 0x32a1755f,
+  0x5437f652, 0x5abb2c32, 0x0faa57b1, 0x73f533e7, 0x685feeda, 0x7563cce2,
+  0x6e990e83, 0x4730a7ed, 0x4fc0d9c6, 0x496b153c, 0x4f1403fa, 0x541afb0c,
+  0x73990b32, 0x26d7cb1c, 0x6fcc3706, 0x2cbb77d8, 0x75762f2a, 0x6425ccdd,
+  0x24b35461, 0x0a7d8715, 0x220414a8, 0x141ebf67, 0x56b41583, 0x73e502e3,
+  0x44cab16f, 0x28264d42, 0x73baaefb, 0x0a50ebed, 0x1d6ab6fb, 0x0d3ad40b,
+  0x35db3b68, 0x2b081e83, 0x77ce6b95, 0x5181e5f0, 0x78853bbc, 0x009f9494,
+  0x27e5ed3c
+};
+
+typedef struct {
+  int index1_, index2_;
+  uint32_t tab_[kRandomTableSize];
+  int amp_;
+} VP8Random;
+
+static void InitRandom(VP8Random* const rg, float dithering) {
+  memcpy(rg->tab_, kRandomTable, sizeof(rg->tab_));
+  rg->index1_ = 0;
+  rg->index2_ = 31;
+  rg->amp_ = (dithering < 0.0) ? 0
+           : (dithering > 1.0) ? (1 << DITHER_FIX)
+           : (uint32_t)((1 << DITHER_FIX) * dithering);
+}
+
+// D.Knuth's Difference-based random generator.
+static WEBP_INLINE int Random(VP8Random* const rg, int num_bits) {
+  int diff;
+  assert(num_bits + DITHER_FIX <= 31);
+  diff = rg->tab_[rg->index1_] - rg->tab_[rg->index2_];
+  if (diff < 0) diff += (1u << 31);
+  rg->tab_[rg->index1_] = diff;
+  if (++rg->index1_ == kRandomTableSize) rg->index1_ = 0;
+  if (++rg->index2_ == kRandomTableSize) rg->index2_ = 0;
+  diff = (diff << 1) >> (32 - num_bits);    // sign-extend, 0-center
+  diff = (diff * rg->amp_) >> DITHER_FIX;   // restrict range
+  diff += 1 << (num_bits - 1);              // shift back to 0.5-center
+  return diff;
+}
+
+static int RGBToY(int r, int g, int b, VP8Random* const rg) {
+  return VP8RGBToY(r, g, b, Random(rg, YUV_FIX));
+}
+
+static int RGBToU(int r, int g, int b, VP8Random* const rg) {
+  return VP8RGBToU(r, g, b, Random(rg, YUV_FIX + 2));
+}
+
+static int RGBToV(int r, int g, int b, VP8Random* const rg) {
+  return VP8RGBToV(r, g, b, Random(rg, YUV_FIX + 2));
+}
+
 // TODO: we can do better than simply 2x2 averaging on U/V samples.
 #define SUM4(ptr) ((ptr)[0] + (ptr)[step] + \
                    (ptr)[rgb_stride] + (ptr)[rgb_stride + step])
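The generator added above is Knuth's lagged-difference scheme, and Random() returns a rounding offset centred on 1 << (num_bits - 1) whose spread is scaled by amp_, so a dithering strength of 0.0 degenerates to plain round-to-nearest. The standalone sketch below re-states the patch's integer arithmetic with a stand-in seed table, purely to illustrate that zero-amplitude property; it is not part of the patch.

// Illustration only: with amp_ == 0, the dither offset is always exactly
// 1 << (num_bits - 1), i.e. ordinary round-to-nearest, whatever the table holds.
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DITHER_FIX 8
#define kTableSize 55

typedef struct {
  int index1_, index2_;
  uint32_t tab_[kTableSize];
  int amp_;
} Rng;

static int NextDither(Rng* const rg, int num_bits) {
  int diff = rg->tab_[rg->index1_] - rg->tab_[rg->index2_];
  if (diff < 0) diff += (1u << 31);
  rg->tab_[rg->index1_] = diff;
  if (++rg->index1_ == kTableSize) rg->index1_ = 0;
  if (++rg->index2_ == kTableSize) rg->index2_ = 0;
  diff = (diff << 1) >> (32 - num_bits);   // keep top bits, 0-centered
  diff = (diff * rg->amp_) >> DITHER_FIX;  // scale by dithering amplitude
  return diff + (1 << (num_bits - 1));     // re-center around 0.5
}

int main(void) {
  Rng rg;
  int i;
  memset(&rg, 0, sizeof(rg));
  for (i = 0; i < kTableSize; ++i) rg.tab_[i] = 123456789u * (i + 1);  // stand-in seeds
  rg.index1_ = 0;
  rg.index2_ = 31;
  rg.amp_ = 0;                                   // dithering strength 0.0
  for (i = 0; i < 1000; ++i) {
    assert(NextDither(&rg, 16) == (1 << 15));    // always YUV_HALF when amp_ == 0
  }
  return 0;
}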
@@ -602,8 +660,8 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
     const int r = SUM(r_ptr + src);          \
     const int g = SUM(g_ptr + src);          \
     const int b = SUM(b_ptr + src);          \
-    picture->u[dst] = VP8RGBToU(r, g, b);    \
-    picture->v[dst] = VP8RGBToV(r, g, b);    \
+    picture->u[dst] = RGBToU(r, g, b, &rg);  \
+    picture->v[dst] = RGBToV(r, g, b, &rg);  \
 }

 #define RGB_TO_UV0(x_in, x_out, y, SUM) {  \
@@ -612,8 +670,8 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
     const int r = SUM(r_ptr + src);           \
     const int g = SUM(g_ptr + src);           \
     const int b = SUM(b_ptr + src);           \
-    picture->u0[dst] = VP8RGBToU(r, g, b);    \
-    picture->v0[dst] = VP8RGBToV(r, g, b);    \
+    picture->u0[dst] = RGBToU(r, g, b, &rg);  \
+    picture->v0[dst] = RGBToV(r, g, b, &rg);  \
 }

 static void MakeGray(WebPPicture* const picture) {
@@ -632,12 +690,14 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
                               const uint8_t* const a_ptr,
                               int step,         // bytes per pixel
                               int rgb_stride,   // bytes per scanline
+                              float dithering,
                               WebPPicture* const picture) {
   const WebPEncCSP uv_csp = picture->colorspace & WEBP_CSP_UV_MASK;
   int x, y;
   const int width = picture->width;
   const int height = picture->height;
   const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);
+  VP8Random rg;

   picture->colorspace = uv_csp;
   picture->use_argb = 0;
@@ -646,12 +706,14 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
   }
   if (!WebPPictureAlloc(picture)) return 0;

+  InitRandom(&rg, dithering);
+
   // Import luma plane
   for (y = 0; y < height; ++y) {
     for (x = 0; x < width; ++x) {
       const int offset = step * x + y * rgb_stride;
       picture->y[x + y * picture->y_stride] =
-          VP8RGBToY(r_ptr[offset], g_ptr[offset], b_ptr[offset]);
+          RGBToY(r_ptr[offset], g_ptr[offset], b_ptr[offset], &rg);
     }
   }

@@ -722,7 +784,7 @@ static int Import(WebPPicture* const picture,

   if (!picture->use_argb) {
     return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
-                              picture);
+                              0.f /* no dithering */, picture);
   }
   if (import_alpha) {
     picture->colorspace |= WEBP_CSP_ALPHA_BIT;
@@ -855,7 +917,8 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
   return 1;
 }

-int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
+int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
+                                  float dithering) {
   if (picture == NULL) return 0;
   if (picture->argb == NULL) {
     return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
@@ -871,7 +934,8 @@ int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
   PictureResetARGB(&tmp);  // reset ARGB buffer so that it's not free()'d.
   tmp.use_argb = 0;
   tmp.colorspace = colorspace & WEBP_CSP_UV_MASK;
-  if (!ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, &tmp)) {
+  if (!ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, dithering,
+                          &tmp)) {
     return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
   }
   // Copy back the YUV specs into 'picture'.
@@ -883,6 +947,10 @@ int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
   return 1;
 }

+int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
+  return WebPPictureARGBToYUVADithered(picture, colorspace, 0.f);
+}
+
 //------------------------------------------------------------------------------
 // Helper: clean up fully transparent area to help compressibility.

@@ -960,14 +1028,16 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
   const int red = (background_rgb >> 16) & 0xff;
   const int green = (background_rgb >> 8) & 0xff;
   const int blue = (background_rgb >> 0) & 0xff;
+  VP8Random rg;
   int x, y;
   if (pic == NULL) return;
+  InitRandom(&rg, 0.f);
   if (!pic->use_argb) {
     const int uv_width = (pic->width >> 1);  // omit last pixel during u/v loop
-    const int Y0 = VP8RGBToY(red, green, blue);
+    const int Y0 = RGBToY(red, green, blue, &rg);
     // VP8RGBToU/V expects the u/v values summed over four pixels
-    const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue);
-    const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue);
+    const int U0 = RGBToU(4 * red, 4 * green, 4 * blue, &rg);
+    const int V0 = RGBToV(4 * red, 4 * green, 4 * blue, &rg);
     const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
     if (!has_alpha || pic->a == NULL) return;  // nothing to do
     for (y = 0; y < pic->height; ++y) {
@@ -358,7 +358,17 @@ int WebPEncode(const WebPConfig* config, WebPPicture* pic) {
   VP8Encoder* enc = NULL;
   if (pic->y == NULL || pic->u == NULL || pic->v == NULL) {
     // Make sure we have YUVA samples.
-    if (!WebPPictureARGBToYUVA(pic, WEBP_YUV420)) return 0;
+    float dithering = 0.f;
+    if (config->preprocessing & 2) {
+      const float x = config->quality / 100.f;
+      const float x2 = x * x;
+      // slowly decreasing from max dithering at low quality (q->0)
+      // to 0.5 dithering amplitude at high quality (q->100)
+      dithering = 1.0f + (0.5f - 1.0f) * x2 * x2;
+    }
+    if (!WebPPictureARGBToYUVADithered(pic, WEBP_YUV420, dithering)) {
+      return 0;
+    }
   }

   enc = InitVP8Encoder(config, pic);
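The quality-to-amplitude mapping above is dithering = 1 + (0.5 - 1) * x^4 with x = quality / 100: full-strength dithering at q=0, falling smoothly to half strength at q=100. A tiny worked example (illustration only, not part of the patch):

// Worked example of the mapping used in WebPEncode() above.
#include <stdio.h>

static float DitherAmp(float quality) {
  const float x = quality / 100.f;
  const float x2 = x * x;
  return 1.0f + (0.5f - 1.0f) * x2 * x2;   // 1.0 at q=0, 0.5 at q=100
}

int main(void) {
  printf("q=0   -> %.3f\n", DitherAmp(0.f));     // 1.000 (maximum dithering)
  printf("q=50  -> %.3f\n", DitherAmp(50.f));    // 0.969
  printf("q=75  -> %.3f\n", DitherAmp(75.f));    // 0.842
  printf("q=100 -> %.3f\n", DitherAmp(100.f));   // 0.500
  return 0;
}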
@@ -117,7 +117,8 @@ struct WebPConfig {

   int show_compressed;    // if true, export the compressed picture back.
                           // In-loop filtering is not applied.
-  int preprocessing;      // preprocessing filter (0=none, 1=segment-smooth)
+  int preprocessing;      // preprocessing filter:
+                          // 0=none, 1=segment-smooth, 2=pseudo-random dithering
   int partitions;         // log2(number of token partitions) in [0..3]. Default
                           // is set to 0 for easier progressive decoding.
   int partition_limit;    // quality degradation allowed to fit the 512k limit
@@ -443,6 +444,13 @@ WEBP_EXTERN(int) WebPPictureImportBGRX(
 WEBP_EXTERN(int) WebPPictureARGBToYUVA(WebPPicture* picture,
                                        WebPEncCSP colorspace);

+// Same as WebPPictureARGBToYUVA(), but the conversion is done using
+// pseudo-random dithering with a strength 'dithering' between
+// 0.0 (no dithering) and 1.0 (maximum dithering). This is useful
+// for photographic picture.
+WEBP_EXTERN(int) WebPPictureARGBToYUVADithered(
+    WebPPicture* picture, WebPEncCSP colorspace, float dithering);
+
 // Converts picture->yuv to picture->argb and sets picture->use_argb to true.
 // The input format must be YUV_420 or YUV_420A.
 // Note that the use of this method is discouraged if one has access to the
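Finally, a minimal usage sketch for the new public entry point, assuming a picture already imported as ARGB (e.g. via WebPPictureImportRGBA()); the helper name is hypothetical and error handling is trimmed:

// Usage sketch (not part of the patch).
#include "webp/encode.h"

int ConvertWithDithering(WebPPicture* const pic) {
  // 0.0 = no dithering (equivalent to WebPPictureARGBToYUVA()),
  // 1.0 = maximum dithering; intermediate values scale the amplitude.
  return WebPPictureARGBToYUVADithered(pic, WEBP_YUV420, 0.8f);
}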